// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * Common eBPF ELF object loading operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 * Copyright (C) 2017 Nicira, Inc.
 * Copyright (C) 2019 Isovalent, Inc.
 */
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <libgen.h>
#include <inttypes.h>
#include <limits.h>
#include <string.h>
#include <unistd.h>
#include <endian.h>
#include <fcntl.h>
#include <errno.h>
#include <ctype.h>
#include <asm/unistd.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/list.h>
#include <linux/limits.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>
#include <linux/version.h>
#include <sys/epoll.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/vfs.h>
#include <sys/utsname.h>
#include <sys/resource.h>
#include <libelf.h>
#include <gelf.h>
#include <zlib.h>

#include "libbpf.h"
#include "bpf.h"
#include "btf.h"
#include "str_error.h"
#include "libbpf_internal.h"
#include "hashmap.h"
#include "bpf_gen_internal.h"

#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC 0xcafe4a11
#endif

#define BPF_INSN_SZ (sizeof(struct bpf_insn))

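/* __base_pr() below passes a caller-provided, non-literal format string to
 * vfprintf(), so suppress -Wformat-nonliteral for this file.
 */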
#pragma GCC diagnostic ignored "-Wformat-nonliteral"

#define __printf(a, b) __attribute__((format(printf, a, b)))

static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog);

static int __base_pr(enum libbpf_print_level level, const char *format,
		     va_list args)
{
	if (level == LIBBPF_DEBUG)
		return 0;

	return vfprintf(stderr, format, args);
}

static libbpf_print_fn_t __libbpf_pr = __base_pr;

libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn)
{
	libbpf_print_fn_t old_print_fn = __libbpf_pr;

	__libbpf_pr = fn;
	return old_print_fn;
}

__printf(2, 3)
void libbpf_print(enum libbpf_print_level level, const char *format, ...)
{
	va_list args;

	if (!__libbpf_pr)
		return;

	va_start(args, format);
	__libbpf_pr(level, format, args);
	va_end(args);
}

static void pr_perm_msg(int err)
{
	struct rlimit limit;
	char buf[100];

	if (err != -EPERM || geteuid() != 0)
		return;

	err = getrlimit(RLIMIT_MEMLOCK, &limit);
	if (err)
		return;

	if (limit.rlim_cur == RLIM_INFINITY)
		return;

	if (limit.rlim_cur < 1024)
		snprintf(buf, sizeof(buf), "%zu bytes", (size_t)limit.rlim_cur);
	else if (limit.rlim_cur < 1024*1024)
		snprintf(buf, sizeof(buf), "%.1f KiB", (double)limit.rlim_cur / 1024);
	else
		snprintf(buf, sizeof(buf), "%.1f MiB", (double)limit.rlim_cur / (1024*1024));

	pr_warn("permission error while running as root; try raising 'ulimit -l'? current value: %s\n",
		buf);
}

#define STRERR_BUFSIZE 128

#ifndef zfree
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

#ifndef zclose
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

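/* this goes away in libbpf 1.0 */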
enum libbpf_strict_mode libbpf_mode = LIBBPF_STRICT_NONE;

int libbpf_set_strict_mode(enum libbpf_strict_mode mode)
{
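	/* __LIBBPF_STRICT_LAST is the last power-of-2 value used + 1, so to
	 * get all possible values we compensate last +1, and then (2*x - 1)
	 * operation suffices to get the bit mask
	 */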
	if (mode != LIBBPF_STRICT_ALL
	    && (mode & ~((__LIBBPF_STRICT_LAST - 1) * 2 - 1)))
		return errno = EINVAL, -EINVAL;

	libbpf_mode = mode;
	return 0;
}

__u32 libbpf_major_version(void)
{
	return LIBBPF_MAJOR_VERSION;
}

__u32 libbpf_minor_version(void)
{
	return LIBBPF_MINOR_VERSION;
}

const char *libbpf_version_string(void)
{
#define __S(X) #X
#define _S(X) __S(X)
	return "v" _S(LIBBPF_MAJOR_VERSION) "." _S(LIBBPF_MINOR_VERSION);
#undef _S
#undef __S
}

enum reloc_type {
	RELO_LD64,
	RELO_CALL,
	RELO_DATA,
	RELO_EXTERN_VAR,
	RELO_EXTERN_FUNC,
	RELO_SUBPROG_ADDR,
	RELO_CORE,
};

struct reloc_desc {
	enum reloc_type type;
	int insn_idx;
	union {
		const struct bpf_core_relo *core_relo;
		struct {
			int map_idx;
			int sym_off;
		};
	};
};

struct bpf_sec_def;

typedef int (*init_fn_t)(struct bpf_program *prog, long cookie);
typedef int (*preload_fn_t)(struct bpf_program *prog, struct bpf_prog_load_opts *opts, long cookie);
typedef struct bpf_link *(*attach_fn_t)(const struct bpf_program *prog, long cookie);

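/* Flags describing the extra semantics of a program's SEC() definition:
 * whether expected_attach_type is optional, whether the program type is
 * auto-attachable, needs a BTF-based attach target, may be sleepable, or
 * allows non-strict ("sloppy") section-name prefix matching.
 */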
enum sec_def_flags {
	SEC_NONE = 0,
	SEC_EXP_ATTACH_OPT = 1,
	SEC_ATTACHABLE = 2,
	SEC_ATTACHABLE_OPT = SEC_ATTACHABLE | SEC_EXP_ATTACH_OPT,
	SEC_ATTACH_BTF = 4,
	SEC_SLEEPABLE = 8,
	SEC_SLOPPY_PFX = 16,
};

struct bpf_sec_def {
	const char *sec;
	enum bpf_prog_type prog_type;
	enum bpf_attach_type expected_attach_type;
	long cookie;

	init_fn_t init_fn;
	preload_fn_t preload_fn;
	attach_fn_t attach_fn;
};

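/*
 * bpf_prog should be a better name but it has been used in
 * linux/filter.h.
 */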
struct bpf_program {
	const struct bpf_sec_def *sec_def;
	char *sec_name;
	size_t sec_idx;
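	/* this program's instruction offset (in number of instructions)
	 * within its containing ELF section
	 */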
	size_t sec_insn_off;
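	/* number of original instructions in ELF section belonging to this
	 * program, not taking into account subprog instructions that might be
	 * appended later during relocation
	 */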
	size_t sec_insn_cnt;
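	/* offset (in number of instructions) of this program's image within
	 * the entry-point program's combined (main + subprogs) instruction
	 * array; always zero for entry-point programs themselves
	 */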
	size_t sub_insn_off;

	char *name;
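	/* prog's name or sec_name with '/' replaced by '_'; used as pin file
	 * name, see __bpf_program__pin_name()
	 */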
	char *pin_name;
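	/* instructions that belong to BPF program; insns[0] is located at
	 * sec_insn_off instruction within its ELF section in ELF file, so
	 * when mapping ELF file instruction index to the local instruction,
	 * one needs to subtract sec_insn_off; and vice versa
	 */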
	struct bpf_insn *insns;
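	/* actual number of instructions in this BPF program's image; for
	 * entry-point BPF programs this includes the size of main program
	 * itself plus all used sub-programs, appended at the end
	 */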
	size_t insns_cnt;

	struct reloc_desc *reloc_desc;
	int nr_reloc;

	char *log_buf;
	size_t log_size;
	__u32 log_level;

	struct {
		int nr;
		int *fds;
	} instances;
	bpf_program_prep_t preprocessor;

	struct bpf_object *obj;
	void *priv;
	bpf_program_clear_priv_t clear_priv;

	bool load;
	bool mark_btf_static;
	enum bpf_prog_type type;
	enum bpf_attach_type expected_attach_type;
	int prog_ifindex;
	__u32 attach_btf_obj_fd;
	__u32 attach_btf_id;
	__u32 attach_prog_fd;
	void *func_info;
	__u32 func_info_rec_size;
	__u32 func_info_cnt;

	void *line_info;
	__u32 line_info_rec_size;
	__u32 line_info_cnt;
	__u32 prog_flags;
};

struct bpf_struct_ops {
	const char *tname;
	const struct btf_type *type;
	struct bpf_program **progs;
	__u32 *kern_func_off;

	void *data;
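	/* kernel-side map value in btf_vmlinux's format, e.g.
	 * struct bpf_struct_ops_<tname> wrapping struct <tname>; it is
	 * allocated and populated from "data" and the program fds by
	 * bpf_map__init_kern_struct_ops() during load, then used to
	 * update the struct_ops map
	 */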
	void *kern_vdata;
	__u32 type_id;
};

#define DATA_SEC ".data"
#define BSS_SEC ".bss"
#define RODATA_SEC ".rodata"
#define KCONFIG_SEC ".kconfig"
#define KSYMS_SEC ".ksyms"
#define STRUCT_OPS_SEC ".struct_ops"

enum libbpf_map_type {
	LIBBPF_MAP_UNSPEC,
	LIBBPF_MAP_DATA,
	LIBBPF_MAP_BSS,
	LIBBPF_MAP_RODATA,
	LIBBPF_MAP_KCONFIG,
};

struct bpf_map {
	char *name;
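	/* real_name is defined for special internal maps (.rodata*,
	 * .data*, .bss, .kconfig) and preserves their original ELF section
	 * name. This is important to be able to find corresponding BTF
	 * DATASEC information.
	 */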
	char *real_name;
	int fd;
	int sec_idx;
	size_t sec_offset;
	int map_ifindex;
	int inner_map_fd;
	struct bpf_map_def def;
	__u32 numa_node;
	__u32 btf_var_idx;
	__u32 btf_key_type_id;
	__u32 btf_value_type_id;
	__u32 btf_vmlinux_value_type_id;
	void *priv;
	bpf_map_clear_priv_t clear_priv;
	enum libbpf_map_type libbpf_type;
	void *mmaped;
	struct bpf_struct_ops *st_ops;
	struct bpf_map *inner_map;
	void **init_slots;
	int init_slots_sz;
	char *pin_path;
	bool pinned;
	bool reused;
	bool skipped;
	__u64 map_extra;
};

enum extern_type {
	EXT_UNKNOWN,
	EXT_KCFG,
	EXT_KSYM,
};

enum kcfg_type {
	KCFG_UNKNOWN,
	KCFG_CHAR,
	KCFG_BOOL,
	KCFG_INT,
	KCFG_TRISTATE,
	KCFG_CHAR_ARR,
};

struct extern_desc {
	enum extern_type type;
	int sym_idx;
	int btf_id;
	int sec_btf_id;
	const char *name;
	bool is_set;
	bool is_weak;
	union {
		struct {
			enum kcfg_type type;
			int sz;
			int align;
			int data_off;
			bool is_signed;
		} kcfg;
		struct {
			unsigned long long addr;

			int kernel_btf_obj_fd;
			int kernel_btf_id;

			__u32 type_id;

			__s16 btf_fd_idx;
		} ksym;
	};
};

static LIST_HEAD(bpf_objects_list);

struct module_btf {
	struct btf *btf;
	char *name;
	__u32 id;
	int fd;
	int fd_array_idx;
};

enum sec_type {
	SEC_UNUSED = 0,
	SEC_RELO,
	SEC_BSS,
	SEC_DATA,
	SEC_RODATA,
};

struct elf_sec_desc {
	enum sec_type sec_type;
	Elf64_Shdr *shdr;
	Elf_Data *data;
};

struct elf_state {
	int fd;
	const void *obj_buf;
	size_t obj_buf_sz;
	Elf *elf;
	Elf64_Ehdr *ehdr;
	Elf_Data *symbols;
	Elf_Data *st_ops_data;
	size_t shstrndx;
	size_t strtabidx;
	struct elf_sec_desc *secs;
	int sec_cnt;
	int maps_shndx;
	int btf_maps_shndx;
	__u32 btf_maps_sec_btf_id;
	int text_shndx;
	int symbols_shndx;
	int st_ops_shndx;
};

struct bpf_object {
	char name[BPF_OBJ_NAME_LEN];
	char license[64];
	__u32 kern_version;

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;
	size_t maps_cap;

	char *kconfig;
	struct extern_desc *externs;
	int nr_extern;
	int kconfig_map_idx;

	bool loaded;
	bool has_subcalls;
	bool has_rodata;

	struct bpf_gen *gen_loader;

	struct elf_state efile;

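	/*
	 * All loaded bpf_object are linked in a list, which is
	 * hidden to caller. bpf_objects__<func> handlers deal with
	 * all objects.
	 */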
	struct list_head list;

	struct btf *btf;
	struct btf_ext *btf_ext;

	struct btf *btf_vmlinux;

	char *btf_custom_path;

	struct btf *btf_vmlinux_override;

	struct module_btf *btf_modules;
	bool btf_modules_loaded;
	size_t btf_module_cnt;
	size_t btf_module_cap;

	char *log_buf;
	size_t log_size;
	__u32 log_level;

	void *priv;
	bpf_object_clear_priv_t clear_priv;

	int *fd_array;
	size_t fd_array_cap;
	size_t fd_array_cnt;

	char path[];
};

static const char *elf_sym_str(const struct bpf_object *obj, size_t off);
static const char *elf_sec_str(const struct bpf_object *obj, size_t off);
static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx);
static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name);
static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn);
static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn);
static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn);
static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx);
static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx);

void bpf_program__unload(struct bpf_program *prog)
{
	int i;

	if (!prog)
		return;

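	/*
	 * If the object is opened but the program was never loaded,
	 * it is possible that prog->instances.nr == -1.
	 */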
	if (prog->instances.nr > 0) {
		for (i = 0; i < prog->instances.nr; i++)
			zclose(prog->instances.fds[i]);
	} else if (prog->instances.nr != -1) {
		pr_warn("Internal error: instances.nr is %d\n",
			prog->instances.nr);
	}

	prog->instances.nr = -1;
	zfree(&prog->instances.fds);

	zfree(&prog->func_info);
	zfree(&prog->line_info);
}

static void bpf_program__exit(struct bpf_program *prog)
{
	if (!prog)
		return;

	if (prog->clear_priv)
		prog->clear_priv(prog, prog->priv);

	prog->priv = NULL;
	prog->clear_priv = NULL;

	bpf_program__unload(prog);
	zfree(&prog->name);
	zfree(&prog->sec_name);
	zfree(&prog->pin_name);
	zfree(&prog->insns);
	zfree(&prog->reloc_desc);

	prog->nr_reloc = 0;
	prog->insns_cnt = 0;
	prog->sec_idx = -1;
}

static char *__bpf_program__pin_name(struct bpf_program *prog)
{
	char *name, *p;

	if (libbpf_mode & LIBBPF_STRICT_SEC_NAME)
		name = strdup(prog->name);
	else
		name = strdup(prog->sec_name);

	if (!name)
		return NULL;

	p = name;

	while ((p = strchr(p, '/')))
		*p = '_';

	return name;
}

static bool insn_is_subprog_call(const struct bpf_insn *insn)
{
	return BPF_CLASS(insn->code) == BPF_JMP &&
	       BPF_OP(insn->code) == BPF_CALL &&
	       BPF_SRC(insn->code) == BPF_K &&
	       insn->src_reg == BPF_PSEUDO_CALL &&
	       insn->dst_reg == 0 &&
	       insn->off == 0;
}

static bool is_call_insn(const struct bpf_insn *insn)
{
	return insn->code == (BPF_JMP | BPF_CALL);
}

static bool insn_is_pseudo_func(struct bpf_insn *insn)
{
	return is_ldimm64_insn(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
}

static int
bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog,
		      const char *name, size_t sec_idx, const char *sec_name,
		      size_t sec_off, void *insn_data, size_t insn_data_sz)
{
	if (insn_data_sz == 0 || insn_data_sz % BPF_INSN_SZ || sec_off % BPF_INSN_SZ) {
		pr_warn("sec '%s': corrupted program '%s', offset %zu, size %zu\n",
			sec_name, name, sec_off, insn_data_sz);
		return -EINVAL;
	}

	memset(prog, 0, sizeof(*prog));
	prog->obj = obj;

	prog->sec_idx = sec_idx;
	prog->sec_insn_off = sec_off / BPF_INSN_SZ;
	prog->sec_insn_cnt = insn_data_sz / BPF_INSN_SZ;

	prog->insns_cnt = prog->sec_insn_cnt;

	prog->type = BPF_PROG_TYPE_UNSPEC;
	prog->load = true;

	prog->instances.fds = NULL;
	prog->instances.nr = -1;

	prog->log_level = obj->log_level;

	prog->sec_name = strdup(sec_name);
	if (!prog->sec_name)
		goto errout;

	prog->name = strdup(name);
	if (!prog->name)
		goto errout;

	prog->pin_name = __bpf_program__pin_name(prog);
	if (!prog->pin_name)
		goto errout;

	prog->insns = malloc(insn_data_sz);
	if (!prog->insns)
		goto errout;
	memcpy(prog->insns, insn_data, insn_data_sz);

	return 0;
errout:
	pr_warn("sec '%s': failed to allocate memory for prog '%s'\n", sec_name, name);
	bpf_program__exit(prog);
	return -ENOMEM;
}

static int
bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
			 const char *sec_name, int sec_idx)
{
	Elf_Data *symbols = obj->efile.symbols;
	struct bpf_program *prog, *progs;
	void *data = sec_data->d_buf;
	size_t sec_sz = sec_data->d_size, sec_off, prog_sz, nr_syms;
	int nr_progs, err, i;
	const char *name;
	Elf64_Sym *sym;

	progs = obj->programs;
	nr_progs = obj->nr_programs;
	nr_syms = symbols->d_size / sizeof(Elf64_Sym);
	sec_off = 0;

	for (i = 0; i < nr_syms; i++) {
		sym = elf_sym_by_idx(obj, i);

		if (sym->st_shndx != sec_idx)
			continue;
		if (ELF64_ST_TYPE(sym->st_info) != STT_FUNC)
			continue;

		prog_sz = sym->st_size;
		sec_off = sym->st_value;

		name = elf_sym_str(obj, sym->st_name);
		if (!name) {
			pr_warn("sec '%s': failed to get symbol name for offset %zu\n",
				sec_name, sec_off);
			return -LIBBPF_ERRNO__FORMAT;
		}

		if (sec_off + prog_sz > sec_sz) {
			pr_warn("sec '%s': program at offset %zu crosses section boundary\n",
				sec_name, sec_off);
			return -LIBBPF_ERRNO__FORMAT;
		}

		if (sec_idx != obj->efile.text_shndx && ELF64_ST_BIND(sym->st_info) == STB_LOCAL) {
			pr_warn("sec '%s': program '%s' is static and not supported\n", sec_name, name);
			return -ENOTSUP;
		}

		pr_debug("sec '%s': found program '%s' at insn offset %zu (%zu bytes), code size %zu insns (%zu bytes)\n",
			 sec_name, name, sec_off / BPF_INSN_SZ, sec_off, prog_sz / BPF_INSN_SZ, prog_sz);

		progs = libbpf_reallocarray(progs, nr_progs + 1, sizeof(*progs));
		if (!progs) {
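			/*
			 * In this case the original obj->programs
			 * is still valid, so don't need special treat for
			 * bpf_close_object().
			 */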
			pr_warn("sec '%s': failed to alloc memory for new program '%s'\n",
				sec_name, name);
			return -ENOMEM;
		}
		obj->programs = progs;

		prog = &progs[nr_progs];

		err = bpf_object__init_prog(obj, prog, name, sec_idx, sec_name,
					    sec_off, data + sec_off, prog_sz);
		if (err)
			return err;

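		/* if function is a global/weak symbol, but has restricted
		 * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF FUNC
		 * as static to enable more permissive BPF verification mode
		 * with more outside context available to BPF verifier
		 */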
		if (ELF64_ST_BIND(sym->st_info) != STB_LOCAL
		    && (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN
			|| ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL))
			prog->mark_btf_static = true;

		nr_progs++;
		obj->nr_programs = nr_progs;
	}

	return 0;
}

__u32 get_kernel_version(void)
{
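	/* On Ubuntu LINUX_VERSION_CODE doesn't correspond to info.release,
	 * but Ubuntu provides /proc/version_signature file, as described at
	 * https://ubuntu.com/kernel, with an example contents below, which we
	 * can use to get a proper LINUX_VERSION_CODE.
	 *
	 *   Ubuntu 5.4.0-12.15-generic 5.4.8
	 *
	 * In the above, 5.4.8 is what kernel is actually expecting, while
	 * uname() call will return 5.4.0 in info.release.
	 */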
	const char *ubuntu_kver_file = "/proc/version_signature";
	__u32 major, minor, patch;
	struct utsname info;

	if (access(ubuntu_kver_file, R_OK) == 0) {
		FILE *f;

		f = fopen(ubuntu_kver_file, "r");
		if (f) {
			if (fscanf(f, "%*s %*s %d.%d.%d\n", &major, &minor, &patch) == 3) {
				fclose(f);
				return KERNEL_VERSION(major, minor, patch);
			}
			fclose(f);
		}
	}

	uname(&info);
	if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3)
		return 0;
	return KERNEL_VERSION(major, minor, patch);
}

static const struct btf_member *
find_member_by_offset(const struct btf_type *t, __u32 bit_offset)
{
	struct btf_member *m;
	int i;

	for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
		if (btf_member_bit_offset(t, i) == bit_offset)
			return m;
	}

	return NULL;
}

static const struct btf_member *
find_member_by_name(const struct btf *btf, const struct btf_type *t,
		    const char *name)
{
	struct btf_member *m;
	int i;

	for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
		if (!strcmp(btf__name_by_offset(btf, m->name_off), name))
			return m;
	}

	return NULL;
}

#define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_"
static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
				   const char *name, __u32 kind);

static int
find_struct_ops_kern_types(const struct btf *btf, const char *tname,
			   const struct btf_type **type, __u32 *type_id,
			   const struct btf_type **vtype, __u32 *vtype_id,
			   const struct btf_member **data_member)
{
	const struct btf_type *kern_type, *kern_vtype;
	const struct btf_member *kern_data_member;
	__s32 kern_vtype_id, kern_type_id;
	__u32 i;

	kern_type_id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT);
	if (kern_type_id < 0) {
		pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n",
			tname);
		return kern_type_id;
	}
	kern_type = btf__type_by_id(btf, kern_type_id);

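	/* Find the corresponding "map_value" type that will be used
	 * in map_update(BPF_MAP_TYPE_STRUCT_OPS).  For example,
	 * find "struct bpf_struct_ops_tcp_congestion_ops" from the
	 * btf_vmlinux.
	 */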
	kern_vtype_id = find_btf_by_prefix_kind(btf, STRUCT_OPS_VALUE_PREFIX,
						tname, BTF_KIND_STRUCT);
	if (kern_vtype_id < 0) {
		pr_warn("struct_ops init_kern: struct %s%s is not found in kernel BTF\n",
			STRUCT_OPS_VALUE_PREFIX, tname);
		return kern_vtype_id;
	}
	kern_vtype = btf__type_by_id(btf, kern_vtype_id);

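	/* Find "struct tcp_congestion_ops" from
	 * struct bpf_struct_ops_tcp_congestion_ops {
	 *	[ ... ]
	 *	struct tcp_congestion_ops data;
	 * }
	 */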
	kern_data_member = btf_members(kern_vtype);
	for (i = 0; i < btf_vlen(kern_vtype); i++, kern_data_member++) {
		if (kern_data_member->type == kern_type_id)
			break;
	}
	if (i == btf_vlen(kern_vtype)) {
		pr_warn("struct_ops init_kern: struct %s data is not found in struct %s%s\n",
			tname, STRUCT_OPS_VALUE_PREFIX, tname);
		return -EINVAL;
	}

	*type = kern_type;
	*type_id = kern_type_id;
	*vtype = kern_vtype;
	*vtype_id = kern_vtype_id;
	*data_member = kern_data_member;

	return 0;
}

static bool bpf_map__is_struct_ops(const struct bpf_map *map)
{
	return map->def.type == BPF_MAP_TYPE_STRUCT_OPS;
}

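/* Init the map's fields that depend on kern_btf */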
static int bpf_map__init_kern_struct_ops(struct bpf_map *map,
					 const struct btf *btf,
					 const struct btf *kern_btf)
{
	const struct btf_member *member, *kern_member, *kern_data_member;
	const struct btf_type *type, *kern_type, *kern_vtype;
	__u32 i, kern_type_id, kern_vtype_id, kern_data_off;
	struct bpf_struct_ops *st_ops;
	void *data, *kern_data;
	const char *tname;
	int err;

	st_ops = map->st_ops;
	type = st_ops->type;
	tname = st_ops->tname;
	err = find_struct_ops_kern_types(kern_btf, tname,
					 &kern_type, &kern_type_id,
					 &kern_vtype, &kern_vtype_id,
					 &kern_data_member);
	if (err)
		return err;

	pr_debug("struct_ops init_kern %s: type_id:%u kern_type_id:%u kern_vtype_id:%u\n",
		 map->name, st_ops->type_id, kern_type_id, kern_vtype_id);

	map->def.value_size = kern_vtype->size;
	map->btf_vmlinux_value_type_id = kern_vtype_id;

	st_ops->kern_vdata = calloc(1, kern_vtype->size);
	if (!st_ops->kern_vdata)
		return -ENOMEM;

	data = st_ops->data;
	kern_data_off = kern_data_member->offset / 8;
	kern_data = st_ops->kern_vdata + kern_data_off;

	member = btf_members(type);
	for (i = 0; i < btf_vlen(type); i++, member++) {
		const struct btf_type *mtype, *kern_mtype;
		__u32 mtype_id, kern_mtype_id;
		void *mdata, *kern_mdata;
		__s64 msize, kern_msize;
		__u32 moff, kern_moff;
		__u32 kern_member_idx;
		const char *mname;

		mname = btf__name_by_offset(btf, member->name_off);
		kern_member = find_member_by_name(kern_btf, kern_type, mname);
		if (!kern_member) {
			pr_warn("struct_ops init_kern %s: Cannot find member %s in kernel BTF\n",
				map->name, mname);
			return -ENOTSUP;
		}

		kern_member_idx = kern_member - btf_members(kern_type);
		if (btf_member_bitfield_size(type, i) ||
		    btf_member_bitfield_size(kern_type, kern_member_idx)) {
			pr_warn("struct_ops init_kern %s: bitfield %s is not supported\n",
				map->name, mname);
			return -ENOTSUP;
		}

		moff = member->offset / 8;
		kern_moff = kern_member->offset / 8;

		mdata = data + moff;
		kern_mdata = kern_data + kern_moff;

		mtype = skip_mods_and_typedefs(btf, member->type, &mtype_id);
		kern_mtype = skip_mods_and_typedefs(kern_btf, kern_member->type,
						    &kern_mtype_id);
		if (BTF_INFO_KIND(mtype->info) !=
		    BTF_INFO_KIND(kern_mtype->info)) {
			pr_warn("struct_ops init_kern %s: Unmatched member type %s %u != %u(kernel)\n",
				map->name, mname, BTF_INFO_KIND(mtype->info),
				BTF_INFO_KIND(kern_mtype->info));
			return -ENOTSUP;
		}

		if (btf_is_ptr(mtype)) {
			struct bpf_program *prog;

			prog = st_ops->progs[i];
			if (!prog)
				continue;

			kern_mtype = skip_mods_and_typedefs(kern_btf,
							    kern_mtype->type,
							    &kern_mtype_id);

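			/* mtype->type must be a func_proto, which was
			 * guaranteed in bpf_object__collect_st_ops_relos(),
			 * so only check kern_mtype for func_proto here.
			 */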
			if (!btf_is_func_proto(kern_mtype)) {
				pr_warn("struct_ops init_kern %s: kernel member %s is not a func ptr\n",
					map->name, mname);
				return -ENOTSUP;
			}

			prog->attach_btf_id = kern_type_id;
			prog->expected_attach_type = kern_member_idx;

			st_ops->kern_func_off[i] = kern_data_off + kern_moff;

			pr_debug("struct_ops init_kern %s: func ptr %s is set to prog %s from data(+%u) to kern_data(+%u)\n",
				 map->name, mname, prog->name, moff,
				 kern_moff);

			continue;
		}

		msize = btf__resolve_size(btf, mtype_id);
		kern_msize = btf__resolve_size(kern_btf, kern_mtype_id);
		if (msize < 0 || kern_msize < 0 || msize != kern_msize) {
			pr_warn("struct_ops init_kern %s: Error in size of member %s: %zd != %zd(kernel)\n",
				map->name, mname, (ssize_t)msize,
				(ssize_t)kern_msize);
			return -ENOTSUP;
		}

		pr_debug("struct_ops init_kern %s: copy %s %u bytes from data(+%u) to kern_data(+%u)\n",
			 map->name, mname, (unsigned int)msize,
			 moff, kern_moff);
		memcpy(kern_mdata, mdata, msize);
	}

	return 0;
}

static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
{
	struct bpf_map *map;
	size_t i;
	int err;

	for (i = 0; i < obj->nr_maps; i++) {
		map = &obj->maps[i];

		if (!bpf_map__is_struct_ops(map))
			continue;

		err = bpf_map__init_kern_struct_ops(map, obj->btf,
						    obj->btf_vmlinux);
		if (err)
			return err;
	}

	return 0;
}

static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
{
	const struct btf_type *type, *datasec;
	const struct btf_var_secinfo *vsi;
	struct bpf_struct_ops *st_ops;
	const char *tname, *var_name;
	__s32 type_id, datasec_id;
	const struct btf *btf;
	struct bpf_map *map;
	__u32 i;

	if (obj->efile.st_ops_shndx == -1)
		return 0;

	btf = obj->btf;
	datasec_id = btf__find_by_name_kind(btf, STRUCT_OPS_SEC,
					    BTF_KIND_DATASEC);
	if (datasec_id < 0) {
		pr_warn("struct_ops init: DATASEC %s not found\n",
			STRUCT_OPS_SEC);
		return -EINVAL;
	}

	datasec = btf__type_by_id(btf, datasec_id);
	vsi = btf_var_secinfos(datasec);
	for (i = 0; i < btf_vlen(datasec); i++, vsi++) {
		type = btf__type_by_id(obj->btf, vsi->type);
		var_name = btf__name_by_offset(obj->btf, type->name_off);

		type_id = btf__resolve_type(obj->btf, vsi->type);
		if (type_id < 0) {
			pr_warn("struct_ops init: Cannot resolve var type_id %u in DATASEC %s\n",
				vsi->type, STRUCT_OPS_SEC);
			return -EINVAL;
		}

		type = btf__type_by_id(obj->btf, type_id);
		tname = btf__name_by_offset(obj->btf, type->name_off);
		if (!tname[0]) {
			pr_warn("struct_ops init: anonymous type is not supported\n");
			return -ENOTSUP;
		}
		if (!btf_is_struct(type)) {
			pr_warn("struct_ops init: %s is not a struct\n", tname);
			return -EINVAL;
		}

		map = bpf_object__add_map(obj);
		if (IS_ERR(map))
			return PTR_ERR(map);

		map->sec_idx = obj->efile.st_ops_shndx;
		map->sec_offset = vsi->offset;
		map->name = strdup(var_name);
		if (!map->name)
			return -ENOMEM;

		map->def.type = BPF_MAP_TYPE_STRUCT_OPS;
		map->def.key_size = sizeof(int);
		map->def.value_size = type->size;
		map->def.max_entries = 1;

		map->st_ops = calloc(1, sizeof(*map->st_ops));
		if (!map->st_ops)
			return -ENOMEM;
		st_ops = map->st_ops;
		st_ops->data = malloc(type->size);
		st_ops->progs = calloc(btf_vlen(type), sizeof(*st_ops->progs));
		st_ops->kern_func_off = malloc(btf_vlen(type) *
					       sizeof(*st_ops->kern_func_off));
		if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off)
			return -ENOMEM;

		if (vsi->offset + type->size > obj->efile.st_ops_data->d_size) {
			pr_warn("struct_ops init: var %s is beyond the end of DATASEC %s\n",
				var_name, STRUCT_OPS_SEC);
			return -EINVAL;
		}

		memcpy(st_ops->data,
		       obj->efile.st_ops_data->d_buf + vsi->offset,
		       type->size);
		st_ops->tname = tname;
		st_ops->type = type;
		st_ops->type_id = type_id;

		pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n",
			 tname, type_id, var_name, vsi->offset);
	}

	return 0;
}

static struct bpf_object *bpf_object__new(const char *path,
					  const void *obj_buf,
					  size_t obj_buf_sz,
					  const char *obj_name)
{
	bool strict = (libbpf_mode & LIBBPF_STRICT_NO_OBJECT_LIST);
	struct bpf_object *obj;
	char *end;

	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
	if (!obj) {
		pr_warn("alloc memory failed for %s\n", path);
		return ERR_PTR(-ENOMEM);
	}

	strcpy(obj->path, path);
	if (obj_name) {
		libbpf_strlcpy(obj->name, obj_name, sizeof(obj->name));
	} else {
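		/* Using basename() GNU version which doesn't modify arg itself */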
		libbpf_strlcpy(obj->name, basename((void *)path), sizeof(obj->name));
		end = strchr(obj->name, '.');
		if (end)
			*end = 0;
	}

	obj->efile.fd = -1;

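	/*
	 * Caller of this function should also call
	 * bpf_object__elf_finish() after data collection to return
	 * obj_buf to user. If not, we should duplicate the buffer to
	 * avoid user freeing them before elf finish.
	 */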
	obj->efile.obj_buf = obj_buf;
	obj->efile.obj_buf_sz = obj_buf_sz;
	obj->efile.maps_shndx = -1;
	obj->efile.btf_maps_shndx = -1;
	obj->efile.st_ops_shndx = -1;
	obj->kconfig_map_idx = -1;

	obj->kern_version = get_kernel_version();
	obj->loaded = false;

	INIT_LIST_HEAD(&obj->list);
	if (!strict)
		list_add(&obj->list, &bpf_objects_list);
	return obj;
}

static void bpf_object__elf_finish(struct bpf_object *obj)
{
	if (!obj->efile.elf)
		return;

	elf_end(obj->efile.elf);
	obj->efile.elf = NULL;
	obj->efile.symbols = NULL;
	obj->efile.st_ops_data = NULL;

	zfree(&obj->efile.secs);
	obj->efile.sec_cnt = 0;
	zclose(obj->efile.fd);
	obj->efile.obj_buf = NULL;
	obj->efile.obj_buf_sz = 0;
}

static int bpf_object__elf_init(struct bpf_object *obj)
{
	Elf64_Ehdr *ehdr;
	int err = 0;
	Elf *elf;

	if (obj->efile.elf) {
		pr_warn("elf: init internal error\n");
		return -LIBBPF_ERRNO__LIBELF;
	}

	if (obj->efile.obj_buf_sz > 0) {
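		/* obj_buf should have been validated by bpf_object__open_mem(). */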
		elf = elf_memory((char *)obj->efile.obj_buf, obj->efile.obj_buf_sz);
	} else {
		obj->efile.fd = open(obj->path, O_RDONLY | O_CLOEXEC);
		if (obj->efile.fd < 0) {
			char errmsg[STRERR_BUFSIZE], *cp;

			err = -errno;
			cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
			pr_warn("elf: failed to open %s: %s\n", obj->path, cp);
			return err;
		}

		elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL);
	}

	if (!elf) {
		pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__LIBELF;
		goto errout;
	}

	obj->efile.elf = elf;

	if (elf_kind(elf) != ELF_K_ELF) {
		err = -LIBBPF_ERRNO__FORMAT;
		pr_warn("elf: '%s' is not a proper ELF object\n", obj->path);
		goto errout;
	}

	if (gelf_getclass(elf) != ELFCLASS64) {
		err = -LIBBPF_ERRNO__FORMAT;
		pr_warn("elf: '%s' is not a 64-bit ELF object\n", obj->path);
		goto errout;
	}

	obj->efile.ehdr = ehdr = elf64_getehdr(elf);
	if (!obj->efile.ehdr) {
		pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	if (elf_getshdrstrndx(elf, &obj->efile.shstrndx)) {
		pr_warn("elf: failed to get section names section index for %s: %s\n",
			obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

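	/* Elf is corrupted/truncated, avoid calling elf_strptr. */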
	if (!elf_rawdata(elf_getscn(elf, obj->efile.shstrndx), NULL)) {
		pr_warn("elf: failed to get section names strings from %s: %s\n",
			obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

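	/* Old LLVM set e_machine to EM_NONE */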
	if (ehdr->e_type != ET_REL || (ehdr->e_machine && ehdr->e_machine != EM_BPF)) {
		pr_warn("elf: %s is not a valid eBPF object file\n", obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	return 0;
errout:
	bpf_object__elf_finish(obj);
	return err;
}

static int bpf_object__check_endianness(struct bpf_object *obj)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2LSB)
		return 0;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2MSB)
		return 0;
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
	pr_warn("elf: endianness mismatch in %s.\n", obj->path);
	return -LIBBPF_ERRNO__ENDIAN;
}

static int
bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
{
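	/* libbpf_strlcpy() only copies first N - 1 bytes, so size + 1 won't
	 * go over allowed ELF data section buffer
	 */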
	libbpf_strlcpy(obj->license, data, min(size + 1, sizeof(obj->license)));
	pr_debug("license of %s is %s\n", obj->path, obj->license);
	return 0;
}

static int
bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
{
	__u32 kver;

	if (size != sizeof(kver)) {
		pr_warn("invalid kver section in %s\n", obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}
	memcpy(&kver, data, sizeof(kver));
	obj->kern_version = kver;
	pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
	return 0;
}

static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
{
	if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
	    type == BPF_MAP_TYPE_HASH_OF_MAPS)
		return true;
	return false;
}

static int find_elf_sec_sz(const struct bpf_object *obj, const char *name, __u32 *size)
{
	int ret = -ENOENT;
	Elf_Data *data;
	Elf_Scn *scn;

	*size = 0;
	if (!name)
		return -EINVAL;

	scn = elf_sec_by_name(obj, name);
	data = elf_sec_data(obj, scn);
	if (data) {
		ret = 0;
		*size = data->d_size;
	}

	return *size ? 0 : ret;
}

static int find_elf_var_offset(const struct bpf_object *obj, const char *name, __u32 *off)
{
	Elf_Data *symbols = obj->efile.symbols;
	const char *sname;
	size_t si;

	if (!name || !off)
		return -EINVAL;

	for (si = 0; si < symbols->d_size / sizeof(Elf64_Sym); si++) {
		Elf64_Sym *sym = elf_sym_by_idx(obj, si);

		if (ELF64_ST_BIND(sym->st_info) != STB_GLOBAL ||
		    ELF64_ST_TYPE(sym->st_info) != STT_OBJECT)
			continue;

		sname = elf_sym_str(obj, sym->st_name);
		if (!sname) {
			pr_warn("failed to get sym name string for var %s\n", name);
			return -EIO;
		}
		if (strcmp(name, sname) == 0) {
			*off = sym->st_value;
			return 0;
		}
	}

	return -ENOENT;
}

static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
{
	struct bpf_map *new_maps;
	size_t new_cap;
	int i;

	if (obj->nr_maps < obj->maps_cap)
		return &obj->maps[obj->nr_maps++];

	new_cap = max((size_t)4, obj->maps_cap * 3 / 2);
	new_maps = libbpf_reallocarray(obj->maps, new_cap, sizeof(*obj->maps));
	if (!new_maps) {
		pr_warn("alloc maps for object failed\n");
		return ERR_PTR(-ENOMEM);
	}

	obj->maps_cap = new_cap;
	obj->maps = new_maps;

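	/* zero out new maps */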
	memset(obj->maps + obj->nr_maps, 0,
	       (obj->maps_cap - obj->nr_maps) * sizeof(*obj->maps));
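	/*
	 * fill all fd with -1 so won't close other fds (e.g. stdin) by
	 * mistake; zclose() treats negative fds as a no-op
	 */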
	for (i = obj->nr_maps; i < obj->maps_cap; i++) {
		obj->maps[i].fd = -1;
		obj->maps[i].inner_map_fd = -1;
	}

	return &obj->maps[obj->nr_maps++];
}

static size_t bpf_map_mmap_sz(const struct bpf_map *map)
{
	long page_sz = sysconf(_SC_PAGE_SIZE);
	size_t map_sz;

	map_sz = (size_t)roundup(map->def.value_size, 8) * map->def.max_entries;
	map_sz = roundup(map_sz, page_sz);
	return map_sz;
}

static char *internal_map_name(struct bpf_object *obj, const char *real_name)
{
	char map_name[BPF_OBJ_NAME_LEN], *p;
	int pfx_len, sfx_len = max((size_t)7, strlen(real_name));

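	/* Internal maps (.data, .bss, .rodata, .kconfig) are named by
	 * combining a prefix derived from the object name with the
	 * (sanitized) ELF section name as a suffix. Since BPF_OBJ_NAME_LEN
	 * is small, the object-name prefix is truncated so that at least
	 * the last 7 characters of the section name always survive, while
	 * custom dot-suffixed sections (e.g. ".data.custom") keep their full
	 * section name and drop the object-name prefix entirely.
	 */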
	if (sfx_len >= BPF_OBJ_NAME_LEN)
		sfx_len = BPF_OBJ_NAME_LEN - 1;

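	/* if there are two or more dots in map name, it's a custom dot map */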
	if (strchr(real_name + 1, '.') != NULL)
		pfx_len = 0;
	else
		pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1, strlen(obj->name));

	snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name,
		 sfx_len, real_name);

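	/* sanitise map name to characters allowed by kernel */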
	for (p = map_name; *p && p < map_name + sizeof(map_name); p++)
		if (!isalnum(*p) && *p != '_' && *p != '.')
			*p = '_';

	return strdup(map_name);
}

static int
bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
			      const char *real_name, int sec_idx, void *data, size_t data_sz)
{
	struct bpf_map_def *def;
	struct bpf_map *map;
	int err;

	map = bpf_object__add_map(obj);
	if (IS_ERR(map))
		return PTR_ERR(map);

	map->libbpf_type = type;
	map->sec_idx = sec_idx;
	map->sec_offset = 0;
	map->real_name = strdup(real_name);
	map->name = internal_map_name(obj, real_name);
	if (!map->real_name || !map->name) {
		zfree(&map->real_name);
		zfree(&map->name);
		return -ENOMEM;
	}

	def = &map->def;
	def->type = BPF_MAP_TYPE_ARRAY;
	def->key_size = sizeof(int);
	def->value_size = data_sz;
	def->max_entries = 1;
	def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG
			 ? BPF_F_RDONLY_PROG : 0;
	def->map_flags |= BPF_F_MMAPABLE;

	pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
		 map->name, map->sec_idx, map->sec_offset, def->map_flags);

	map->mmaped = mmap(NULL, bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (map->mmaped == MAP_FAILED) {
		err = -errno;
		map->mmaped = NULL;
		pr_warn("failed to alloc map '%s' content buffer: %d\n",
			map->name, err);
		zfree(&map->real_name);
		zfree(&map->name);
		return err;
	}

	if (data)
		memcpy(map->mmaped, data, data_sz);

	pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
	return 0;
}

static int bpf_object__init_global_data_maps(struct bpf_object *obj)
{
	struct elf_sec_desc *sec_desc;
	const char *sec_name;
	int err = 0, sec_idx;

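	/*
	 * Populate obj->maps with libbpf internal maps.
	 */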
	for (sec_idx = 1; sec_idx < obj->efile.sec_cnt; sec_idx++) {
		sec_desc = &obj->efile.secs[sec_idx];

		switch (sec_desc->sec_type) {
		case SEC_DATA:
			sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
			err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
							    sec_name, sec_idx,
							    sec_desc->data->d_buf,
							    sec_desc->data->d_size);
			break;
		case SEC_RODATA:
			obj->has_rodata = true;
			sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
			err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
							    sec_name, sec_idx,
							    sec_desc->data->d_buf,
							    sec_desc->data->d_size);
			break;
		case SEC_BSS:
			sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
			err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
							    sec_name, sec_idx,
							    NULL,
							    sec_desc->data->d_size);
			break;
		default:
			break;
		}
		if (err)
			return err;
	}
	return 0;
}

static struct extern_desc *find_extern_by_name(const struct bpf_object *obj,
					       const void *name)
{
	int i;

	for (i = 0; i < obj->nr_extern; i++) {
		if (strcmp(obj->externs[i].name, name) == 0)
			return &obj->externs[i];
	}
	return NULL;
}

static int set_kcfg_value_tri(struct extern_desc *ext, void *ext_val,
			      char value)
{
	switch (ext->kcfg.type) {
	case KCFG_BOOL:
		if (value == 'm') {
			pr_warn("extern (kcfg) %s=%c should be tristate or char\n",
				ext->name, value);
			return -EINVAL;
		}
		*(bool *)ext_val = value == 'y' ? true : false;
		break;
	case KCFG_TRISTATE:
		if (value == 'y')
			*(enum libbpf_tristate *)ext_val = TRI_YES;
		else if (value == 'm')
			*(enum libbpf_tristate *)ext_val = TRI_MODULE;
		else
			*(enum libbpf_tristate *)ext_val = TRI_NO;
		break;
	case KCFG_CHAR:
		*(char *)ext_val = value;
		break;
	case KCFG_UNKNOWN:
	case KCFG_INT:
	case KCFG_CHAR_ARR:
	default:
		pr_warn("extern (kcfg) %s=%c should be bool, tristate, or char\n",
			ext->name, value);
		return -EINVAL;
	}
	ext->is_set = true;
	return 0;
}

static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val,
			      const char *value)
{
	size_t len;

	if (ext->kcfg.type != KCFG_CHAR_ARR) {
		pr_warn("extern (kcfg) %s=%s should be char array\n", ext->name, value);
		return -EINVAL;
	}

	len = strlen(value);
	if (value[len - 1] != '"') {
		pr_warn("extern (kcfg) '%s': invalid string config '%s'\n",
			ext->name, value);
		return -EINVAL;
	}

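	/* strip quotes */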
	len -= 2;
	if (len >= ext->kcfg.sz) {
		pr_warn("extern (kcfg) '%s': long string config %s of (%zu bytes) truncated to %d bytes\n",
			ext->name, value, len, ext->kcfg.sz - 1);
		len = ext->kcfg.sz - 1;
	}
	memcpy(ext_val, value + 1, len);
	ext_val[len] = '\0';
	ext->is_set = true;
	return 0;
}

static int parse_u64(const char *value, __u64 *res)
{
	char *value_end;
	int err;

	errno = 0;
	*res = strtoull(value, &value_end, 0);
	if (errno) {
		err = -errno;
		pr_warn("failed to parse '%s' as integer: %d\n", value, err);
		return err;
	}
	if (*value_end) {
		pr_warn("failed to parse '%s' as integer completely\n", value);
		return -EINVAL;
	}
	return 0;
}

static bool is_kcfg_value_in_range(const struct extern_desc *ext, __u64 v)
{
	int bit_sz = ext->kcfg.sz * 8;

	if (ext->kcfg.sz == 8)
		return true;

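	/* Validate that a value stored in u64 fits in an integer of
	 * ext->kcfg.sz bytes without loss of information: for signed targets,
	 * adding 2^(bit_sz - 1) re-centers the representable range as
	 * [0, 2^bit_sz); for unsigned targets the high bits must simply be
	 * clear.
	 */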
	if (ext->kcfg.is_signed)
		return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz);
	else
		return (v >> bit_sz) == 0;
}

static int set_kcfg_value_num(struct extern_desc *ext, void *ext_val,
			      __u64 value)
{
	if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) {
		pr_warn("extern (kcfg) %s=%llu should be integer\n",
			ext->name, (unsigned long long)value);
		return -EINVAL;
	}
	if (!is_kcfg_value_in_range(ext, value)) {
		pr_warn("extern (kcfg) %s=%llu value doesn't fit in %d bytes\n",
			ext->name, (unsigned long long)value, ext->kcfg.sz);
		return -ERANGE;
	}
	switch (ext->kcfg.sz) {
	case 1: *(__u8 *)ext_val = value; break;
	case 2: *(__u16 *)ext_val = value; break;
	case 4: *(__u32 *)ext_val = value; break;
	case 8: *(__u64 *)ext_val = value; break;
	default:
		return -EINVAL;
	}
	ext->is_set = true;
	return 0;
}

static int bpf_object__process_kconfig_line(struct bpf_object *obj,
					    char *buf, void *data)
{
	struct extern_desc *ext;
	char *sep, *value;
	int len, err = 0;
	void *ext_val;
	__u64 num;

	if (!str_has_pfx(buf, "CONFIG_"))
		return 0;

	sep = strchr(buf, '=');
	if (!sep) {
		pr_warn("failed to parse '%s': no separator\n", buf);
		return -EINVAL;
	}

	/* Trim ending '\n' */
	len = strlen(buf);
	if (buf[len - 1] == '\n')
		buf[len - 1] = '\0';

	/* Split on '=' and ensure that a value is present. */
	*sep = '\0';
	if (!sep[1]) {
		*sep = '=';
		pr_warn("failed to parse '%s': no value\n", buf);
		return -EINVAL;
	}

	ext = find_extern_by_name(obj, buf);
	if (!ext || ext->is_set)
		return 0;

	ext_val = data + ext->kcfg.data_off;
	value = sep + 1;

	switch (*value) {
	case 'y': case 'n': case 'm':
		err = set_kcfg_value_tri(ext, ext_val, *value);
		break;
	case '"':
		err = set_kcfg_value_str(ext, ext_val, value);
		break;
	default:
		/* assume integer */
		err = parse_u64(value, &num);
		if (err) {
			pr_warn("extern (kcfg) %s=%s should be integer\n",
				ext->name, value);
			return err;
		}
		err = set_kcfg_value_num(ext, ext_val, num);
		break;
	}
	if (err)
		return err;
	pr_debug("extern (kcfg) %s=%s\n", ext->name, value);
	return 0;
}

static int bpf_object__read_kconfig_file(struct bpf_object *obj, void *data)
{
	char buf[PATH_MAX];
	struct utsname uts;
	int len, err = 0;
	gzFile file;

	uname(&uts);
	len = snprintf(buf, PATH_MAX, "/boot/config-%s", uts.release);
	if (len < 0)
		return -EINVAL;
	else if (len >= PATH_MAX)
		return -ENAMETOOLONG;

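	/* gzopen also accepts uncompressed files. */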
	file = gzopen(buf, "r");
	if (!file)
		file = gzopen("/proc/config.gz", "r");

	if (!file) {
		pr_warn("failed to open system Kconfig\n");
		return -ENOENT;
	}

	while (gzgets(file, buf, sizeof(buf))) {
		err = bpf_object__process_kconfig_line(obj, buf, data);
		if (err) {
			pr_warn("error parsing system Kconfig line '%s': %d\n",
				buf, err);
			goto out;
		}
	}

out:
	gzclose(file);
	return err;
}

static int bpf_object__read_kconfig_mem(struct bpf_object *obj,
					const char *config, void *data)
{
	char buf[PATH_MAX];
	int err = 0;
	FILE *file;

	file = fmemopen((void *)config, strlen(config), "r");
	if (!file) {
		err = -errno;
		pr_warn("failed to open in-memory Kconfig: %d\n", err);
		return err;
	}

	while (fgets(buf, sizeof(buf), file)) {
		err = bpf_object__process_kconfig_line(obj, buf, data);
		if (err) {
			pr_warn("error parsing in-memory Kconfig line '%s': %d\n",
				buf, err);
			break;
		}
	}

	fclose(file);
	return err;
}

static int bpf_object__init_kconfig_map(struct bpf_object *obj)
{
	struct extern_desc *last_ext = NULL, *ext;
	size_t map_sz;
	int i, err;

	for (i = 0; i < obj->nr_extern; i++) {
		ext = &obj->externs[i];
		if (ext->type == EXT_KCFG)
			last_ext = ext;
	}

	if (!last_ext)
		return 0;

	map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz;
	err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG,
					    ".kconfig", obj->efile.symbols_shndx,
					    NULL, map_sz);
	if (err)
		return err;

	obj->kconfig_map_idx = obj->nr_maps - 1;

	return 0;
}

static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
{
	Elf_Data *symbols = obj->efile.symbols;
	int i, map_def_sz = 0, nr_maps = 0, nr_syms;
	Elf_Data *data = NULL;
	Elf_Scn *scn;

	if (obj->efile.maps_shndx < 0)
		return 0;

	if (!symbols)
		return -EINVAL;

	scn = elf_sec_by_idx(obj, obj->efile.maps_shndx);
	data = elf_sec_data(obj, scn);
	if (!scn || !data) {
		pr_warn("elf: failed to get legacy map definitions for %s\n",
			obj->path);
		return -EINVAL;
	}

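	/*
	 * Count number of maps. Each map has a name.
	 * Array of maps is not supported: only the first element
	 * is considered.
	 *
	 * TODO: Detect array of map and report error.
	 */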
	nr_syms = symbols->d_size / sizeof(Elf64_Sym);
	for (i = 0; i < nr_syms; i++) {
		Elf64_Sym *sym = elf_sym_by_idx(obj, i);

		if (sym->st_shndx != obj->efile.maps_shndx)
			continue;
		if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION)
			continue;
		nr_maps++;
	}

	pr_debug("elf: found %d legacy map definitions (%zd bytes) in %s\n",
		 nr_maps, data->d_size, obj->path);

	if (!data->d_size || nr_maps == 0 || (data->d_size % nr_maps) != 0) {
		pr_warn("elf: unable to determine legacy map definition size in %s\n",
			obj->path);
		return -EINVAL;
	}
	map_def_sz = data->d_size / nr_maps;

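	/* Fill obj->maps using data in "maps" section.  */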
	for (i = 0; i < nr_syms; i++) {
		Elf64_Sym *sym = elf_sym_by_idx(obj, i);
		const char *map_name;
		struct bpf_map_def *def;
		struct bpf_map *map;

		if (sym->st_shndx != obj->efile.maps_shndx)
			continue;
		if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION)
			continue;

		map = bpf_object__add_map(obj);
		if (IS_ERR(map))
			return PTR_ERR(map);

		map_name = elf_sym_str(obj, sym->st_name);
		if (!map_name) {
			pr_warn("failed to get map #%d name sym string for obj %s\n",
				i, obj->path);
			return -LIBBPF_ERRNO__FORMAT;
		}

		if (ELF64_ST_BIND(sym->st_info) == STB_LOCAL) {
			pr_warn("map '%s' (legacy): static maps are not supported\n", map_name);
			return -ENOTSUP;
		}

		map->libbpf_type = LIBBPF_MAP_UNSPEC;
		map->sec_idx = sym->st_shndx;
		map->sec_offset = sym->st_value;
		pr_debug("map '%s' (legacy): at sec_idx %d, offset %zu.\n",
			 map_name, map->sec_idx, map->sec_offset);
		if (sym->st_value + map_def_sz > data->d_size) {
			pr_warn("corrupted maps section in %s: last map \"%s\" too small\n",
				obj->path, map_name);
			return -EINVAL;
		}

		map->name = strdup(map_name);
		if (!map->name) {
			pr_warn("map '%s': failed to alloc map name\n", map_name);
			return -ENOMEM;
		}
		pr_debug("map %d is \"%s\"\n", i, map->name);
		def = (struct bpf_map_def *)(data->d_buf + sym->st_value);
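		/*
		 * If the definition of the map in the object file fits in
		 * bpf_map_def, copy it.  Any extra fields in our version
		 * of bpf_map_def will default to zero as a result of the
		 * calloc above.
		 */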
		if (map_def_sz <= sizeof(struct bpf_map_def)) {
			memcpy(&map->def, def, map_def_sz);
		} else {
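			/*
			 * Here the map structure being read is bigger than what
			 * we expect, truncate if the excess bits are all zero.
			 * If they are not zero, reject the map as
			 * incompatible.
			 */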
			char *b;

			for (b = ((char *)def) + sizeof(struct bpf_map_def);
			     b < ((char *)def) + map_def_sz; b++) {
				if (*b != 0) {
					pr_warn("maps section in %s: \"%s\" has unrecognized, non-zero options\n",
						obj->path, map_name);
					if (strict)
						return -EINVAL;
				}
			}
			memcpy(&map->def, def, sizeof(struct bpf_map_def));
		}
	}
	return 0;
}

const struct btf_type *
skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id)
{
	const struct btf_type *t = btf__type_by_id(btf, id);

	if (res_id)
		*res_id = id;

	while (btf_is_mod(t) || btf_is_typedef(t)) {
		if (res_id)
			*res_id = t->type;
		t = btf__type_by_id(btf, t->type);
	}

	return t;
}

static const struct btf_type *
resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id)
{
	const struct btf_type *t;

	t = skip_mods_and_typedefs(btf, id, NULL);
	if (!btf_is_ptr(t))
		return NULL;

	t = skip_mods_and_typedefs(btf, t->type, res_id);

	return btf_is_func_proto(t) ? t : NULL;
}

static const char *__btf_kind_str(__u16 kind)
{
	switch (kind) {
	case BTF_KIND_UNKN: return "void";
	case BTF_KIND_INT: return "int";
	case BTF_KIND_PTR: return "ptr";
	case BTF_KIND_ARRAY: return "array";
	case BTF_KIND_STRUCT: return "struct";
	case BTF_KIND_UNION: return "union";
	case BTF_KIND_ENUM: return "enum";
	case BTF_KIND_FWD: return "fwd";
	case BTF_KIND_TYPEDEF: return "typedef";
	case BTF_KIND_VOLATILE: return "volatile";
	case BTF_KIND_CONST: return "const";
	case BTF_KIND_RESTRICT: return "restrict";
	case BTF_KIND_FUNC: return "func";
	case BTF_KIND_FUNC_PROTO: return "func_proto";
	case BTF_KIND_VAR: return "var";
	case BTF_KIND_DATASEC: return "datasec";
	case BTF_KIND_FLOAT: return "float";
	case BTF_KIND_DECL_TAG: return "decl_tag";
	case BTF_KIND_TYPE_TAG: return "type_tag";
	default: return "unknown";
	}
}

const char *btf_kind_str(const struct btf_type *t)
{
	return __btf_kind_str(btf_kind(t));
}

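/*
 * Fetch integer attribute of BTF map definition. Such attributes are
 * represented using a pointer to an array, in which dimensionality of array
 * encodes specified integer value. E.g., int (*type)[BPF_MAP_TYPE_ARRAY];
 * encodes `type => BPF_MAP_TYPE_ARRAY` key/value entry in map definition
 * struct, which allows declarative map definitions without relying on
 * actual integer field values.
 */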
2126static bool get_map_field_int(const char *map_name, const struct btf *btf,
2127 const struct btf_member *m, __u32 *res)
2128{
2129 const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
2130 const char *name = btf__name_by_offset(btf, m->name_off);
2131 const struct btf_array *arr_info;
2132 const struct btf_type *arr_t;
2133
2134 if (!btf_is_ptr(t)) {
2135 pr_warn("map '%s': attr '%s': expected PTR, got %s.\n",
2136 map_name, name, btf_kind_str(t));
2137 return false;
2138 }
2139
2140 arr_t = btf__type_by_id(btf, t->type);
2141 if (!arr_t) {
2142 pr_warn("map '%s': attr '%s': type [%u] not found.\n",
2143 map_name, name, t->type);
2144 return false;
2145 }
2146 if (!btf_is_array(arr_t)) {
2147 pr_warn("map '%s': attr '%s': expected ARRAY, got %s.\n",
2148 map_name, name, btf_kind_str(arr_t));
2149 return false;
2150 }
2151 arr_info = btf_array(arr_t);
2152 *res = arr_info->nelems;
2153 return true;
2154}
2155
2156static int build_map_pin_path(struct bpf_map *map, const char *path)
2157{
2158 char buf[PATH_MAX];
2159 int len;
2160
2161 if (!path)
2162 path = "/sys/fs/bpf";
2163
2164 len = snprintf(buf, PATH_MAX, "%s/%s", path, bpf_map__name(map));
2165 if (len < 0)
2166 return -EINVAL;
2167 else if (len >= PATH_MAX)
2168 return -ENAMETOOLONG;
2169
2170 return bpf_map__set_pin_path(map, buf);
2171}
2172
2173int parse_btf_map_def(const char *map_name, struct btf *btf,
2174 const struct btf_type *def_t, bool strict,
2175 struct btf_map_def *map_def, struct btf_map_def *inner_def)
2176{
2177 const struct btf_type *t;
2178 const struct btf_member *m;
2179 bool is_inner = inner_def == NULL;
2180 int vlen, i;
2181
2182 vlen = btf_vlen(def_t);
2183 m = btf_members(def_t);
2184 for (i = 0; i < vlen; i++, m++) {
2185 const char *name = btf__name_by_offset(btf, m->name_off);
2186
2187 if (!name) {
2188 pr_warn("map '%s': invalid field #%d.\n", map_name, i);
2189 return -EINVAL;
2190 }
2191 if (strcmp(name, "type") == 0) {
2192 if (!get_map_field_int(map_name, btf, m, &map_def->map_type))
2193 return -EINVAL;
2194 map_def->parts |= MAP_DEF_MAP_TYPE;
2195 } else if (strcmp(name, "max_entries") == 0) {
2196 if (!get_map_field_int(map_name, btf, m, &map_def->max_entries))
2197 return -EINVAL;
2198 map_def->parts |= MAP_DEF_MAX_ENTRIES;
2199 } else if (strcmp(name, "map_flags") == 0) {
2200 if (!get_map_field_int(map_name, btf, m, &map_def->map_flags))
2201 return -EINVAL;
2202 map_def->parts |= MAP_DEF_MAP_FLAGS;
2203 } else if (strcmp(name, "numa_node") == 0) {
2204 if (!get_map_field_int(map_name, btf, m, &map_def->numa_node))
2205 return -EINVAL;
2206 map_def->parts |= MAP_DEF_NUMA_NODE;
2207 } else if (strcmp(name, "key_size") == 0) {
2208 __u32 sz;
2209
2210 if (!get_map_field_int(map_name, btf, m, &sz))
2211 return -EINVAL;
2212 if (map_def->key_size && map_def->key_size != sz) {
2213 pr_warn("map '%s': conflicting key size %u != %u.\n",
2214 map_name, map_def->key_size, sz);
2215 return -EINVAL;
2216 }
2217 map_def->key_size = sz;
2218 map_def->parts |= MAP_DEF_KEY_SIZE;
2219 } else if (strcmp(name, "key") == 0) {
2220 __s64 sz;
2221
2222 t = btf__type_by_id(btf, m->type);
2223 if (!t) {
2224 pr_warn("map '%s': key type [%d] not found.\n",
2225 map_name, m->type);
2226 return -EINVAL;
2227 }
2228 if (!btf_is_ptr(t)) {
2229 pr_warn("map '%s': key spec is not PTR: %s.\n",
2230 map_name, btf_kind_str(t));
2231 return -EINVAL;
2232 }
2233 sz = btf__resolve_size(btf, t->type);
2234 if (sz < 0) {
2235 pr_warn("map '%s': can't determine key size for type [%u]: %zd.\n",
2236 map_name, t->type, (ssize_t)sz);
2237 return sz;
2238 }
2239 if (map_def->key_size && map_def->key_size != sz) {
2240 pr_warn("map '%s': conflicting key size %u != %zd.\n",
2241 map_name, map_def->key_size, (ssize_t)sz);
2242 return -EINVAL;
2243 }
2244 map_def->key_size = sz;
2245 map_def->key_type_id = t->type;
2246 map_def->parts |= MAP_DEF_KEY_SIZE | MAP_DEF_KEY_TYPE;
2247 } else if (strcmp(name, "value_size") == 0) {
2248 __u32 sz;
2249
2250 if (!get_map_field_int(map_name, btf, m, &sz))
2251 return -EINVAL;
2252 if (map_def->value_size && map_def->value_size != sz) {
2253 pr_warn("map '%s': conflicting value size %u != %u.\n",
2254 map_name, map_def->value_size, sz);
2255 return -EINVAL;
2256 }
2257 map_def->value_size = sz;
2258 map_def->parts |= MAP_DEF_VALUE_SIZE;
2259 } else if (strcmp(name, "value") == 0) {
2260 __s64 sz;
2261
2262 t = btf__type_by_id(btf, m->type);
2263 if (!t) {
2264 pr_warn("map '%s': value type [%d] not found.\n",
2265 map_name, m->type);
2266 return -EINVAL;
2267 }
2268 if (!btf_is_ptr(t)) {
2269 pr_warn("map '%s': value spec is not PTR: %s.\n",
2270 map_name, btf_kind_str(t));
2271 return -EINVAL;
2272 }
2273 sz = btf__resolve_size(btf, t->type);
2274 if (sz < 0) {
2275 pr_warn("map '%s': can't determine value size for type [%u]: %zd.\n",
2276 map_name, t->type, (ssize_t)sz);
2277 return sz;
2278 }
2279 if (map_def->value_size && map_def->value_size != sz) {
2280 pr_warn("map '%s': conflicting value size %u != %zd.\n",
2281 map_name, map_def->value_size, (ssize_t)sz);
2282 return -EINVAL;
2283 }
2284 map_def->value_size = sz;
2285 map_def->value_type_id = t->type;
2286 map_def->parts |= MAP_DEF_VALUE_SIZE | MAP_DEF_VALUE_TYPE;
2287 }
2288 else if (strcmp(name, "values") == 0) {
2289 bool is_map_in_map = bpf_map_type__is_map_in_map(map_def->map_type);
2290 bool is_prog_array = map_def->map_type == BPF_MAP_TYPE_PROG_ARRAY;
2291 const char *desc = is_map_in_map ? "map-in-map inner" : "prog-array value";
2292 char inner_map_name[128];
2293 int err;
2294
2295 if (is_inner) {
2296 pr_warn("map '%s': multi-level inner maps not supported.\n",
2297 map_name);
2298 return -ENOTSUP;
2299 }
2300 if (i != vlen - 1) {
2301 pr_warn("map '%s': '%s' member should be last.\n",
2302 map_name, name);
2303 return -EINVAL;
2304 }
2305 if (!is_map_in_map && !is_prog_array) {
2306 pr_warn("map '%s': should be map-in-map or prog-array.\n",
2307 map_name);
2308 return -ENOTSUP;
2309 }
2310 if (map_def->value_size && map_def->value_size != 4) {
2311 pr_warn("map '%s': conflicting value size %u != 4.\n",
2312 map_name, map_def->value_size);
2313 return -EINVAL;
2314 }
2315 map_def->value_size = 4;
2316 t = btf__type_by_id(btf, m->type);
2317 if (!t) {
2318 pr_warn("map '%s': %s type [%d] not found.\n",
2319 map_name, desc, m->type);
2320 return -EINVAL;
2321 }
2322 if (!btf_is_array(t) || btf_array(t)->nelems) {
2323 pr_warn("map '%s': %s spec is not a zero-sized array.\n",
2324 map_name, desc);
2325 return -EINVAL;
2326 }
2327 t = skip_mods_and_typedefs(btf, btf_array(t)->type, NULL);
2328 if (!btf_is_ptr(t)) {
2329 pr_warn("map '%s': %s def is of unexpected kind %s.\n",
2330 map_name, desc, btf_kind_str(t));
2331 return -EINVAL;
2332 }
2333 t = skip_mods_and_typedefs(btf, t->type, NULL);
2334 if (is_prog_array) {
2335 if (!btf_is_func_proto(t)) {
2336 pr_warn("map '%s': prog-array value def is of unexpected kind %s.\n",
2337 map_name, btf_kind_str(t));
2338 return -EINVAL;
2339 }
2340 continue;
2341 }
2342 if (!btf_is_struct(t)) {
2343 pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
2344 map_name, btf_kind_str(t));
2345 return -EINVAL;
2346 }
2347
2348 snprintf(inner_map_name, sizeof(inner_map_name), "%s.inner", map_name);
2349 err = parse_btf_map_def(inner_map_name, btf, t, strict, inner_def, NULL);
2350 if (err)
2351 return err;
2352
2353 map_def->parts |= MAP_DEF_INNER_MAP;
2354 } else if (strcmp(name, "pinning") == 0) {
2355 __u32 val;
2356
2357 if (is_inner) {
2358 pr_warn("map '%s': inner def can't be pinned.\n", map_name);
2359 return -EINVAL;
2360 }
2361 if (!get_map_field_int(map_name, btf, m, &val))
2362 return -EINVAL;
2363 if (val != LIBBPF_PIN_NONE && val != LIBBPF_PIN_BY_NAME) {
2364 pr_warn("map '%s': invalid pinning value %u.\n",
2365 map_name, val);
2366 return -EINVAL;
2367 }
2368 map_def->pinning = val;
2369 map_def->parts |= MAP_DEF_PINNING;
2370 } else if (strcmp(name, "map_extra") == 0) {
2371 __u32 map_extra;
2372
2373 if (!get_map_field_int(map_name, btf, m, &map_extra))
2374 return -EINVAL;
2375 map_def->map_extra = map_extra;
2376 map_def->parts |= MAP_DEF_MAP_EXTRA;
2377 } else {
2378 if (strict) {
2379 pr_warn("map '%s': unknown field '%s'.\n", map_name, name);
2380 return -ENOTSUP;
2381 }
2382 pr_debug("map '%s': ignoring unknown field '%s'.\n", map_name, name);
2383 }
2384 }
2385
2386 if (map_def->map_type == BPF_MAP_TYPE_UNSPEC) {
2387 pr_warn("map '%s': map type isn't specified.\n", map_name);
2388 return -EINVAL;
2389 }
2390
2391 return 0;
2392}
2393
2394static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def)
2395{
2396 map->def.type = def->map_type;
2397 map->def.key_size = def->key_size;
2398 map->def.value_size = def->value_size;
2399 map->def.max_entries = def->max_entries;
2400 map->def.map_flags = def->map_flags;
2401 map->map_extra = def->map_extra;
2402
2403 map->numa_node = def->numa_node;
2404 map->btf_key_type_id = def->key_type_id;
2405 map->btf_value_type_id = def->value_type_id;
2406
2407 if (def->parts & MAP_DEF_MAP_TYPE)
2408 pr_debug("map '%s': found type = %u.\n", map->name, def->map_type);
2409
2410 if (def->parts & MAP_DEF_KEY_TYPE)
2411 pr_debug("map '%s': found key [%u], sz = %u.\n",
2412 map->name, def->key_type_id, def->key_size);
2413 else if (def->parts & MAP_DEF_KEY_SIZE)
2414 pr_debug("map '%s': found key_size = %u.\n", map->name, def->key_size);
2415
2416 if (def->parts & MAP_DEF_VALUE_TYPE)
2417 pr_debug("map '%s': found value [%u], sz = %u.\n",
2418 map->name, def->value_type_id, def->value_size);
2419 else if (def->parts & MAP_DEF_VALUE_SIZE)
2420 pr_debug("map '%s': found value_size = %u.\n", map->name, def->value_size);
2421
2422 if (def->parts & MAP_DEF_MAX_ENTRIES)
2423 pr_debug("map '%s': found max_entries = %u.\n", map->name, def->max_entries);
2424 if (def->parts & MAP_DEF_MAP_FLAGS)
2425 pr_debug("map '%s': found map_flags = 0x%x.\n", map->name, def->map_flags);
2426 if (def->parts & MAP_DEF_MAP_EXTRA)
2427 pr_debug("map '%s': found map_extra = 0x%llx.\n", map->name,
2428 (unsigned long long)def->map_extra);
2429 if (def->parts & MAP_DEF_PINNING)
2430 pr_debug("map '%s': found pinning = %u.\n", map->name, def->pinning);
2431 if (def->parts & MAP_DEF_NUMA_NODE)
2432 pr_debug("map '%s': found numa_node = %u.\n", map->name, def->numa_node);
2433
2434 if (def->parts & MAP_DEF_INNER_MAP)
2435 pr_debug("map '%s': found inner map definition.\n", map->name);
2436}
2437
2438static const char *btf_var_linkage_str(__u32 linkage)
2439{
2440 switch (linkage) {
2441 case BTF_VAR_STATIC: return "static";
2442 case BTF_VAR_GLOBAL_ALLOCATED: return "global";
2443 case BTF_VAR_GLOBAL_EXTERN: return "extern";
2444 default: return "unknown";
2445 }
2446}
2447
2448static int bpf_object__init_user_btf_map(struct bpf_object *obj,
2449 const struct btf_type *sec,
2450 int var_idx, int sec_idx,
2451 const Elf_Data *data, bool strict,
2452 const char *pin_root_path)
2453{
2454 struct btf_map_def map_def = {}, inner_def = {};
2455 const struct btf_type *var, *def;
2456 const struct btf_var_secinfo *vi;
2457 const struct btf_var *var_extra;
2458 const char *map_name;
2459 struct bpf_map *map;
2460 int err;
2461
2462 vi = btf_var_secinfos(sec) + var_idx;
2463 var = btf__type_by_id(obj->btf, vi->type);
2464 var_extra = btf_var(var);
2465 map_name = btf__name_by_offset(obj->btf, var->name_off);
2466
2467 if (map_name == NULL || map_name[0] == '\0') {
2468 pr_warn("map #%d: empty name.\n", var_idx);
2469 return -EINVAL;
2470 }
2471 if ((__u64)vi->offset + vi->size > data->d_size) {
2472 pr_warn("map '%s' BTF data is corrupted.\n", map_name);
2473 return -EINVAL;
2474 }
2475 if (!btf_is_var(var)) {
2476 pr_warn("map '%s': unexpected var kind %s.\n",
2477 map_name, btf_kind_str(var));
2478 return -EINVAL;
2479 }
2480 if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
2481 pr_warn("map '%s': unsupported map linkage %s.\n",
2482 map_name, btf_var_linkage_str(var_extra->linkage));
2483 return -EOPNOTSUPP;
2484 }
2485
2486 def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
2487 if (!btf_is_struct(def)) {
2488 pr_warn("map '%s': unexpected def kind %s.\n",
2489 map_name, btf_kind_str(var));
2490 return -EINVAL;
2491 }
2492 if (def->size > vi->size) {
2493 pr_warn("map '%s': invalid def size.\n", map_name);
2494 return -EINVAL;
2495 }
2496
2497 map = bpf_object__add_map(obj);
2498 if (IS_ERR(map))
2499 return PTR_ERR(map);
2500 map->name = strdup(map_name);
2501 if (!map->name) {
2502 pr_warn("map '%s': failed to alloc map name.\n", map_name);
2503 return -ENOMEM;
2504 }
2505 map->libbpf_type = LIBBPF_MAP_UNSPEC;
2506 map->def.type = BPF_MAP_TYPE_UNSPEC;
2507 map->sec_idx = sec_idx;
2508 map->sec_offset = vi->offset;
2509 map->btf_var_idx = var_idx;
2510 pr_debug("map '%s': at sec_idx %d, offset %zu.\n",
2511 map_name, map->sec_idx, map->sec_offset);
2512
2513 err = parse_btf_map_def(map->name, obj->btf, def, strict, &map_def, &inner_def);
2514 if (err)
2515 return err;
2516
2517 fill_map_from_def(map, &map_def);
2518
2519 if (map_def.pinning == LIBBPF_PIN_BY_NAME) {
2520 err = build_map_pin_path(map, pin_root_path);
2521 if (err) {
2522 pr_warn("map '%s': couldn't build pin path.\n", map->name);
2523 return err;
2524 }
2525 }
2526
2527 if (map_def.parts & MAP_DEF_INNER_MAP) {
2528 map->inner_map = calloc(1, sizeof(*map->inner_map));
2529 if (!map->inner_map)
2530 return -ENOMEM;
2531 map->inner_map->fd = -1;
2532 map->inner_map->sec_idx = sec_idx;
2533 map->inner_map->name = malloc(strlen(map_name) + sizeof(".inner") + 1);
2534 if (!map->inner_map->name)
2535 return -ENOMEM;
2536 sprintf(map->inner_map->name, "%s.inner", map_name);
2537
2538 fill_map_from_def(map->inner_map, &inner_def);
2539 }
2540
2541 return 0;
2542}
2543
2544static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
2545 const char *pin_root_path)
2546{
2547 const struct btf_type *sec = NULL;
2548 int nr_types, i, vlen, err;
2549 const struct btf_type *t;
2550 const char *name;
2551 Elf_Data *data;
2552 Elf_Scn *scn;
2553
2554 if (obj->efile.btf_maps_shndx < 0)
2555 return 0;
2556
2557 scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx);
2558 data = elf_sec_data(obj, scn);
2559 if (!scn || !data) {
2560 pr_warn("elf: failed to get %s map definitions for %s\n",
2561 MAPS_ELF_SEC, obj->path);
2562 return -EINVAL;
2563 }
2564
2565 nr_types = btf__type_cnt(obj->btf);
2566 for (i = 1; i < nr_types; i++) {
2567 t = btf__type_by_id(obj->btf, i);
2568 if (!btf_is_datasec(t))
2569 continue;
2570 name = btf__name_by_offset(obj->btf, t->name_off);
2571 if (strcmp(name, MAPS_ELF_SEC) == 0) {
2572 sec = t;
2573 obj->efile.btf_maps_sec_btf_id = i;
2574 break;
2575 }
2576 }
2577
2578 if (!sec) {
2579 pr_warn("DATASEC '%s' not found.\n", MAPS_ELF_SEC);
2580 return -ENOENT;
2581 }
2582
2583 vlen = btf_vlen(sec);
2584 for (i = 0; i < vlen; i++) {
2585 err = bpf_object__init_user_btf_map(obj, sec, i,
2586 obj->efile.btf_maps_shndx,
2587 data, strict,
2588 pin_root_path);
2589 if (err)
2590 return err;
2591 }
2592
2593 return 0;
2594}
2595
2596static int bpf_object__init_maps(struct bpf_object *obj,
2597 const struct bpf_object_open_opts *opts)
2598{
2599 const char *pin_root_path;
2600 bool strict;
2601 int err;
2602
2603 strict = !OPTS_GET(opts, relaxed_maps, false);
2604 pin_root_path = OPTS_GET(opts, pin_root_path, NULL);
2605
2606 err = bpf_object__init_user_maps(obj, strict);
2607 err = err ?: bpf_object__init_user_btf_maps(obj, strict, pin_root_path);
2608 err = err ?: bpf_object__init_global_data_maps(obj);
2609 err = err ?: bpf_object__init_kconfig_map(obj);
2610 err = err ?: bpf_object__init_struct_ops_maps(obj);
2611
2612 return err;
2613}
2614
2615static bool section_have_execinstr(struct bpf_object *obj, int idx)
2616{
2617 Elf64_Shdr *sh;
2618
2619 sh = elf_sec_hdr(obj, elf_sec_by_idx(obj, idx));
2620 if (!sh)
2621 return false;
2622
2623 return sh->sh_flags & SHF_EXECINSTR;
2624}
2625
2626static bool btf_needs_sanitization(struct bpf_object *obj)
2627{
2628 bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
2629 bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
2630 bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
2631 bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
2632 bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
2633 bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
2634
2635 return !has_func || !has_datasec || !has_func_global || !has_float ||
2636 !has_decl_tag || !has_type_tag;
2637}
2638
2639static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
2640{
2641 bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
2642 bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
2643 bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
2644 bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
2645 bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
2646 bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
2647 struct btf_type *t;
2648 int i, j, vlen;
2649
2650 for (i = 1; i < btf__type_cnt(btf); i++) {
2651 t = (struct btf_type *)btf__type_by_id(btf, i);
2652
		if ((!has_datasec && btf_is_var(t)) || (!has_decl_tag && btf_is_decl_tag(t))) {
			/* replace VAR/DECL_TAG with INT */
			t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
			/*
			 * using size = 1 is the safest choice, 4 will be too
			 * big and cause kernel BTF validation failure if
			 * original variable took less than 4 bytes
			 */
			t->size = 1;
			*(int *)(t + 1) = BTF_INT_ENC(0, 0, 8);
		} else if (!has_datasec && btf_is_datasec(t)) {
			/* replace DATASEC with STRUCT */
2665 const struct btf_var_secinfo *v = btf_var_secinfos(t);
2666 struct btf_member *m = btf_members(t);
2667 struct btf_type *vt;
2668 char *name;
2669
2670 name = (char *)btf__name_by_offset(btf, t->name_off);
2671 while (*name) {
2672 if (*name == '.')
2673 *name = '_';
2674 name++;
2675 }
2676
2677 vlen = btf_vlen(t);
2678 t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
			for (j = 0; j < vlen; j++, v++, m++) {
				/* order of field assignments is important */
				m->offset = v->offset * 8;
				m->type = v->type;
				/* preserve variable name as member name */
				vt = (void *)btf__type_by_id(btf, v->type);
				m->name_off = vt->name_off;
2686 }
		} else if (!has_func && btf_is_func_proto(t)) {
			/* replace FUNC_PROTO with ENUM */
2689 vlen = btf_vlen(t);
2690 t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
2691 t->size = sizeof(__u32);
		} else if (!has_func && btf_is_func(t)) {
			/* replace FUNC with TYPEDEF */
2694 t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
		} else if (!has_func_global && btf_is_func(t)) {
			/* replace BTF_FUNC_GLOBAL with BTF_FUNC_STATIC */
2697 t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0);
		} else if (!has_float && btf_is_float(t)) {
			/* replace FLOAT with an equally-sized empty STRUCT;
			 * since C compilers do not accept e.g. "float" as a
			 * valid struct name, make it anonymous
			 */
			t->name_off = 0;
			t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0);
		} else if (!has_type_tag && btf_is_type_tag(t)) {
			/* replace TYPE_TAG with a CONST */
2707 t->name_off = 0;
2708 t->info = BTF_INFO_ENC(BTF_KIND_CONST, 0, 0);
2709 }
2710 }
2711}
2712
2713static bool libbpf_needs_btf(const struct bpf_object *obj)
2714{
2715 return obj->efile.btf_maps_shndx >= 0 ||
2716 obj->efile.st_ops_shndx >= 0 ||
2717 obj->nr_extern > 0;
2718}
2719
2720static bool kernel_needs_btf(const struct bpf_object *obj)
2721{
2722 return obj->efile.st_ops_shndx >= 0;
2723}
2724
2725static int bpf_object__init_btf(struct bpf_object *obj,
2726 Elf_Data *btf_data,
2727 Elf_Data *btf_ext_data)
2728{
2729 int err = -ENOENT;
2730
2731 if (btf_data) {
2732 obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
2733 err = libbpf_get_error(obj->btf);
2734 if (err) {
2735 obj->btf = NULL;
2736 pr_warn("Error loading ELF section %s: %d.\n", BTF_ELF_SEC, err);
2737 goto out;
2738 }
2739
		/* enforce 8-byte pointers for BPF-targeted BTFs */
		btf__set_pointer_size(obj->btf, 8);
2741 }
2742 if (btf_ext_data) {
2743 if (!obj->btf) {
2744 pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n",
2745 BTF_EXT_ELF_SEC, BTF_ELF_SEC);
2746 goto out;
2747 }
2748 obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size);
2749 err = libbpf_get_error(obj->btf_ext);
2750 if (err) {
2751 pr_warn("Error loading ELF section %s: %d. Ignored and continue.\n",
2752 BTF_EXT_ELF_SEC, err);
2753 obj->btf_ext = NULL;
2754 goto out;
2755 }
2756 }
2757out:
2758 if (err && libbpf_needs_btf(obj)) {
2759 pr_warn("BTF is required, but is missing or corrupted.\n");
2760 return err;
2761 }
2762 return 0;
2763}
2764
2765static int compare_vsi_off(const void *_a, const void *_b)
2766{
2767 const struct btf_var_secinfo *a = _a;
2768 const struct btf_var_secinfo *b = _b;
2769
2770 return a->offset - b->offset;
2771}
2772
2773static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
2774 struct btf_type *t)
2775{
2776 __u32 size = 0, off = 0, i, vars = btf_vlen(t);
2777 const char *name = btf__name_by_offset(btf, t->name_off);
2778 const struct btf_type *t_var;
2779 struct btf_var_secinfo *vsi;
2780 const struct btf_var *var;
2781 int ret;
2782
2783 if (!name) {
2784 pr_debug("No name found in string section for DATASEC kind.\n");
2785 return -ENOENT;
2786 }
2787
2788
2789
2790
2791 if (t->size)
2792 goto sort_vars;
2793
2794 ret = find_elf_sec_sz(obj, name, &size);
	if (ret || !size) {
2796 pr_debug("Invalid size for section %s: %u bytes\n", name, size);
2797 return -ENOENT;
2798 }
2799
2800 t->size = size;
2801
2802 for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
2803 t_var = btf__type_by_id(btf, vsi->type);
2804 if (!t_var || !btf_is_var(t_var)) {
2805 pr_debug("Non-VAR type seen in section %s\n", name);
2806 return -EINVAL;
2807 }
2808
2809 var = btf_var(t_var);
2810 if (var->linkage == BTF_VAR_STATIC)
2811 continue;
2812
2813 name = btf__name_by_offset(btf, t_var->name_off);
2814 if (!name) {
2815 pr_debug("No name found in string section for VAR kind\n");
2816 return -ENOENT;
2817 }
2818
2819 ret = find_elf_var_offset(obj, name, &off);
2820 if (ret) {
2821 pr_debug("No offset found in symbol table for VAR %s\n",
2822 name);
2823 return -ENOENT;
2824 }
2825
2826 vsi->offset = off;
2827 }
2828
2829sort_vars:
2830 qsort(btf_var_secinfos(t), vars, sizeof(*vsi), compare_vsi_off);
2831 return 0;
2832}
2833
2834static int btf_finalize_data(struct bpf_object *obj, struct btf *btf)
2835{
2836 int err = 0;
2837 __u32 i, n = btf__type_cnt(btf);
2838
2839 for (i = 1; i < n; i++) {
		struct btf_type *t = btf_type_by_id(btf, i);

		/* Loader needs to fix up some of the things compiler
		 * couldn't get its hands on while emitting BTF. This
		 * is section size and global variable offset. We use
		 * the info from the ELF itself for this purpose.
		 */
		if (btf_is_datasec(t)) {
2848 err = btf_fixup_datasec(obj, btf, t);
2849 if (err)
2850 break;
2851 }
2852 }
2853
2854 return libbpf_err(err);
2855}
2856
2857int btf__finalize_data(struct bpf_object *obj, struct btf *btf)
2858{
2859 return btf_finalize_data(obj, btf);
2860}
2861
2862static int bpf_object__finalize_btf(struct bpf_object *obj)
2863{
2864 int err;
2865
2866 if (!obj->btf)
2867 return 0;
2868
2869 err = btf_finalize_data(obj, obj->btf);
2870 if (err) {
2871 pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err);
2872 return err;
2873 }
2874
2875 return 0;
2876}
2877
2878static bool prog_needs_vmlinux_btf(struct bpf_program *prog)
2879{
2880 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
2881 prog->type == BPF_PROG_TYPE_LSM)
		return true;

	/* BPF_PROG_TYPE_TRACING programs which do not attach to other
	 * programs also need vmlinux BTF
	 */
	if (prog->type == BPF_PROG_TYPE_TRACING && !prog->attach_prog_fd)
2888 return true;
2889
2890 return false;
2891}
2892
2893static bool obj_needs_vmlinux_btf(const struct bpf_object *obj)
2894{
2895 struct bpf_program *prog;
	int i;

	/* CO-RE relocations need kernel BTF, only when btf_custom_path
	 * is not specified
	 */
	if (obj->btf_ext && obj->btf_ext->core_relo_info.len && !obj->btf_custom_path)
		return true;

	/* Support for typed ksyms needs kernel BTF */
	for (i = 0; i < obj->nr_extern; i++) {
2906 const struct extern_desc *ext;
2907
2908 ext = &obj->externs[i];
2909 if (ext->type == EXT_KSYM && ext->ksym.type_id)
2910 return true;
2911 }
2912
2913 bpf_object__for_each_program(prog, obj) {
2914 if (!prog->load)
2915 continue;
2916 if (prog_needs_vmlinux_btf(prog))
2917 return true;
2918 }
2919
2920 return false;
2921}
2922
2923static int bpf_object__load_vmlinux_btf(struct bpf_object *obj, bool force)
2924{
	int err;

	/* btf_vmlinux could be loaded earlier */
	if (obj->btf_vmlinux || obj->gen_loader)
2929 return 0;
2930
2931 if (!force && !obj_needs_vmlinux_btf(obj))
2932 return 0;
2933
2934 obj->btf_vmlinux = btf__load_vmlinux_btf();
2935 err = libbpf_get_error(obj->btf_vmlinux);
2936 if (err) {
2937 pr_warn("Error loading vmlinux BTF: %d\n", err);
2938 obj->btf_vmlinux = NULL;
2939 return err;
2940 }
2941 return 0;
2942}
2943
2944static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
2945{
2946 struct btf *kern_btf = obj->btf;
2947 bool btf_mandatory, sanitize;
2948 int i, err = 0;
2949
2950 if (!obj->btf)
2951 return 0;
2952
2953 if (!kernel_supports(obj, FEAT_BTF)) {
2954 if (kernel_needs_btf(obj)) {
2955 err = -EOPNOTSUPP;
2956 goto report;
2957 }
2958 pr_debug("Kernel doesn't support BTF, skipping uploading it.\n");
2959 return 0;
2960 }
2961
2962
2963
2964
2965
2966
2967
2968
2969
2970 for (i = 0; i < obj->nr_programs; i++) {
2971 struct bpf_program *prog = &obj->programs[i];
2972 struct btf_type *t;
2973 const char *name;
2974 int j, n;
2975
2976 if (!prog->mark_btf_static || !prog_is_subprog(obj, prog))
2977 continue;
2978
2979 n = btf__type_cnt(obj->btf);
2980 for (j = 1; j < n; j++) {
2981 t = btf_type_by_id(obj->btf, j);
2982 if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL)
2983 continue;
2984
2985 name = btf__str_by_offset(obj->btf, t->name_off);
2986 if (strcmp(name, prog->name) != 0)
2987 continue;
2988
2989 t->info = btf_type_info(BTF_KIND_FUNC, BTF_FUNC_STATIC, 0);
2990 break;
2991 }
2992 }
2993
2994 sanitize = btf_needs_sanitization(obj);
2995 if (sanitize) {
2996 const void *raw_data;
		__u32 sz;

		/* clone BTF to sanitize a copy and leave the original intact */
		raw_data = btf__raw_data(obj->btf, &sz);
3001 kern_btf = btf__new(raw_data, sz);
3002 err = libbpf_get_error(kern_btf);
3003 if (err)
			return err;

		/* enforce 8-byte pointers for BPF-targeted BTFs */
		btf__set_pointer_size(obj->btf, 8);
3008 bpf_object__sanitize_btf(obj, kern_btf);
3009 }
3010
3011 if (obj->gen_loader) {
3012 __u32 raw_size = 0;
3013 const void *raw_data = btf__raw_data(kern_btf, &raw_size);
3014
3015 if (!raw_data)
3016 return -ENOMEM;
		bpf_gen__load_btf(obj->gen_loader, raw_data, raw_size);
		/* Pretend to have a valid FD to pass various fd >= 0 checks.
		 * This fd == 0 will not be used with any syscall.
		 */
		btf__set_fd(kern_btf, 0);
	} else {
		/* currently BPF_BTF_LOAD only supports log_level 1 */
		err = btf_load_into_kernel(kern_btf, obj->log_buf, obj->log_size,
3025 obj->log_level ? 1 : 0);
3026 }
3027 if (sanitize) {
		if (!err) {
			/* move fd to libbpf's BTF */
			btf__set_fd(obj->btf, btf__fd(kern_btf));
3031 btf__set_fd(kern_btf, -1);
3032 }
3033 btf__free(kern_btf);
3034 }
3035report:
3036 if (err) {
3037 btf_mandatory = kernel_needs_btf(obj);
3038 pr_warn("Error loading .BTF into kernel: %d. %s\n", err,
3039 btf_mandatory ? "BTF is mandatory, can't proceed."
3040 : "BTF is optional, ignoring.");
3041 if (!btf_mandatory)
3042 err = 0;
3043 }
3044 return err;
3045}
3046
3047static const char *elf_sym_str(const struct bpf_object *obj, size_t off)
3048{
3049 const char *name;
3050
3051 name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, off);
3052 if (!name) {
3053 pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
3054 off, obj->path, elf_errmsg(-1));
3055 return NULL;
3056 }
3057
3058 return name;
3059}
3060
3061static const char *elf_sec_str(const struct bpf_object *obj, size_t off)
3062{
3063 const char *name;
3064
3065 name = elf_strptr(obj->efile.elf, obj->efile.shstrndx, off);
3066 if (!name) {
3067 pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
3068 off, obj->path, elf_errmsg(-1));
3069 return NULL;
3070 }
3071
3072 return name;
3073}
3074
3075static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx)
3076{
3077 Elf_Scn *scn;
3078
3079 scn = elf_getscn(obj->efile.elf, idx);
3080 if (!scn) {
3081 pr_warn("elf: failed to get section(%zu) from %s: %s\n",
3082 idx, obj->path, elf_errmsg(-1));
3083 return NULL;
3084 }
3085 return scn;
3086}
3087
3088static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name)
3089{
3090 Elf_Scn *scn = NULL;
3091 Elf *elf = obj->efile.elf;
3092 const char *sec_name;
3093
3094 while ((scn = elf_nextscn(elf, scn)) != NULL) {
3095 sec_name = elf_sec_name(obj, scn);
3096 if (!sec_name)
3097 return NULL;
3098
3099 if (strcmp(sec_name, name) != 0)
3100 continue;
3101
3102 return scn;
3103 }
3104 return NULL;
3105}
3106
3107static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn)
3108{
3109 Elf64_Shdr *shdr;
3110
3111 if (!scn)
3112 return NULL;
3113
3114 shdr = elf64_getshdr(scn);
3115 if (!shdr) {
3116 pr_warn("elf: failed to get section(%zu) header from %s: %s\n",
3117 elf_ndxscn(scn), obj->path, elf_errmsg(-1));
3118 return NULL;
3119 }
3120
3121 return shdr;
3122}
3123
3124static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn)
3125{
3126 const char *name;
3127 Elf64_Shdr *sh;
3128
3129 if (!scn)
3130 return NULL;
3131
3132 sh = elf_sec_hdr(obj, scn);
3133 if (!sh)
3134 return NULL;
3135
3136 name = elf_sec_str(obj, sh->sh_name);
3137 if (!name) {
3138 pr_warn("elf: failed to get section(%zu) name from %s: %s\n",
3139 elf_ndxscn(scn), obj->path, elf_errmsg(-1));
3140 return NULL;
3141 }
3142
3143 return name;
3144}
3145
3146static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn)
3147{
3148 Elf_Data *data;
3149
3150 if (!scn)
3151 return NULL;
3152
3153 data = elf_getdata(scn, 0);
3154 if (!data) {
3155 pr_warn("elf: failed to get section(%zu) %s data from %s: %s\n",
3156 elf_ndxscn(scn), elf_sec_name(obj, scn) ?: "<?>",
3157 obj->path, elf_errmsg(-1));
3158 return NULL;
3159 }
3160
3161 return data;
3162}
3163
3164static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx)
3165{
3166 if (idx >= obj->efile.symbols->d_size / sizeof(Elf64_Sym))
3167 return NULL;
3168
3169 return (Elf64_Sym *)obj->efile.symbols->d_buf + idx;
3170}
3171
3172static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx)
3173{
3174 if (idx >= data->d_size / sizeof(Elf64_Rel))
3175 return NULL;
3176
3177 return (Elf64_Rel *)data->d_buf + idx;
3178}
3179
3180static bool is_sec_name_dwarf(const char *name)
3181{
3182
3183 return str_has_pfx(name, ".debug_");
3184}
3185
3186static bool ignore_elf_section(Elf64_Shdr *hdr, const char *name)
3187{
3188
3189 if (hdr->sh_type == SHT_STRTAB)
3190 return true;
3191
3192
3193 if (hdr->sh_type == SHT_LLVM_ADDRSIG)
3194 return true;
3195
3196
3197 if (hdr->sh_type == SHT_PROGBITS && hdr->sh_size == 0 &&
3198 strcmp(name, ".text") == 0)
3199 return true;
3200
3201
3202 if (is_sec_name_dwarf(name))
3203 return true;
3204
3205 if (str_has_pfx(name, ".rel")) {
3206 name += sizeof(".rel") - 1;
3207
3208 if (is_sec_name_dwarf(name))
3209 return true;
3210
3211
3212 if (strcmp(name, BTF_ELF_SEC) == 0 ||
3213 strcmp(name, BTF_EXT_ELF_SEC) == 0)
3214 return true;
3215 }
3216
3217 return false;
3218}
3219
3220static int cmp_progs(const void *_a, const void *_b)
3221{
3222 const struct bpf_program *a = _a;
3223 const struct bpf_program *b = _b;
3224
3225 if (a->sec_idx != b->sec_idx)
		return a->sec_idx < b->sec_idx ? -1 : 1;

	/* sec_insn_off can't be the same within the same section */
	return a->sec_insn_off < b->sec_insn_off ? -1 : 1;
3230}
3231
3232static int bpf_object__elf_collect(struct bpf_object *obj)
3233{
3234 struct elf_sec_desc *sec_desc;
3235 Elf *elf = obj->efile.elf;
3236 Elf_Data *btf_ext_data = NULL;
3237 Elf_Data *btf_data = NULL;
3238 int idx = 0, err = 0;
3239 const char *name;
3240 Elf_Data *data;
3241 Elf_Scn *scn;
	Elf64_Shdr *sh;

	/* ELF section indices are 0-based, but sec #0 is a special "invalid"
	 * section. Since e_shnum does include sec #0, it is already the
	 * necessary size of an array to keep all the sections.
	 */
	obj->efile.sec_cnt = obj->efile.ehdr->e_shnum;
3249 obj->efile.secs = calloc(obj->efile.sec_cnt, sizeof(*obj->efile.secs));
3250 if (!obj->efile.secs)
		return -ENOMEM;

	/* a bunch of ELF parsing functionality depends on processing symbols,
	 * so do the first pass and find the symbol table
	 */
	scn = NULL;
3257 while ((scn = elf_nextscn(elf, scn)) != NULL) {
3258 sh = elf_sec_hdr(obj, scn);
3259 if (!sh)
3260 return -LIBBPF_ERRNO__FORMAT;
3261
3262 if (sh->sh_type == SHT_SYMTAB) {
3263 if (obj->efile.symbols) {
3264 pr_warn("elf: multiple symbol tables in %s\n", obj->path);
3265 return -LIBBPF_ERRNO__FORMAT;
3266 }
3267
3268 data = elf_sec_data(obj, scn);
3269 if (!data)
3270 return -LIBBPF_ERRNO__FORMAT;
3271
3272 idx = elf_ndxscn(scn);
3273
3274 obj->efile.symbols = data;
3275 obj->efile.symbols_shndx = idx;
3276 obj->efile.strtabidx = sh->sh_link;
3277 }
3278 }
3279
3280 if (!obj->efile.symbols) {
3281 pr_warn("elf: couldn't find symbol table in %s, stripped object file?\n",
3282 obj->path);
3283 return -ENOENT;
3284 }
3285
3286 scn = NULL;
3287 while ((scn = elf_nextscn(elf, scn)) != NULL) {
3288 idx = elf_ndxscn(scn);
3289 sec_desc = &obj->efile.secs[idx];
3290
3291 sh = elf_sec_hdr(obj, scn);
3292 if (!sh)
3293 return -LIBBPF_ERRNO__FORMAT;
3294
3295 name = elf_sec_str(obj, sh->sh_name);
3296 if (!name)
3297 return -LIBBPF_ERRNO__FORMAT;
3298
3299 if (ignore_elf_section(sh, name))
3300 continue;
3301
3302 data = elf_sec_data(obj, scn);
3303 if (!data)
3304 return -LIBBPF_ERRNO__FORMAT;
3305
3306 pr_debug("elf: section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
3307 idx, name, (unsigned long)data->d_size,
3308 (int)sh->sh_link, (unsigned long)sh->sh_flags,
3309 (int)sh->sh_type);
3310
3311 if (strcmp(name, "license") == 0) {
3312 err = bpf_object__init_license(obj, data->d_buf, data->d_size);
3313 if (err)
3314 return err;
3315 } else if (strcmp(name, "version") == 0) {
3316 err = bpf_object__init_kversion(obj, data->d_buf, data->d_size);
3317 if (err)
3318 return err;
3319 } else if (strcmp(name, "maps") == 0) {
3320 obj->efile.maps_shndx = idx;
3321 } else if (strcmp(name, MAPS_ELF_SEC) == 0) {
3322 obj->efile.btf_maps_shndx = idx;
3323 } else if (strcmp(name, BTF_ELF_SEC) == 0) {
3324 if (sh->sh_type != SHT_PROGBITS)
3325 return -LIBBPF_ERRNO__FORMAT;
3326 btf_data = data;
3327 } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
3328 if (sh->sh_type != SHT_PROGBITS)
3329 return -LIBBPF_ERRNO__FORMAT;
3330 btf_ext_data = data;
		} else if (sh->sh_type == SHT_SYMTAB) {
			/* already processed during the first pass above */
3333 } else if (sh->sh_type == SHT_PROGBITS && data->d_size > 0) {
3334 if (sh->sh_flags & SHF_EXECINSTR) {
3335 if (strcmp(name, ".text") == 0)
3336 obj->efile.text_shndx = idx;
3337 err = bpf_object__add_programs(obj, data, name, idx);
3338 if (err)
3339 return err;
3340 } else if (strcmp(name, DATA_SEC) == 0 ||
3341 str_has_pfx(name, DATA_SEC ".")) {
3342 sec_desc->sec_type = SEC_DATA;
3343 sec_desc->shdr = sh;
3344 sec_desc->data = data;
3345 } else if (strcmp(name, RODATA_SEC) == 0 ||
3346 str_has_pfx(name, RODATA_SEC ".")) {
3347 sec_desc->sec_type = SEC_RODATA;
3348 sec_desc->shdr = sh;
3349 sec_desc->data = data;
3350 } else if (strcmp(name, STRUCT_OPS_SEC) == 0) {
3351 obj->efile.st_ops_data = data;
3352 obj->efile.st_ops_shndx = idx;
3353 } else {
3354 pr_info("elf: skipping unrecognized data section(%d) %s\n",
3355 idx, name);
3356 }
3357 } else if (sh->sh_type == SHT_REL) {
			int targ_sec_idx = sh->sh_info; /* points to other section */

			if (sh->sh_entsize != sizeof(Elf64_Rel) ||
			    targ_sec_idx >= obj->efile.sec_cnt)
				return -LIBBPF_ERRNO__FORMAT;

			/* Only do relo for section with exec instructions */
			if (!section_have_execinstr(obj, targ_sec_idx) &&
3366 strcmp(name, ".rel" STRUCT_OPS_SEC) &&
3367 strcmp(name, ".rel" MAPS_ELF_SEC)) {
3368 pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n",
3369 idx, name, targ_sec_idx,
3370 elf_sec_name(obj, elf_sec_by_idx(obj, targ_sec_idx)) ?: "<?>");
3371 continue;
3372 }
3373
3374 sec_desc->sec_type = SEC_RELO;
3375 sec_desc->shdr = sh;
3376 sec_desc->data = data;
3377 } else if (sh->sh_type == SHT_NOBITS && strcmp(name, BSS_SEC) == 0) {
3378 sec_desc->sec_type = SEC_BSS;
3379 sec_desc->shdr = sh;
3380 sec_desc->data = data;
3381 } else {
3382 pr_info("elf: skipping section(%d) %s (size %zu)\n", idx, name,
3383 (size_t)sh->sh_size);
3384 }
3385 }
3386
3387 if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) {
3388 pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path);
3389 return -LIBBPF_ERRNO__FORMAT;
3390 }
3391
3392
3393
3394 if (obj->nr_programs)
3395 qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs);
3396
3397 return bpf_object__init_btf(obj, btf_data, btf_ext_data);
3398}
3399
3400static bool sym_is_extern(const Elf64_Sym *sym)
3401{
3402 int bind = ELF64_ST_BIND(sym->st_info);
3403
3404 return sym->st_shndx == SHN_UNDEF &&
3405 (bind == STB_GLOBAL || bind == STB_WEAK) &&
3406 ELF64_ST_TYPE(sym->st_info) == STT_NOTYPE;
3407}
3408
3409static bool sym_is_subprog(const Elf64_Sym *sym, int text_shndx)
3410{
3411 int bind = ELF64_ST_BIND(sym->st_info);
	int type = ELF64_ST_TYPE(sym->st_info);

	/* in .text section */
	if (sym->st_shndx != text_shndx)
		return false;

	/* local function */
	if (bind == STB_LOCAL && type == STT_SECTION)
		return true;

	/* global function */
	return bind == STB_GLOBAL && type == STT_FUNC;
3424}
3425
3426static int find_extern_btf_id(const struct btf *btf, const char *ext_name)
3427{
3428 const struct btf_type *t;
3429 const char *tname;
3430 int i, n;
3431
3432 if (!btf)
3433 return -ESRCH;
3434
3435 n = btf__type_cnt(btf);
3436 for (i = 1; i < n; i++) {
3437 t = btf__type_by_id(btf, i);
3438
3439 if (!btf_is_var(t) && !btf_is_func(t))
3440 continue;
3441
3442 tname = btf__name_by_offset(btf, t->name_off);
3443 if (strcmp(tname, ext_name))
3444 continue;
3445
3446 if (btf_is_var(t) &&
3447 btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN)
3448 return -EINVAL;
3449
3450 if (btf_is_func(t) && btf_func_linkage(t) != BTF_FUNC_EXTERN)
3451 return -EINVAL;
3452
3453 return i;
3454 }
3455
3456 return -ENOENT;
3457}
3458
static int find_extern_sec_btf_id(struct btf *btf, int ext_btf_id)
{
3460 const struct btf_var_secinfo *vs;
3461 const struct btf_type *t;
3462 int i, j, n;
3463
3464 if (!btf)
3465 return -ESRCH;
3466
3467 n = btf__type_cnt(btf);
3468 for (i = 1; i < n; i++) {
3469 t = btf__type_by_id(btf, i);
3470
3471 if (!btf_is_datasec(t))
3472 continue;
3473
3474 vs = btf_var_secinfos(t);
3475 for (j = 0; j < btf_vlen(t); j++, vs++) {
3476 if (vs->type == ext_btf_id)
3477 return i;
3478 }
3479 }
3480
3481 return -ENOENT;
3482}
3483
3484static enum kcfg_type find_kcfg_type(const struct btf *btf, int id,
3485 bool *is_signed)
3486{
3487 const struct btf_type *t;
3488 const char *name;
3489
3490 t = skip_mods_and_typedefs(btf, id, NULL);
3491 name = btf__name_by_offset(btf, t->name_off);
3492
3493 if (is_signed)
3494 *is_signed = false;
3495 switch (btf_kind(t)) {
3496 case BTF_KIND_INT: {
3497 int enc = btf_int_encoding(t);
3498
3499 if (enc & BTF_INT_BOOL)
3500 return t->size == 1 ? KCFG_BOOL : KCFG_UNKNOWN;
3501 if (is_signed)
3502 *is_signed = enc & BTF_INT_SIGNED;
3503 if (t->size == 1)
3504 return KCFG_CHAR;
3505 if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1)))
3506 return KCFG_UNKNOWN;
3507 return KCFG_INT;
3508 }
3509 case BTF_KIND_ENUM:
3510 if (t->size != 4)
3511 return KCFG_UNKNOWN;
3512 if (strcmp(name, "libbpf_tristate"))
3513 return KCFG_UNKNOWN;
3514 return KCFG_TRISTATE;
3515 case BTF_KIND_ARRAY:
3516 if (btf_array(t)->nelems == 0)
3517 return KCFG_UNKNOWN;
3518 if (find_kcfg_type(btf, btf_array(t)->type, NULL) != KCFG_CHAR)
3519 return KCFG_UNKNOWN;
3520 return KCFG_CHAR_ARR;
3521 default:
3522 return KCFG_UNKNOWN;
3523 }
3524}
3525
3526static int cmp_externs(const void *_a, const void *_b)
3527{
3528 const struct extern_desc *a = _a;
3529 const struct extern_desc *b = _b;
3530
3531 if (a->type != b->type)
3532 return a->type < b->type ? -1 : 1;
3533
3534 if (a->type == EXT_KCFG) {
3535
3536 if (a->kcfg.align != b->kcfg.align)
3537 return a->kcfg.align > b->kcfg.align ? -1 : 1;
3538
3539 if (a->kcfg.sz != b->kcfg.sz)
3540 return a->kcfg.sz < b->kcfg.sz ? -1 : 1;
3541 }
3542
3543
3544 return strcmp(a->name, b->name);
3545}
3546
3547static int find_int_btf_id(const struct btf *btf)
3548{
3549 const struct btf_type *t;
3550 int i, n;
3551
3552 n = btf__type_cnt(btf);
3553 for (i = 1; i < n; i++) {
3554 t = btf__type_by_id(btf, i);
3555
3556 if (btf_is_int(t) && btf_int_bits(t) == 32)
3557 return i;
3558 }
3559
3560 return 0;
3561}
3562
3563static int add_dummy_ksym_var(struct btf *btf)
3564{
3565 int i, int_btf_id, sec_btf_id, dummy_var_btf_id;
3566 const struct btf_var_secinfo *vs;
3567 const struct btf_type *sec;
3568
3569 if (!btf)
3570 return 0;
3571
3572 sec_btf_id = btf__find_by_name_kind(btf, KSYMS_SEC,
3573 BTF_KIND_DATASEC);
3574 if (sec_btf_id < 0)
3575 return 0;
3576
3577 sec = btf__type_by_id(btf, sec_btf_id);
3578 vs = btf_var_secinfos(sec);
3579 for (i = 0; i < btf_vlen(sec); i++, vs++) {
3580 const struct btf_type *vt;
3581
3582 vt = btf__type_by_id(btf, vs->type);
3583 if (btf_is_func(vt))
3584 break;
3585 }
3586
3587
3588 if (i == btf_vlen(sec))
3589 return 0;
3590
3591 int_btf_id = find_int_btf_id(btf);
3592 dummy_var_btf_id = btf__add_var(btf,
3593 "dummy_ksym",
3594 BTF_VAR_GLOBAL_ALLOCATED,
3595 int_btf_id);
3596 if (dummy_var_btf_id < 0)
3597 pr_warn("cannot create a dummy_ksym var\n");
3598
3599 return dummy_var_btf_id;
3600}
3601
3602static int bpf_object__collect_externs(struct bpf_object *obj)
3603{
3604 struct btf_type *sec, *kcfg_sec = NULL, *ksym_sec = NULL;
3605 const struct btf_type *t;
3606 struct extern_desc *ext;
3607 int i, n, off, dummy_var_btf_id;
3608 const char *ext_name, *sec_name;
3609 Elf_Scn *scn;
3610 Elf64_Shdr *sh;
3611
3612 if (!obj->efile.symbols)
3613 return 0;
3614
3615 scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx);
3616 sh = elf_sec_hdr(obj, scn);
3617 if (!sh || sh->sh_entsize != sizeof(Elf64_Sym))
3618 return -LIBBPF_ERRNO__FORMAT;
3619
3620 dummy_var_btf_id = add_dummy_ksym_var(obj->btf);
3621 if (dummy_var_btf_id < 0)
3622 return dummy_var_btf_id;
3623
3624 n = sh->sh_size / sh->sh_entsize;
3625 pr_debug("looking for externs among %d symbols...\n", n);
3626
3627 for (i = 0; i < n; i++) {
3628 Elf64_Sym *sym = elf_sym_by_idx(obj, i);
3629
3630 if (!sym)
3631 return -LIBBPF_ERRNO__FORMAT;
3632 if (!sym_is_extern(sym))
3633 continue;
3634 ext_name = elf_sym_str(obj, sym->st_name);
3635 if (!ext_name || !ext_name[0])
3636 continue;
3637
3638 ext = obj->externs;
3639 ext = libbpf_reallocarray(ext, obj->nr_extern + 1, sizeof(*ext));
3640 if (!ext)
3641 return -ENOMEM;
3642 obj->externs = ext;
3643 ext = &ext[obj->nr_extern];
3644 memset(ext, 0, sizeof(*ext));
3645 obj->nr_extern++;
3646
3647 ext->btf_id = find_extern_btf_id(obj->btf, ext_name);
3648 if (ext->btf_id <= 0) {
3649 pr_warn("failed to find BTF for extern '%s': %d\n",
3650 ext_name, ext->btf_id);
3651 return ext->btf_id;
3652 }
3653 t = btf__type_by_id(obj->btf, ext->btf_id);
3654 ext->name = btf__name_by_offset(obj->btf, t->name_off);
3655 ext->sym_idx = i;
3656 ext->is_weak = ELF64_ST_BIND(sym->st_info) == STB_WEAK;
3657
3658 ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id);
3659 if (ext->sec_btf_id <= 0) {
3660 pr_warn("failed to find BTF for extern '%s' [%d] section: %d\n",
3661 ext_name, ext->btf_id, ext->sec_btf_id);
3662 return ext->sec_btf_id;
3663 }
3664 sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id);
3665 sec_name = btf__name_by_offset(obj->btf, sec->name_off);
3666
3667 if (strcmp(sec_name, KCONFIG_SEC) == 0) {
3668 if (btf_is_func(t)) {
3669 pr_warn("extern function %s is unsupported under %s section\n",
3670 ext->name, KCONFIG_SEC);
3671 return -ENOTSUP;
3672 }
3673 kcfg_sec = sec;
3674 ext->type = EXT_KCFG;
3675 ext->kcfg.sz = btf__resolve_size(obj->btf, t->type);
3676 if (ext->kcfg.sz <= 0) {
3677 pr_warn("failed to resolve size of extern (kcfg) '%s': %d\n",
3678 ext_name, ext->kcfg.sz);
3679 return ext->kcfg.sz;
3680 }
3681 ext->kcfg.align = btf__align_of(obj->btf, t->type);
3682 if (ext->kcfg.align <= 0) {
3683 pr_warn("failed to determine alignment of extern (kcfg) '%s': %d\n",
3684 ext_name, ext->kcfg.align);
3685 return -EINVAL;
3686 }
3687 ext->kcfg.type = find_kcfg_type(obj->btf, t->type,
3688 &ext->kcfg.is_signed);
3689 if (ext->kcfg.type == KCFG_UNKNOWN) {
3690 pr_warn("extern (kcfg) '%s' type is unsupported\n", ext_name);
3691 return -ENOTSUP;
3692 }
3693 } else if (strcmp(sec_name, KSYMS_SEC) == 0) {
3694 ksym_sec = sec;
3695 ext->type = EXT_KSYM;
3696 skip_mods_and_typedefs(obj->btf, t->type,
3697 &ext->ksym.type_id);
3698 } else {
3699 pr_warn("unrecognized extern section '%s'\n", sec_name);
3700 return -ENOTSUP;
3701 }
3702 }
3703 pr_debug("collected %d externs total\n", obj->nr_extern);
3704
3705 if (!obj->nr_extern)
		return 0;

	/* sort externs by type, for kcfg ones also by (align, size, name) */
	qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs);

	/* for .ksyms section, we need to turn all externs into allocated
	 * variables in BTF to pass kernel verification; we do this by
	 * pretending that each extern is a 4-byte integer variable
	 */
	if (ksym_sec) {
		/* find existing 4-byte integer type in BTF to use for fake
		 * extern variables in DATASEC
		 */
		int int_btf_id = find_int_btf_id(obj->btf);
		/* For extern function, a dummy_var added earlier
		 * will be used to replace the vs->type and
		 * its name string will be used to refill
		 * the missing param's name.
		 */
		const struct btf_type *dummy_var;

		dummy_var = btf__type_by_id(obj->btf, dummy_var_btf_id);
3728 for (i = 0; i < obj->nr_extern; i++) {
3729 ext = &obj->externs[i];
3730 if (ext->type != EXT_KSYM)
3731 continue;
3732 pr_debug("extern (ksym) #%d: symbol %d, name %s\n",
3733 i, ext->sym_idx, ext->name);
3734 }
3735
3736 sec = ksym_sec;
3737 n = btf_vlen(sec);
3738 for (i = 0, off = 0; i < n; i++, off += sizeof(int)) {
3739 struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
3740 struct btf_type *vt;
3741
3742 vt = (void *)btf__type_by_id(obj->btf, vs->type);
3743 ext_name = btf__name_by_offset(obj->btf, vt->name_off);
3744 ext = find_extern_by_name(obj, ext_name);
3745 if (!ext) {
3746 pr_warn("failed to find extern definition for BTF %s '%s'\n",
3747 btf_kind_str(vt), ext_name);
3748 return -ESRCH;
3749 }
3750 if (btf_is_func(vt)) {
3751 const struct btf_type *func_proto;
3752 struct btf_param *param;
3753 int j;
3754
3755 func_proto = btf__type_by_id(obj->btf,
3756 vt->type);
				param = btf_params(func_proto);
				/* Reuse the dummy_var string if the
				 * func proto does not have param name.
				 */
				for (j = 0; j < btf_vlen(func_proto); j++)
3762 if (param[j].type && !param[j].name_off)
3763 param[j].name_off =
3764 dummy_var->name_off;
3765 vs->type = dummy_var_btf_id;
3766 vt->info &= ~0xffff;
3767 vt->info |= BTF_FUNC_GLOBAL;
3768 } else {
3769 btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
3770 vt->type = int_btf_id;
3771 }
3772 vs->offset = off;
3773 vs->size = sizeof(int);
3774 }
3775 sec->size = off;
3776 }
3777
3778 if (kcfg_sec) {
		sec = kcfg_sec;

		/* for kcfg externs calculate their offsets within the .kconfig map */
		off = 0;
3782 for (i = 0; i < obj->nr_extern; i++) {
3783 ext = &obj->externs[i];
3784 if (ext->type != EXT_KCFG)
3785 continue;
3786
3787 ext->kcfg.data_off = roundup(off, ext->kcfg.align);
3788 off = ext->kcfg.data_off + ext->kcfg.sz;
3789 pr_debug("extern (kcfg) #%d: symbol %d, off %u, name %s\n",
3790 i, ext->sym_idx, ext->kcfg.data_off, ext->name);
3791 }
3792 sec->size = off;
3793 n = btf_vlen(sec);
3794 for (i = 0; i < n; i++) {
3795 struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
3796
3797 t = btf__type_by_id(obj->btf, vs->type);
3798 ext_name = btf__name_by_offset(obj->btf, t->name_off);
3799 ext = find_extern_by_name(obj, ext_name);
3800 if (!ext) {
3801 pr_warn("failed to find extern definition for BTF var '%s'\n",
3802 ext_name);
3803 return -ESRCH;
3804 }
3805 btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
3806 vs->offset = ext->kcfg.data_off;
3807 }
3808 }
3809 return 0;
3810}
3811
3812struct bpf_program *
3813bpf_object__find_program_by_title(const struct bpf_object *obj,
3814 const char *title)
3815{
3816 struct bpf_program *pos;
3817
3818 bpf_object__for_each_program(pos, obj) {
3819 if (pos->sec_name && !strcmp(pos->sec_name, title))
3820 return pos;
3821 }
3822 return errno = ENOENT, NULL;
3823}
3824
3825static bool prog_is_subprog(const struct bpf_object *obj,
3826 const struct bpf_program *prog)
3827{
3828
3829
3830
3831
3832
3833
3834
3835
3836
3837
3838
3839
3840 return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1;
3841}
3842
3843struct bpf_program *
3844bpf_object__find_program_by_name(const struct bpf_object *obj,
3845 const char *name)
3846{
3847 struct bpf_program *prog;
3848
3849 bpf_object__for_each_program(prog, obj) {
3850 if (prog_is_subprog(obj, prog))
3851 continue;
3852 if (!strcmp(prog->name, name))
3853 return prog;
3854 }
3855 return errno = ENOENT, NULL;
3856}
3857
3858static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
3859 int shndx)
3860{
3861 switch (obj->efile.secs[shndx].sec_type) {
3862 case SEC_BSS:
3863 case SEC_DATA:
3864 case SEC_RODATA:
3865 return true;
3866 default:
3867 return false;
3868 }
3869}
3870
3871static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
3872 int shndx)
3873{
3874 return shndx == obj->efile.maps_shndx ||
3875 shndx == obj->efile.btf_maps_shndx;
3876}
3877
3878static enum libbpf_map_type
3879bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
3880{
3881 if (shndx == obj->efile.symbols_shndx)
3882 return LIBBPF_MAP_KCONFIG;
3883
3884 switch (obj->efile.secs[shndx].sec_type) {
3885 case SEC_BSS:
3886 return LIBBPF_MAP_BSS;
3887 case SEC_DATA:
3888 return LIBBPF_MAP_DATA;
3889 case SEC_RODATA:
3890 return LIBBPF_MAP_RODATA;
3891 default:
3892 return LIBBPF_MAP_UNSPEC;
3893 }
3894}
3895
3896static int bpf_program__record_reloc(struct bpf_program *prog,
3897 struct reloc_desc *reloc_desc,
3898 __u32 insn_idx, const char *sym_name,
3899 const Elf64_Sym *sym, const Elf64_Rel *rel)
3900{
3901 struct bpf_insn *insn = &prog->insns[insn_idx];
3902 size_t map_idx, nr_maps = prog->obj->nr_maps;
3903 struct bpf_object *obj = prog->obj;
3904 __u32 shdr_idx = sym->st_shndx;
3905 enum libbpf_map_type type;
3906 const char *sym_sec_name;
3907 struct bpf_map *map;
3908
3909 if (!is_call_insn(insn) && !is_ldimm64_insn(insn)) {
3910 pr_warn("prog '%s': invalid relo against '%s' for insns[%d].code 0x%x\n",
3911 prog->name, sym_name, insn_idx, insn->code);
3912 return -LIBBPF_ERRNO__RELOC;
3913 }
3914
3915 if (sym_is_extern(sym)) {
3916 int sym_idx = ELF64_R_SYM(rel->r_info);
3917 int i, n = obj->nr_extern;
3918 struct extern_desc *ext;
3919
3920 for (i = 0; i < n; i++) {
3921 ext = &obj->externs[i];
3922 if (ext->sym_idx == sym_idx)
3923 break;
3924 }
3925 if (i >= n) {
3926 pr_warn("prog '%s': extern relo failed to find extern for '%s' (%d)\n",
3927 prog->name, sym_name, sym_idx);
3928 return -LIBBPF_ERRNO__RELOC;
3929 }
3930 pr_debug("prog '%s': found extern #%d '%s' (sym %d) for insn #%u\n",
3931 prog->name, i, ext->name, ext->sym_idx, insn_idx);
3932 if (insn->code == (BPF_JMP | BPF_CALL))
3933 reloc_desc->type = RELO_EXTERN_FUNC;
3934 else
3935 reloc_desc->type = RELO_EXTERN_VAR;
3936 reloc_desc->insn_idx = insn_idx;
3937 reloc_desc->sym_off = i;
3938 return 0;
3939 }
3940
3941
3942 if (is_call_insn(insn)) {
3943 if (insn->src_reg != BPF_PSEUDO_CALL) {
3944 pr_warn("prog '%s': incorrect bpf_call opcode\n", prog->name);
3945 return -LIBBPF_ERRNO__RELOC;
3946 }
3947
		/* text_shndx can be 0, if no default "main" program exists */
		if (!shdr_idx || shdr_idx != obj->efile.text_shndx) {
3949 sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
3950 pr_warn("prog '%s': bad call relo against '%s' in section '%s'\n",
3951 prog->name, sym_name, sym_sec_name);
3952 return -LIBBPF_ERRNO__RELOC;
3953 }
3954 if (sym->st_value % BPF_INSN_SZ) {
3955 pr_warn("prog '%s': bad call relo against '%s' at offset %zu\n",
3956 prog->name, sym_name, (size_t)sym->st_value);
3957 return -LIBBPF_ERRNO__RELOC;
3958 }
3959 reloc_desc->type = RELO_CALL;
3960 reloc_desc->insn_idx = insn_idx;
3961 reloc_desc->sym_off = sym->st_value;
3962 return 0;
3963 }
3964
3965 if (!shdr_idx || shdr_idx >= SHN_LORESERVE) {
3966 pr_warn("prog '%s': invalid relo against '%s' in special section 0x%x; forgot to initialize global var?..\n",
3967 prog->name, sym_name, shdr_idx);
3968 return -LIBBPF_ERRNO__RELOC;
3969 }
3970
3971
3972 if (sym_is_subprog(sym, obj->efile.text_shndx)) {
3973
3974
3975
3976 if ((sym->st_value % BPF_INSN_SZ) || (insn->imm % BPF_INSN_SZ)) {
3977 pr_warn("prog '%s': bad subprog addr relo against '%s' at offset %zu+%d\n",
3978 prog->name, sym_name, (size_t)sym->st_value, insn->imm);
3979 return -LIBBPF_ERRNO__RELOC;
3980 }
3981
3982 reloc_desc->type = RELO_SUBPROG_ADDR;
3983 reloc_desc->insn_idx = insn_idx;
3984 reloc_desc->sym_off = sym->st_value;
3985 return 0;
3986 }
3987
3988 type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
	sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));

	/* generic map reference relocation */
	if (type == LIBBPF_MAP_UNSPEC) {
3993 if (!bpf_object__shndx_is_maps(obj, shdr_idx)) {
3994 pr_warn("prog '%s': bad map relo against '%s' in section '%s'\n",
3995 prog->name, sym_name, sym_sec_name);
3996 return -LIBBPF_ERRNO__RELOC;
3997 }
3998 for (map_idx = 0; map_idx < nr_maps; map_idx++) {
3999 map = &obj->maps[map_idx];
4000 if (map->libbpf_type != type ||
4001 map->sec_idx != sym->st_shndx ||
4002 map->sec_offset != sym->st_value)
4003 continue;
4004 pr_debug("prog '%s': found map %zd (%s, sec %d, off %zu) for insn #%u\n",
4005 prog->name, map_idx, map->name, map->sec_idx,
4006 map->sec_offset, insn_idx);
4007 break;
4008 }
4009 if (map_idx >= nr_maps) {
4010 pr_warn("prog '%s': map relo failed to find map for section '%s', off %zu\n",
4011 prog->name, sym_sec_name, (size_t)sym->st_value);
4012 return -LIBBPF_ERRNO__RELOC;
4013 }
4014 reloc_desc->type = RELO_LD64;
4015 reloc_desc->insn_idx = insn_idx;
4016 reloc_desc->map_idx = map_idx;
4017 reloc_desc->sym_off = 0;
4018 return 0;
4019 }
4020
4021
4022 if (!bpf_object__shndx_is_data(obj, shdr_idx)) {
4023 pr_warn("prog '%s': bad data relo against section '%s'\n",
4024 prog->name, sym_sec_name);
4025 return -LIBBPF_ERRNO__RELOC;
4026 }
4027 for (map_idx = 0; map_idx < nr_maps; map_idx++) {
4028 map = &obj->maps[map_idx];
4029 if (map->libbpf_type != type || map->sec_idx != sym->st_shndx)
4030 continue;
4031 pr_debug("prog '%s': found data map %zd (%s, sec %d, off %zu) for insn %u\n",
4032 prog->name, map_idx, map->name, map->sec_idx,
4033 map->sec_offset, insn_idx);
4034 break;
4035 }
4036 if (map_idx >= nr_maps) {
4037 pr_warn("prog '%s': data relo failed to find map for section '%s'\n",
4038 prog->name, sym_sec_name);
4039 return -LIBBPF_ERRNO__RELOC;
4040 }
4041
4042 reloc_desc->type = RELO_DATA;
4043 reloc_desc->insn_idx = insn_idx;
4044 reloc_desc->map_idx = map_idx;
4045 reloc_desc->sym_off = sym->st_value;
4046 return 0;
4047}
4048
4049static bool prog_contains_insn(const struct bpf_program *prog, size_t insn_idx)
4050{
4051 return insn_idx >= prog->sec_insn_off &&
4052 insn_idx < prog->sec_insn_off + prog->sec_insn_cnt;
4053}
4054
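/* Given an ELF section index and an instruction offset within that section,
 * find the containing BPF program via binary search over obj->programs,
 * which bpf_object__elf_collect() keeps sorted by cmp_progs().
 */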
4055static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj,
4056 size_t sec_idx, size_t insn_idx)
4057{
4058 int l = 0, r = obj->nr_programs - 1, m;
4059 struct bpf_program *prog;
4060
4061 while (l < r) {
4062 m = l + (r - l + 1) / 2;
4063 prog = &obj->programs[m];
4064
4065 if (prog->sec_idx < sec_idx ||
4066 (prog->sec_idx == sec_idx && prog->sec_insn_off <= insn_idx))
4067 l = m;
4068 else
4069 r = m - 1;
4070 }
4071
4072
4073
4074 prog = &obj->programs[l];
4075 if (prog->sec_idx == sec_idx && prog_contains_insn(prog, insn_idx))
4076 return prog;
4077 return NULL;
4078}
4079
4080static int
4081bpf_object__collect_prog_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Data *data)
4082{
4083 const char *relo_sec_name, *sec_name;
4084 size_t sec_idx = shdr->sh_info, sym_idx;
4085 struct bpf_program *prog;
4086 struct reloc_desc *relos;
4087 int err, i, nrels;
4088 const char *sym_name;
4089 __u32 insn_idx;
4090 Elf_Scn *scn;
4091 Elf_Data *scn_data;
4092 Elf64_Sym *sym;
4093 Elf64_Rel *rel;
4094
4095 if (sec_idx >= obj->efile.sec_cnt)
4096 return -EINVAL;
4097
4098 scn = elf_sec_by_idx(obj, sec_idx);
4099 scn_data = elf_sec_data(obj, scn);
4100
4101 relo_sec_name = elf_sec_str(obj, shdr->sh_name);
4102 sec_name = elf_sec_name(obj, scn);
4103 if (!relo_sec_name || !sec_name)
4104 return -EINVAL;
4105
4106 pr_debug("sec '%s': collecting relocation for section(%zu) '%s'\n",
4107 relo_sec_name, sec_idx, sec_name);
4108 nrels = shdr->sh_size / shdr->sh_entsize;
4109
4110 for (i = 0; i < nrels; i++) {
4111 rel = elf_rel_by_idx(data, i);
4112 if (!rel) {
4113 pr_warn("sec '%s': failed to get relo #%d\n", relo_sec_name, i);
4114 return -LIBBPF_ERRNO__FORMAT;
4115 }
4116
4117 sym_idx = ELF64_R_SYM(rel->r_info);
4118 sym = elf_sym_by_idx(obj, sym_idx);
4119 if (!sym) {
4120 pr_warn("sec '%s': symbol #%zu not found for relo #%d\n",
4121 relo_sec_name, sym_idx, i);
4122 return -LIBBPF_ERRNO__FORMAT;
4123 }
4124
4125 if (sym->st_shndx >= obj->efile.sec_cnt) {
4126 pr_warn("sec '%s': corrupted symbol #%zu pointing to invalid section #%zu for relo #%d\n",
4127 relo_sec_name, sym_idx, (size_t)sym->st_shndx, i);
4128 return -LIBBPF_ERRNO__FORMAT;
4129 }
4130
4131 if (rel->r_offset % BPF_INSN_SZ || rel->r_offset >= scn_data->d_size) {
4132 pr_warn("sec '%s': invalid offset 0x%zx for relo #%d\n",
4133 relo_sec_name, (size_t)rel->r_offset, i);
4134 return -LIBBPF_ERRNO__FORMAT;
4135 }
4136
		insn_idx = rel->r_offset / BPF_INSN_SZ;
		/* relocations against static functions are recorded as
		 * relocations against the section that contains a function;
		 * in such case, symbol will be STT_SECTION and sym->st_name
		 * will point to empty string (0), so fetch section name
		 * instead
		 */
		if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION && sym->st_name == 0)
4145 sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym->st_shndx));
4146 else
4147 sym_name = elf_sym_str(obj, sym->st_name);
		sym_name = sym_name ?: "<?>";
4149
4150 pr_debug("sec '%s': relo #%d: insn #%u against '%s'\n",
4151 relo_sec_name, i, insn_idx, sym_name);
4152
4153 prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
4154 if (!prog) {
4155 pr_debug("sec '%s': relo #%d: couldn't find program in section '%s' for insn #%u, probably overridden weak function, skipping...\n",
4156 relo_sec_name, i, sec_name, insn_idx);
4157 continue;
4158 }
4159
4160 relos = libbpf_reallocarray(prog->reloc_desc,
4161 prog->nr_reloc + 1, sizeof(*relos));
4162 if (!relos)
4163 return -ENOMEM;
		prog->reloc_desc = relos;

		/* adjust insn_idx to local BPF program frame of reference */
		insn_idx -= prog->sec_insn_off;
4168 err = bpf_program__record_reloc(prog, &relos[prog->nr_reloc],
4169 insn_idx, sym_name, sym, rel);
4170 if (err)
4171 return err;
4172
4173 prog->nr_reloc++;
4174 }
4175 return 0;
4176}
4177
4178static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
4179{
4180 struct bpf_map_def *def = &map->def;
4181 __u32 key_type_id = 0, value_type_id = 0;
	int ret;

	/* if it's BTF-defined map, we don't need to search for type IDs.
	 * For struct_ops map, it does not need btf_key_type_id and
	 * btf_value_type_id.
	 */
	if (map->sec_idx == obj->efile.btf_maps_shndx ||
	    bpf_map__is_struct_ops(map))
		return 0;
4191
4192 if (!bpf_map__is_internal(map)) {
4193 ret = btf__get_map_kv_tids(obj->btf, map->name, def->key_size,
4194 def->value_size, &key_type_id,
4195 &value_type_id);
	} else {
		/* LLVM annotates global data differently in BTF, that is,
		 * only struct is meaningful BTF, any kind of
		 * value_size is accepted.
		 */
		ret = btf__find_by_name(obj->btf, map->real_name);
4202 }
4203 if (ret < 0)
4204 return ret;
4205
4206 map->btf_key_type_id = key_type_id;
4207 map->btf_value_type_id = bpf_map__is_internal(map) ?
4208 ret : value_type_id;
4209 return 0;
4210}
4211
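/* Fallback for kernels where bpf_obj_get_info_by_fd() is not supported for
 * maps (fails with EINVAL): parse the same fields from the textual
 * /proc/<pid>/fdinfo/<fd> representation.
 */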
4212static int bpf_get_map_info_from_fdinfo(int fd, struct bpf_map_info *info)
4213{
4214 char file[PATH_MAX], buff[4096];
4215 FILE *fp;
4216 __u32 val;
4217 int err;
4218
4219 snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd);
4220 memset(info, 0, sizeof(*info));
4221
4222 fp = fopen(file, "r");
4223 if (!fp) {
4224 err = -errno;
4225 pr_warn("failed to open %s: %d. No procfs support?\n", file,
4226 err);
4227 return err;
4228 }
4229
4230 while (fgets(buff, sizeof(buff), fp)) {
4231 if (sscanf(buff, "map_type:\t%u", &val) == 1)
4232 info->type = val;
4233 else if (sscanf(buff, "key_size:\t%u", &val) == 1)
4234 info->key_size = val;
4235 else if (sscanf(buff, "value_size:\t%u", &val) == 1)
4236 info->value_size = val;
4237 else if (sscanf(buff, "max_entries:\t%u", &val) == 1)
4238 info->max_entries = val;
4239 else if (sscanf(buff, "map_flags:\t%i", &val) == 1)
4240 info->map_flags = val;
4241 }
4242
4243 fclose(fp);
4244
4245 return 0;
4246}
4247
4248int bpf_map__reuse_fd(struct bpf_map *map, int fd)
4249{
4250 struct bpf_map_info info = {};
4251 __u32 len = sizeof(info);
4252 int new_fd, err;
4253 char *new_name;
4254
4255 err = bpf_obj_get_info_by_fd(fd, &info, &len);
4256 if (err && errno == EINVAL)
4257 err = bpf_get_map_info_from_fdinfo(fd, &info);
4258 if (err)
4259 return libbpf_err(err);
4260
4261 new_name = strdup(info.name);
4262 if (!new_name)
4263 return libbpf_err(-errno);
4264
4265 new_fd = open("/", O_RDONLY | O_CLOEXEC);
4266 if (new_fd < 0) {
4267 err = -errno;
4268 goto err_free_new_name;
4269 }
4270
4271 new_fd = dup3(fd, new_fd, O_CLOEXEC);
4272 if (new_fd < 0) {
4273 err = -errno;
4274 goto err_close_new_fd;
4275 }
4276
4277 err = zclose(map->fd);
4278 if (err) {
4279 err = -errno;
4280 goto err_close_new_fd;
4281 }
4282 free(map->name);
4283
4284 map->fd = new_fd;
4285 map->name = new_name;
4286 map->def.type = info.type;
4287 map->def.key_size = info.key_size;
4288 map->def.value_size = info.value_size;
4289 map->def.max_entries = info.max_entries;
4290 map->def.map_flags = info.map_flags;
4291 map->btf_key_type_id = info.btf_key_type_id;
4292 map->btf_value_type_id = info.btf_value_type_id;
4293 map->reused = true;
4294 map->map_extra = info.map_extra;
4295
4296 return 0;
4297
4298err_close_new_fd:
4299 close(new_fd);
4300err_free_new_name:
4301 free(new_name);
4302 return libbpf_err(err);
4303}
4304
4305__u32 bpf_map__max_entries(const struct bpf_map *map)
4306{
4307 return map->def.max_entries;
4308}
4309
4310struct bpf_map *bpf_map__inner_map(struct bpf_map *map)
4311{
4312 if (!bpf_map_type__is_map_in_map(map->def.type))
4313 return errno = EINVAL, NULL;
4314
4315 return map->inner_map;
4316}
4317
4318int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
4319{
4320 if (map->fd >= 0)
4321 return libbpf_err(-EBUSY);
4322 map->def.max_entries = max_entries;
4323 return 0;
4324}
4325
4326int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
4327{
4328 if (!map || !max_entries)
4329 return libbpf_err(-EINVAL);
4330
4331 return bpf_map__set_max_entries(map, max_entries);
4332}
4333
4334static int
4335bpf_object__probe_loading(struct bpf_object *obj)
4336{
4337 char *cp, errmsg[STRERR_BUFSIZE];
4338 struct bpf_insn insns[] = {
4339 BPF_MOV64_IMM(BPF_REG_0, 0),
4340 BPF_EXIT_INSN(),
4341 };
4342 int ret, insn_cnt = ARRAY_SIZE(insns);
4343
4344 if (obj->gen_loader)
4345 return 0;
4346
4347 ret = bump_rlimit_memlock();
4348 if (ret)
4349 pr_warn("Failed to bump RLIMIT_MEMLOCK (err = %d), you might need to do it explicitly!\n", ret);
4350
4351
4352 ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL);
4353 if (ret < 0)
4354 ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, NULL);
4355 if (ret < 0) {
4356 ret = errno;
4357 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
4358 pr_warn("Error in %s():%s(%d). Couldn't load trivial BPF "
4359 "program. Make sure your kernel supports BPF "
4360 "(CONFIG_BPF_SYSCALL=y) and/or that RLIMIT_MEMLOCK is "
4361 "set to big enough value.\n", __func__, cp, ret);
4362 return -ret;
4363 }
4364 close(ret);
4365
4366 return 0;
4367}
4368
4369static int probe_fd(int fd)
4370{
4371 if (fd >= 0)
4372 close(fd);
4373 return fd >= 0;
4374}
4375
4376static int probe_kern_prog_name(void)
4377{
4378 struct bpf_insn insns[] = {
4379 BPF_MOV64_IMM(BPF_REG_0, 0),
4380 BPF_EXIT_INSN(),
4381 };
	int ret, insn_cnt = ARRAY_SIZE(insns);

	/* make sure loading with name works */
	ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "test", "GPL", insns, insn_cnt, NULL);
4386 return probe_fd(ret);
4387}
4388
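/* Probe global data support: create a single-entry ARRAY map and load a
 * trivial program that stores into its value through a ld_imm64 instruction
 * with BPF_PSEUDO_MAP_VALUE (the BPF_LD_MAP_VALUE() macro below).
 */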
4389static int probe_kern_global_data(void)
4390{
4391 char *cp, errmsg[STRERR_BUFSIZE];
4392 struct bpf_insn insns[] = {
4393 BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
4394 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
4395 BPF_MOV64_IMM(BPF_REG_0, 0),
4396 BPF_EXIT_INSN(),
4397 };
4398 int ret, map, insn_cnt = ARRAY_SIZE(insns);
4399
4400 map = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int), 32, 1, NULL);
4401 if (map < 0) {
4402 ret = -errno;
4403 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
4404 pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
4405 __func__, cp, -ret);
4406 return ret;
4407 }
4408
4409 insns[0].imm = map;
4410
4411 ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL);
4412 close(map);
4413 return probe_fd(ret);
4414}
4415
4416static int probe_kern_btf(void)
4417{
4418 static const char strs[] = "\0int";
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),	/* [1] */
	};
4423
4424 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4425 strs, sizeof(strs)));
4426}
4427
4428static int probe_kern_btf_func(void)
4429{
4430 static const char strs[] = "\0int\0x\0a";
4431
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),	/* [1] */
		/* FUNC_PROTO */				/* [2] */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
		BTF_PARAM_ENC(7, 1),
		/* FUNC x */					/* [3] */
		BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
	};
4441
4442 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4443 strs, sizeof(strs)));
4444}
4445
4446static int probe_kern_btf_func_global(void)
4447{
4448 static const char strs[] = "\0int\0x\0a";
4449
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),	/* [1] */
		/* FUNC_PROTO */				/* [2] */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
		BTF_PARAM_ENC(7, 1),
		/* FUNC x, marked BTF_FUNC_GLOBAL */		/* [3] */
		BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2),
	};
4459
4460 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4461 strs, sizeof(strs)));
4462}
4463
4464static int probe_kern_btf_datasec(void)
4465{
4466 static const char strs[] = "\0x\0.data";
4467
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),	/* [1] */
		/* VAR x */					/* [2] */
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
		BTF_VAR_STATIC,
		/* DATASEC '.data' */				/* [3] */
		BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
		BTF_VAR_SECINFO_ENC(2, 0, 4),
	};
4478
4479 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4480 strs, sizeof(strs)));
4481}
4482
4483static int probe_kern_btf_float(void)
4484{
4485 static const char strs[] = "\0float";
	__u32 types[] = {
		/* float */
		BTF_TYPE_FLOAT_ENC(1, 4),
	};
4490
4491 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4492 strs, sizeof(strs)));
4493}
4494
4495static int probe_kern_btf_decl_tag(void)
4496{
4497 static const char strs[] = "\0tag";
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),	/* [1] */
		/* VAR 'tag' */					/* [2] */
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
		BTF_VAR_STATIC,
		/* DECL_TAG 'tag' attached to VAR [2] */	/* [3] */
		BTF_TYPE_DECL_TAG_ENC(1, 2, -1),
	};
4507
4508 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4509 strs, sizeof(strs)));
4510}
4511
4512static int probe_kern_btf_type_tag(void)
4513{
4514 static const char strs[] = "\0tag";
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),		/* [1] */
		/* TYPE_TAG 'tag' pointing to int [1] */		/* [2] */
		BTF_TYPE_TYPE_TAG_ENC(1, 1),
		/* ptr to tagged type [2] */				/* [3] */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2),
	};
4523
4524 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4525 strs, sizeof(strs)));
4526}
4527
4528static int probe_kern_array_mmap(void)
4529{
4530 LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_MMAPABLE);
4531 int fd;
4532
4533 fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int), sizeof(int), 1, &opts);
4534 return probe_fd(fd);
4535}
4536
4537static int probe_kern_exp_attach_type(void)
4538{
4539 LIBBPF_OPTS(bpf_prog_load_opts, opts, .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE);
4540 struct bpf_insn insns[] = {
4541 BPF_MOV64_IMM(BPF_REG_0, 0),
4542 BPF_EXIT_INSN(),
4543 };
4544 int fd, insn_cnt = ARRAY_SIZE(insns);
4545
	/* use any valid combination of program type and (optional)
	 * non-zero expected attach type (i.e., not a BPF_CGROUP_INET_INGRESS)
	 * to see if kernel supports expected_attach_type field for
	 * BPF_PROG_LOAD command
	 */
4551 fd = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", insns, insn_cnt, &opts);
4552 return probe_fd(fd);
4553}
4554
4555static int probe_kern_probe_read_kernel(void)
4556{
4557 struct bpf_insn insns[] = {
4558 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4559 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
4560 BPF_MOV64_IMM(BPF_REG_2, 8),
4561 BPF_MOV64_IMM(BPF_REG_3, 0),
4562 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel),
4563 BPF_EXIT_INSN(),
4564 };
4565 int fd, insn_cnt = ARRAY_SIZE(insns);
4566
4567 fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL", insns, insn_cnt, NULL);
4568 return probe_fd(fd);
4569}
4570
4571static int probe_prog_bind_map(void)
4572{
4573 char *cp, errmsg[STRERR_BUFSIZE];
4574 struct bpf_insn insns[] = {
4575 BPF_MOV64_IMM(BPF_REG_0, 0),
4576 BPF_EXIT_INSN(),
4577 };
4578 int ret, map, prog, insn_cnt = ARRAY_SIZE(insns);
4579
4580 map = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int), 32, 1, NULL);
4581 if (map < 0) {
4582 ret = -errno;
4583 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
4584 pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
4585 __func__, cp, -ret);
4586 return ret;
4587 }
4588
4589 prog = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL);
4590 if (prog < 0) {
4591 close(map);
4592 return 0;
4593 }
4594
4595 ret = bpf_prog_bind_map(prog, map, NULL);
4596
4597 close(map);
4598 close(prog);
4599
4600 return ret >= 0;
4601}
4602
4603static int probe_module_btf(void)
4604{
4605 static const char strs[] = "\0int";
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
	};
4610 struct bpf_btf_info info;
4611 __u32 len = sizeof(info);
4612 char name[16];
4613 int fd, err;
4614
4615 fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs));
4616 if (fd < 0)
4617 return 0;
4618
4619 memset(&info, 0, sizeof(info));
4620 info.name = ptr_to_u64(name);
4621 info.name_len = sizeof(name);
4622
	/* kernels that support module BTFs also support retrieving a BTF
	 * object's name via bpf_obj_get_info_by_fd(), so use successful
	 * name retrieval as the feature proxy
	 */
4627 err = bpf_obj_get_info_by_fd(fd, &info, &len);
4628 close(fd);
4629 return !err;
4630}
4631
4632static int probe_perf_link(void)
4633{
4634 struct bpf_insn insns[] = {
4635 BPF_MOV64_IMM(BPF_REG_0, 0),
4636 BPF_EXIT_INSN(),
4637 };
4638 int prog_fd, link_fd, err;
4639
4640 prog_fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL",
4641 insns, ARRAY_SIZE(insns), NULL);
4642 if (prog_fd < 0)
4643 return -errno;
4644
	/* use invalid perf_event FD to get EBADF, if link is supported;
	 * otherwise EINVAL should be returned
	 */
4648 link_fd = bpf_link_create(prog_fd, -1, BPF_PERF_EVENT, NULL);
	err = -errno; /* close() below can clobber errno */
4650
4651 if (link_fd >= 0)
4652 close(link_fd);
4653 close(prog_fd);
4654
4655 return link_fd < 0 && err == -EBADF;
4656}
4657
4658enum kern_feature_result {
4659 FEAT_UNKNOWN = 0,
4660 FEAT_SUPPORTED = 1,
4661 FEAT_MISSING = 2,
4662};
4663
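/* each feature probe returns >0 if the feature is supported, 0 if it is
 * not, and a negative error code if the probing itself failed
 */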
4664typedef int (*feature_probe_fn)(void);
4665
4666static struct kern_feature_desc {
4667 const char *desc;
4668 feature_probe_fn probe;
4669 enum kern_feature_result res;
4670} feature_probes[__FEAT_CNT] = {
4671 [FEAT_PROG_NAME] = {
4672 "BPF program name", probe_kern_prog_name,
4673 },
4674 [FEAT_GLOBAL_DATA] = {
4675 "global variables", probe_kern_global_data,
4676 },
4677 [FEAT_BTF] = {
4678 "minimal BTF", probe_kern_btf,
4679 },
4680 [FEAT_BTF_FUNC] = {
4681 "BTF functions", probe_kern_btf_func,
4682 },
4683 [FEAT_BTF_GLOBAL_FUNC] = {
4684 "BTF global function", probe_kern_btf_func_global,
4685 },
4686 [FEAT_BTF_DATASEC] = {
4687 "BTF data section and variable", probe_kern_btf_datasec,
4688 },
4689 [FEAT_ARRAY_MMAP] = {
4690 "ARRAY map mmap()", probe_kern_array_mmap,
4691 },
4692 [FEAT_EXP_ATTACH_TYPE] = {
4693 "BPF_PROG_LOAD expected_attach_type attribute",
4694 probe_kern_exp_attach_type,
4695 },
4696 [FEAT_PROBE_READ_KERN] = {
4697 "bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel,
4698 },
4699 [FEAT_PROG_BIND_MAP] = {
4700 "BPF_PROG_BIND_MAP support", probe_prog_bind_map,
4701 },
4702 [FEAT_MODULE_BTF] = {
4703 "module BTF support", probe_module_btf,
4704 },
4705 [FEAT_BTF_FLOAT] = {
4706 "BTF_KIND_FLOAT support", probe_kern_btf_float,
4707 },
4708 [FEAT_PERF_LINK] = {
4709 "BPF perf link support", probe_perf_link,
4710 },
4711 [FEAT_BTF_DECL_TAG] = {
4712 "BTF_KIND_DECL_TAG support", probe_kern_btf_decl_tag,
4713 },
4714 [FEAT_BTF_TYPE_TAG] = {
4715 "BTF_KIND_TYPE_TAG support", probe_kern_btf_type_tag,
4716 },
4717 [FEAT_MEMCG_ACCOUNT] = {
4718 "memcg-based memory accounting", probe_memcg_account,
4719 },
4720};
4721
4722bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
4723{
4724 struct kern_feature_desc *feat = &feature_probes[feat_id];
4725 int ret;
4726
	if (obj && obj->gen_loader)
		/* To generate loader program assume the latest kernel
		 * to avoid doing extra prog_load, map_create syscalls.
		 */
		return true;
4732
4733 if (READ_ONCE(feat->res) == FEAT_UNKNOWN) {
4734 ret = feat->probe();
4735 if (ret > 0) {
4736 WRITE_ONCE(feat->res, FEAT_SUPPORTED);
4737 } else if (ret == 0) {
4738 WRITE_ONCE(feat->res, FEAT_MISSING);
4739 } else {
4740 pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret);
4741 WRITE_ONCE(feat->res, FEAT_MISSING);
4742 }
4743 }
4744
4745 return READ_ONCE(feat->res) == FEAT_SUPPORTED;
4746}
4747
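/* compare a pinned map's kernel-reported parameters against the local map
 * definition; reuse is only allowed on an exact match
 */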
4748static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
4749{
4750 struct bpf_map_info map_info = {};
4751 char msg[STRERR_BUFSIZE];
4752 __u32 map_info_len;
4753 int err;
4754
4755 map_info_len = sizeof(map_info);
4756
4757 err = bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len);
4758 if (err && errno == EINVAL)
4759 err = bpf_get_map_info_from_fdinfo(map_fd, &map_info);
4760 if (err) {
4761 pr_warn("failed to get map info for map FD %d: %s\n", map_fd,
4762 libbpf_strerror_r(errno, msg, sizeof(msg)));
4763 return false;
4764 }
4765
4766 return (map_info.type == map->def.type &&
4767 map_info.key_size == map->def.key_size &&
4768 map_info.value_size == map->def.value_size &&
4769 map_info.max_entries == map->def.max_entries &&
4770 map_info.map_flags == map->def.map_flags &&
4771 map_info.map_extra == map->map_extra);
4772}
4773
4774static int
4775bpf_object__reuse_map(struct bpf_map *map)
4776{
4777 char *cp, errmsg[STRERR_BUFSIZE];
4778 int err, pin_fd;
4779
4780 pin_fd = bpf_obj_get(map->pin_path);
4781 if (pin_fd < 0) {
4782 err = -errno;
4783 if (err == -ENOENT) {
4784 pr_debug("found no pinned map to reuse at '%s'\n",
4785 map->pin_path);
4786 return 0;
4787 }
4788
4789 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
4790 pr_warn("couldn't retrieve pinned map '%s': %s\n",
4791 map->pin_path, cp);
4792 return err;
4793 }
4794
4795 if (!map_is_reuse_compat(map, pin_fd)) {
4796 pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n",
4797 map->pin_path);
4798 close(pin_fd);
4799 return -EINVAL;
4800 }
4801
4802 err = bpf_map__reuse_fd(map, pin_fd);
4803 if (err) {
4804 close(pin_fd);
4805 return err;
4806 }
4807 map->pinned = true;
4808 pr_debug("reused pinned map at '%s'\n", map->pin_path);
4809
4810 return 0;
4811}
4812
4813static int
4814bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
4815{
4816 enum libbpf_map_type map_type = map->libbpf_type;
4817 char *cp, errmsg[STRERR_BUFSIZE];
4818 int err, zero = 0;
4819
4820 if (obj->gen_loader) {
4821 bpf_gen__map_update_elem(obj->gen_loader, map - obj->maps,
4822 map->mmaped, map->def.value_size);
4823 if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG)
4824 bpf_gen__map_freeze(obj->gen_loader, map - obj->maps);
4825 return 0;
4826 }
4827 err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0);
4828 if (err) {
4829 err = -errno;
4830 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4831 pr_warn("Error setting initial map(%s) contents: %s\n",
4832 map->name, cp);
4833 return err;
4834 }
4835
	/* Freeze .rodata and .kconfig map as read-only from syscall side. */
4837 if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG) {
4838 err = bpf_map_freeze(map->fd);
4839 if (err) {
4840 err = -errno;
4841 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4842 pr_warn("Error freezing map(%s) as read-only: %s\n",
4843 map->name, cp);
4844 return err;
4845 }
4846 }
4847 return 0;
4848}
4849
4850static void bpf_map__destroy(struct bpf_map *map);
4851
4852static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner)
4853{
4854 LIBBPF_OPTS(bpf_map_create_opts, create_attr);
4855 struct bpf_map_def *def = &map->def;
4856 const char *map_name = NULL;
4857 __u32 max_entries;
4858 int err = 0;
4859
4860 if (kernel_supports(obj, FEAT_PROG_NAME))
4861 map_name = map->name;
4862 create_attr.map_ifindex = map->map_ifindex;
4863 create_attr.map_flags = def->map_flags;
4864 create_attr.numa_node = map->numa_node;
4865 create_attr.map_extra = map->map_extra;
4866
4867 if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !def->max_entries) {
4868 int nr_cpus;
4869
4870 nr_cpus = libbpf_num_possible_cpus();
4871 if (nr_cpus < 0) {
4872 pr_warn("map '%s': failed to determine number of system CPUs: %d\n",
4873 map->name, nr_cpus);
4874 return nr_cpus;
4875 }
4876 pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus);
4877 max_entries = nr_cpus;
4878 } else {
4879 max_entries = def->max_entries;
4880 }
4881
4882 if (bpf_map__is_struct_ops(map))
4883 create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
4884
4885 if (obj->btf && btf__fd(obj->btf) >= 0 && !bpf_map_find_btf_info(obj, map)) {
4886 create_attr.btf_fd = btf__fd(obj->btf);
4887 create_attr.btf_key_type_id = map->btf_key_type_id;
4888 create_attr.btf_value_type_id = map->btf_value_type_id;
4889 }
4890
4891 if (bpf_map_type__is_map_in_map(def->type)) {
4892 if (map->inner_map) {
4893 err = bpf_object__create_map(obj, map->inner_map, true);
4894 if (err) {
4895 pr_warn("map '%s': failed to create inner map: %d\n",
4896 map->name, err);
4897 return err;
4898 }
4899 map->inner_map_fd = bpf_map__fd(map->inner_map);
4900 }
4901 if (map->inner_map_fd >= 0)
4902 create_attr.inner_map_fd = map->inner_map_fd;
4903 }
4904
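	/* The kernel doesn't accept BTF key/value type info for these map
	 * types, so don't pass any and don't keep stale type IDs around.
	 */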
4905 switch (def->type) {
4906 case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
4907 case BPF_MAP_TYPE_CGROUP_ARRAY:
4908 case BPF_MAP_TYPE_STACK_TRACE:
4909 case BPF_MAP_TYPE_ARRAY_OF_MAPS:
4910 case BPF_MAP_TYPE_HASH_OF_MAPS:
4911 case BPF_MAP_TYPE_DEVMAP:
4912 case BPF_MAP_TYPE_DEVMAP_HASH:
4913 case BPF_MAP_TYPE_CPUMAP:
4914 case BPF_MAP_TYPE_XSKMAP:
4915 case BPF_MAP_TYPE_SOCKMAP:
4916 case BPF_MAP_TYPE_SOCKHASH:
4917 case BPF_MAP_TYPE_QUEUE:
4918 case BPF_MAP_TYPE_STACK:
4919 case BPF_MAP_TYPE_RINGBUF:
4920 create_attr.btf_fd = 0;
4921 create_attr.btf_key_type_id = 0;
4922 create_attr.btf_value_type_id = 0;
4923 map->btf_key_type_id = 0;
4924 map->btf_value_type_id = 0;
4925 default:
4926 break;
4927 }
4928
4929 if (obj->gen_loader) {
4930 bpf_gen__map_create(obj->gen_loader, def->type, map_name,
4931 def->key_size, def->value_size, max_entries,
4932 &create_attr, is_inner ? -1 : map - obj->maps);
		/* Pretend to have valid FD to pass various fd >= 0 checks.
		 * This fd == 0 will not be used with any syscall and will be
		 * reset to -1 eventually.
		 */
		map->fd = 0;
4937 } else {
4938 map->fd = bpf_map_create(def->type, map_name,
4939 def->key_size, def->value_size,
4940 max_entries, &create_attr);
4941 }
4942 if (map->fd < 0 && (create_attr.btf_key_type_id ||
4943 create_attr.btf_value_type_id)) {
4944 char *cp, errmsg[STRERR_BUFSIZE];
4945
4946 err = -errno;
4947 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4948 pr_warn("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
4949 map->name, cp, err);
4950 create_attr.btf_fd = 0;
4951 create_attr.btf_key_type_id = 0;
4952 create_attr.btf_value_type_id = 0;
4953 map->btf_key_type_id = 0;
4954 map->btf_value_type_id = 0;
4955 map->fd = bpf_map_create(def->type, map_name,
4956 def->key_size, def->value_size,
4957 max_entries, &create_attr);
4958 }
4959
4960 err = map->fd < 0 ? -errno : 0;
4961
4962 if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) {
4963 if (obj->gen_loader)
4964 map->inner_map->fd = -1;
4965 bpf_map__destroy(map->inner_map);
4966 zfree(&map->inner_map);
4967 }
4968
4969 return err;
4970}
4971
4972static int init_map_in_map_slots(struct bpf_object *obj, struct bpf_map *map)
4973{
4974 const struct bpf_map *targ_map;
4975 unsigned int i;
4976 int fd, err = 0;
4977
4978 for (i = 0; i < map->init_slots_sz; i++) {
4979 if (!map->init_slots[i])
4980 continue;
4981
4982 targ_map = map->init_slots[i];
4983 fd = bpf_map__fd(targ_map);
4984
4985 if (obj->gen_loader) {
4986 bpf_gen__populate_outer_map(obj->gen_loader,
4987 map - obj->maps, i,
4988 targ_map - obj->maps);
4989 } else {
4990 err = bpf_map_update_elem(map->fd, &i, &fd, 0);
4991 }
4992 if (err) {
4993 err = -errno;
4994 pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n",
4995 map->name, i, targ_map->name, fd, err);
4996 return err;
4997 }
4998 pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n",
4999 map->name, i, targ_map->name, fd);
5000 }
5001
5002 zfree(&map->init_slots);
5003 map->init_slots_sz = 0;
5004
5005 return 0;
5006}
5007
5008static int init_prog_array_slots(struct bpf_object *obj, struct bpf_map *map)
5009{
5010 const struct bpf_program *targ_prog;
5011 unsigned int i;
5012 int fd, err;
5013
5014 if (obj->gen_loader)
5015 return -ENOTSUP;
5016
5017 for (i = 0; i < map->init_slots_sz; i++) {
5018 if (!map->init_slots[i])
5019 continue;
5020
5021 targ_prog = map->init_slots[i];
5022 fd = bpf_program__fd(targ_prog);
5023
5024 err = bpf_map_update_elem(map->fd, &i, &fd, 0);
5025 if (err) {
5026 err = -errno;
5027 pr_warn("map '%s': failed to initialize slot [%d] to prog '%s' fd=%d: %d\n",
5028 map->name, i, targ_prog->name, fd, err);
5029 return err;
5030 }
5031 pr_debug("map '%s': slot [%d] set to prog '%s' fd=%d\n",
5032 map->name, i, targ_prog->name, fd);
5033 }
5034
5035 zfree(&map->init_slots);
5036 map->init_slots_sz = 0;
5037
5038 return 0;
5039}
5040
5041static int bpf_object_init_prog_arrays(struct bpf_object *obj)
5042{
5043 struct bpf_map *map;
5044 int i, err;
5045
5046 for (i = 0; i < obj->nr_maps; i++) {
5047 map = &obj->maps[i];
5048
5049 if (!map->init_slots_sz || map->def.type != BPF_MAP_TYPE_PROG_ARRAY)
5050 continue;
5051
5052 err = init_prog_array_slots(obj, map);
5053 if (err < 0) {
5054 zclose(map->fd);
5055 return err;
5056 }
5057 }
5058 return 0;
5059}
5060
5061static int
5062bpf_object__create_maps(struct bpf_object *obj)
5063{
5064 struct bpf_map *map;
5065 char *cp, errmsg[STRERR_BUFSIZE];
5066 unsigned int i, j;
5067 int err;
5068 bool retried;
5069
5070 for (i = 0; i < obj->nr_maps; i++) {
5071 map = &obj->maps[i];
5072
		/* To support old kernels, we skip creating global data maps
		 * (.rodata, .data, .kconfig, etc); if a program later
		 * actually references such a map's contents,
		 * bpf_object__relocate_data() will fail the load with
		 * -ENOTSUP instead.
		 */
5087 if (bpf_map__is_internal(map) &&
5088 !kernel_supports(obj, FEAT_GLOBAL_DATA)) {
5089 map->skipped = true;
5090 continue;
5091 }
5092
5093 retried = false;
5094retry:
5095 if (map->pin_path) {
5096 err = bpf_object__reuse_map(map);
5097 if (err) {
5098 pr_warn("map '%s': error reusing pinned map\n",
5099 map->name);
5100 goto err_out;
5101 }
5102 if (retried && map->fd < 0) {
5103 pr_warn("map '%s': cannot find pinned map\n",
5104 map->name);
5105 err = -ENOENT;
5106 goto err_out;
5107 }
5108 }
5109
5110 if (map->fd >= 0) {
5111 pr_debug("map '%s': skipping creation (preset fd=%d)\n",
5112 map->name, map->fd);
5113 } else {
5114 err = bpf_object__create_map(obj, map, false);
5115 if (err)
5116 goto err_out;
5117
5118 pr_debug("map '%s': created successfully, fd=%d\n",
5119 map->name, map->fd);
5120
5121 if (bpf_map__is_internal(map)) {
5122 err = bpf_object__populate_internal_map(obj, map);
5123 if (err < 0) {
5124 zclose(map->fd);
5125 goto err_out;
5126 }
5127 }
5128
5129 if (map->init_slots_sz && map->def.type != BPF_MAP_TYPE_PROG_ARRAY) {
5130 err = init_map_in_map_slots(obj, map);
5131 if (err < 0) {
5132 zclose(map->fd);
5133 goto err_out;
5134 }
5135 }
5136 }
5137
5138 if (map->pin_path && !map->pinned) {
5139 err = bpf_map__pin(map, NULL);
5140 if (err) {
5141 zclose(map->fd);
5142 if (!retried && err == -EEXIST) {
5143 retried = true;
5144 goto retry;
5145 }
5146 pr_warn("map '%s': failed to auto-pin at '%s': %d\n",
5147 map->name, map->pin_path, err);
5148 goto err_out;
5149 }
5150 }
5151 }
5152
5153 return 0;
5154
5155err_out:
5156 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
5157 pr_warn("map '%s': failed to create: %s(%d)\n", map->name, cp, err);
5158 pr_perm_msg(err);
5159 for (j = 0; j < i; j++)
5160 zclose(obj->maps[j].fd);
5161 return err;
5162}
5163
5164static bool bpf_core_is_flavor_sep(const char *s)
5165{
	/* check X___Y name pattern, where X and Y are not underscores */
5167 return s[0] != '_' &&
5168 s[1] == '_' && s[2] == '_' && s[3] == '_' &&
5169 s[4] != '_';
5170}
5171
/* Given 'some_struct_name___with_flavor' return the length of a name prefix
 * before last triple underscore. Struct name part after last triple
 * underscore is ignored by BPF CO-RE relocation during relocation matching.
 */
5176size_t bpf_core_essential_name_len(const char *name)
5177{
5178 size_t n = strlen(name);
5179 int i;
5180
5181 for (i = n - 5; i >= 0; i--) {
5182 if (bpf_core_is_flavor_sep(name + i))
5183 return i + 1;
5184 }
5185 return n;
5186}
5187
5188static void bpf_core_free_cands(struct bpf_core_cand_list *cands)
5189{
5190 free(cands->cands);
5191 free(cands);
5192}
5193
5194static int bpf_core_add_cands(struct bpf_core_cand *local_cand,
5195 size_t local_essent_len,
5196 const struct btf *targ_btf,
5197 const char *targ_btf_name,
5198 int targ_start_id,
5199 struct bpf_core_cand_list *cands)
5200{
5201 struct bpf_core_cand *new_cands, *cand;
5202 const struct btf_type *t, *local_t;
5203 const char *targ_name, *local_name;
5204 size_t targ_essent_len;
5205 int n, i;
5206
5207 local_t = btf__type_by_id(local_cand->btf, local_cand->id);
5208 local_name = btf__str_by_offset(local_cand->btf, local_t->name_off);
5209
5210 n = btf__type_cnt(targ_btf);
5211 for (i = targ_start_id; i < n; i++) {
5212 t = btf__type_by_id(targ_btf, i);
5213 if (btf_kind(t) != btf_kind(local_t))
5214 continue;
5215
5216 targ_name = btf__name_by_offset(targ_btf, t->name_off);
5217 if (str_is_empty(targ_name))
5218 continue;
5219
5220 targ_essent_len = bpf_core_essential_name_len(targ_name);
5221 if (targ_essent_len != local_essent_len)
5222 continue;
5223
5224 if (strncmp(local_name, targ_name, local_essent_len) != 0)
5225 continue;
5226
5227 pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s in [%s]\n",
5228 local_cand->id, btf_kind_str(local_t),
5229 local_name, i, btf_kind_str(t), targ_name,
5230 targ_btf_name);
5231 new_cands = libbpf_reallocarray(cands->cands, cands->len + 1,
5232 sizeof(*cands->cands));
5233 if (!new_cands)
5234 return -ENOMEM;
5235
5236 cand = &new_cands[cands->len];
5237 cand->btf = targ_btf;
5238 cand->id = i;
5239
5240 cands->cands = new_cands;
5241 cands->len++;
5242 }
5243 return 0;
5244}
5245
5246static int load_module_btfs(struct bpf_object *obj)
5247{
5248 struct bpf_btf_info info;
5249 struct module_btf *mod_btf;
5250 struct btf *btf;
5251 char name[64];
5252 __u32 id = 0, len;
5253 int err, fd;
5254
5255 if (obj->btf_modules_loaded)
5256 return 0;
5257
5258 if (obj->gen_loader)
5259 return 0;
5260
	/* don't do this again, even if we fail this time */
5262 obj->btf_modules_loaded = true;
5263
	/* kernel too old to support module BTFs */
5265 if (!kernel_supports(obj, FEAT_MODULE_BTF))
5266 return 0;
5267
5268 while (true) {
5269 err = bpf_btf_get_next_id(id, &id);
5270 if (err && errno == ENOENT)
5271 return 0;
5272 if (err) {
5273 err = -errno;
5274 pr_warn("failed to iterate BTF objects: %d\n", err);
5275 return err;
5276 }
5277
5278 fd = bpf_btf_get_fd_by_id(id);
5279 if (fd < 0) {
5280 if (errno == ENOENT)
5281 continue;
5282 err = -errno;
5283 pr_warn("failed to get BTF object #%d FD: %d\n", id, err);
5284 return err;
5285 }
5286
5287 len = sizeof(info);
5288 memset(&info, 0, sizeof(info));
5289 info.name = ptr_to_u64(name);
5290 info.name_len = sizeof(name);
5291
5292 err = bpf_obj_get_info_by_fd(fd, &info, &len);
5293 if (err) {
5294 err = -errno;
5295 pr_warn("failed to get BTF object #%d info: %d\n", id, err);
5296 goto err_out;
5297 }
5298
		/* ignore non-module BTFs */
5300 if (!info.kernel_btf || strcmp(name, "vmlinux") == 0) {
5301 close(fd);
5302 continue;
5303 }
5304
5305 btf = btf_get_from_fd(fd, obj->btf_vmlinux);
5306 err = libbpf_get_error(btf);
5307 if (err) {
5308 pr_warn("failed to load module [%s]'s BTF object #%d: %d\n",
5309 name, id, err);
5310 goto err_out;
5311 }
5312
5313 err = libbpf_ensure_mem((void **)&obj->btf_modules, &obj->btf_module_cap,
5314 sizeof(*obj->btf_modules), obj->btf_module_cnt + 1);
5315 if (err)
5316 goto err_out;
5317
5318 mod_btf = &obj->btf_modules[obj->btf_module_cnt++];
5319
5320 mod_btf->btf = btf;
5321 mod_btf->id = id;
5322 mod_btf->fd = fd;
5323 mod_btf->name = strdup(name);
5324 if (!mod_btf->name) {
5325 err = -ENOMEM;
5326 goto err_out;
5327 }
5328 continue;
5329
5330err_out:
5331 close(fd);
5332 return err;
5333 }
5334
5335 return 0;
5336}
5337
5338static struct bpf_core_cand_list *
5339bpf_core_find_cands(struct bpf_object *obj, const struct btf *local_btf, __u32 local_type_id)
5340{
5341 struct bpf_core_cand local_cand = {};
5342 struct bpf_core_cand_list *cands;
5343 const struct btf *main_btf;
5344 const struct btf_type *local_t;
5345 const char *local_name;
5346 size_t local_essent_len;
5347 int err, i;
5348
5349 local_cand.btf = local_btf;
5350 local_cand.id = local_type_id;
5351 local_t = btf__type_by_id(local_btf, local_type_id);
5352 if (!local_t)
5353 return ERR_PTR(-EINVAL);
5354
5355 local_name = btf__name_by_offset(local_btf, local_t->name_off);
5356 if (str_is_empty(local_name))
5357 return ERR_PTR(-EINVAL);
5358 local_essent_len = bpf_core_essential_name_len(local_name);
5359
5360 cands = calloc(1, sizeof(*cands));
5361 if (!cands)
5362 return ERR_PTR(-ENOMEM);
5363
	/* attempt to find target candidates in vmlinux BTF first */
5365 main_btf = obj->btf_vmlinux_override ?: obj->btf_vmlinux;
5366 err = bpf_core_add_cands(&local_cand, local_essent_len, main_btf, "vmlinux", 1, cands);
5367 if (err)
5368 goto err_out;
5369
	/* if vmlinux BTF has any candidate, don't go for module BTFs */
5371 if (cands->len)
5372 return cands;
5373
	/* if vmlinux BTF was overridden, don't attempt to load module BTFs */
5375 if (obj->btf_vmlinux_override)
5376 return cands;
5377
	/* now look through module BTFs, trying to still find candidates */
5379 err = load_module_btfs(obj);
5380 if (err)
5381 goto err_out;
5382
5383 for (i = 0; i < obj->btf_module_cnt; i++) {
5384 err = bpf_core_add_cands(&local_cand, local_essent_len,
5385 obj->btf_modules[i].btf,
5386 obj->btf_modules[i].name,
5387 btf__type_cnt(obj->btf_vmlinux),
5388 cands);
5389 if (err)
5390 goto err_out;
5391 }
5392
5393 return cands;
5394err_out:
5395 bpf_core_free_cands(cands);
5396 return ERR_PTR(err);
5397}
5398
/* Check local and target types for compatibility. This check is used for
 * type-based CO-RE relocations and follows slightly different rules than
 * field-based relocations. This function assumes that root types were
 * already checked for name match. Back- and forth- recursion is anticipated.
 * Rules:
 *   - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
 *     kind should match for local and target types (i.e., STRUCT is not
 *     compatible with UNION);
 *   - for ENUMs, the size is ignored;
 *   - for INT, size and signedness are ignored;
 *   - for ARRAY, dimensionality is ignored, element types are checked for
 *     compatibility recursively;
 *   - CONST/VOLATILE/RESTRICT modifiers are ignored;
 *   - TYPEDEFs/PTRs are compatible if the types they point to are
 *     compatible;
 *   - FUNC_PROTOs are compatible if they have compatible signature: same
 *     number of input args and compatible return and argument types.
 * These rules are not set in stone and probably will be adjusted as deemed
 * necessary.
 */
5418int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
5419 const struct btf *targ_btf, __u32 targ_id)
5420{
5421 const struct btf_type *local_type, *targ_type;
5422 int depth = 32;
5423
	/* caller made sure that names match (ignoring flavor suffix) */
5425 local_type = btf__type_by_id(local_btf, local_id);
5426 targ_type = btf__type_by_id(targ_btf, targ_id);
5427 if (btf_kind(local_type) != btf_kind(targ_type))
5428 return 0;
5429
5430recur:
5431 depth--;
5432 if (depth < 0)
5433 return -EINVAL;
5434
5435 local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
5436 targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
5437 if (!local_type || !targ_type)
5438 return -EINVAL;
5439
5440 if (btf_kind(local_type) != btf_kind(targ_type))
5441 return 0;
5442
5443 switch (btf_kind(local_type)) {
5444 case BTF_KIND_UNKN:
5445 case BTF_KIND_STRUCT:
5446 case BTF_KIND_UNION:
5447 case BTF_KIND_ENUM:
5448 case BTF_KIND_FWD:
5449 return 1;
5450 case BTF_KIND_INT:
		/* just reject deprecated bitfield-like integers; all other
		 * integers are considered compatible regardless of size and
		 * signedness
		 */
5454 return btf_int_offset(local_type) == 0 && btf_int_offset(targ_type) == 0;
5455 case BTF_KIND_PTR:
5456 local_id = local_type->type;
5457 targ_id = targ_type->type;
5458 goto recur;
5459 case BTF_KIND_ARRAY:
5460 local_id = btf_array(local_type)->type;
5461 targ_id = btf_array(targ_type)->type;
5462 goto recur;
5463 case BTF_KIND_FUNC_PROTO: {
5464 struct btf_param *local_p = btf_params(local_type);
5465 struct btf_param *targ_p = btf_params(targ_type);
5466 __u16 local_vlen = btf_vlen(local_type);
5467 __u16 targ_vlen = btf_vlen(targ_type);
5468 int i, err;
5469
5470 if (local_vlen != targ_vlen)
5471 return 0;
5472
5473 for (i = 0; i < local_vlen; i++, local_p++, targ_p++) {
5474 skip_mods_and_typedefs(local_btf, local_p->type, &local_id);
5475 skip_mods_and_typedefs(targ_btf, targ_p->type, &targ_id);
5476 err = bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id);
5477 if (err <= 0)
5478 return err;
5479 }
5480
		/* tail recurse for return type check */
5482 skip_mods_and_typedefs(local_btf, local_type->type, &local_id);
5483 skip_mods_and_typedefs(targ_btf, targ_type->type, &targ_id);
5484 goto recur;
5485 }
5486 default:
5487 pr_warn("unexpected kind %s relocated, local [%d], target [%d]\n",
5488 btf_kind_str(local_type), local_id, targ_id);
5489 return 0;
5490 }
5491}
5492
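/* CO-RE candidate cache is keyed directly by the local BTF type ID (stored
 * as a pointer-sized integer), so hashing and equality are trivial
 */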
5493static size_t bpf_core_hash_fn(const void *key, void *ctx)
5494{
5495 return (size_t)key;
5496}
5497
5498static bool bpf_core_equal_fn(const void *k1, const void *k2, void *ctx)
5499{
5500 return k1 == k2;
5501}
5502
5503static void *u32_as_hash_key(__u32 x)
5504{
5505 return (void *)(uintptr_t)x;
5506}
5507
5508static int record_relo_core(struct bpf_program *prog,
5509 const struct bpf_core_relo *core_relo, int insn_idx)
5510{
5511 struct reloc_desc *relos, *relo;
5512
5513 relos = libbpf_reallocarray(prog->reloc_desc,
5514 prog->nr_reloc + 1, sizeof(*relos));
5515 if (!relos)
5516 return -ENOMEM;
5517 relo = &relos[prog->nr_reloc];
5518 relo->type = RELO_CORE;
5519 relo->insn_idx = insn_idx;
5520 relo->core_relo = core_relo;
5521 prog->reloc_desc = relos;
5522 prog->nr_reloc++;
5523 return 0;
5524}
5525
5526static int bpf_core_apply_relo(struct bpf_program *prog,
5527 const struct bpf_core_relo *relo,
5528 int relo_idx,
5529 const struct btf *local_btf,
5530 struct hashmap *cand_cache)
5531{
5532 struct bpf_core_spec specs_scratch[3] = {};
5533 const void *type_key = u32_as_hash_key(relo->type_id);
5534 struct bpf_core_cand_list *cands = NULL;
5535 const char *prog_name = prog->name;
5536 const struct btf_type *local_type;
5537 const char *local_name;
5538 __u32 local_id = relo->type_id;
5539 struct bpf_insn *insn;
5540 int insn_idx, err;
5541
5542 if (relo->insn_off % BPF_INSN_SZ)
5543 return -EINVAL;
5544 insn_idx = relo->insn_off / BPF_INSN_SZ;
5545
	/* adjust insn_idx from section frame of reference to the local
	 * program's frame of reference; (sub-)program code is not yet
	 * relocated, so it's enough to just subtract in-section offset
	 */
5549 insn_idx = insn_idx - prog->sec_insn_off;
5550 if (insn_idx >= prog->insns_cnt)
5551 return -EINVAL;
5552 insn = &prog->insns[insn_idx];
5553
5554 local_type = btf__type_by_id(local_btf, local_id);
5555 if (!local_type)
5556 return -EINVAL;
5557
5558 local_name = btf__name_by_offset(local_btf, local_type->name_off);
5559 if (!local_name)
5560 return -EINVAL;
5561
5562 if (prog->obj->gen_loader) {
5563 const char *spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
5564
5565 pr_debug("record_relo_core: prog %td insn[%d] %s %s %s final insn_idx %d\n",
5566 prog - prog->obj->programs, relo->insn_off / 8,
5567 btf_kind_str(local_type), local_name, spec_str, insn_idx);
5568 return record_relo_core(prog, relo, insn_idx);
5569 }
5570
5571 if (relo->kind != BPF_CORE_TYPE_ID_LOCAL &&
5572 !hashmap__find(cand_cache, type_key, (void **)&cands)) {
5573 cands = bpf_core_find_cands(prog->obj, local_btf, local_id);
5574 if (IS_ERR(cands)) {
5575 pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld\n",
5576 prog_name, relo_idx, local_id, btf_kind_str(local_type),
5577 local_name, PTR_ERR(cands));
5578 return PTR_ERR(cands);
5579 }
5580 err = hashmap__set(cand_cache, type_key, cands, NULL, NULL);
5581 if (err) {
5582 bpf_core_free_cands(cands);
5583 return err;
5584 }
5585 }
5586
5587 return bpf_core_apply_relo_insn(prog_name, insn, insn_idx, relo,
5588 relo_idx, local_btf, cands, specs_scratch);
5589}
5590
5591static int
5592bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
5593{
5594 const struct btf_ext_info_sec *sec;
5595 const struct bpf_core_relo *rec;
5596 const struct btf_ext_info *seg;
5597 struct hashmap_entry *entry;
5598 struct hashmap *cand_cache = NULL;
5599 struct bpf_program *prog;
5600 const char *sec_name;
5601 int i, err = 0, insn_idx, sec_idx;
5602
5603 if (obj->btf_ext->core_relo_info.len == 0)
5604 return 0;
5605
5606 if (targ_btf_path) {
5607 obj->btf_vmlinux_override = btf__parse(targ_btf_path, NULL);
5608 err = libbpf_get_error(obj->btf_vmlinux_override);
5609 if (err) {
5610 pr_warn("failed to parse target BTF: %d\n", err);
5611 return err;
5612 }
5613 }
5614
5615 cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL);
5616 if (IS_ERR(cand_cache)) {
5617 err = PTR_ERR(cand_cache);
5618 goto out;
5619 }
5620
5621 seg = &obj->btf_ext->core_relo_info;
5622 for_each_btf_ext_sec(seg, sec) {
5623 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
5624 if (str_is_empty(sec_name)) {
5625 err = -EINVAL;
5626 goto out;
5627 }
5628
		/* bpf_object's ELF is gone by now so it's not easy to find
		 * section index by section name, but we can find *any*
		 * bpf_program within desired section name and use its
		 * prog->sec_idx to do a proper search by section index and
		 * instruction offset
		 */
5634 prog = NULL;
5635 for (i = 0; i < obj->nr_programs; i++) {
5636 prog = &obj->programs[i];
5637 if (strcmp(prog->sec_name, sec_name) == 0)
5638 break;
5639 }
5640 if (!prog) {
5641 pr_warn("sec '%s': failed to find a BPF program\n", sec_name);
5642 return -ENOENT;
5643 }
5644 sec_idx = prog->sec_idx;
5645
5646 pr_debug("sec '%s': found %d CO-RE relocations\n",
5647 sec_name, sec->num_info);
5648
5649 for_each_btf_ext_rec(seg, sec, i, rec) {
5650 insn_idx = rec->insn_off / BPF_INSN_SZ;
5651 prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
5652 if (!prog) {
5653 pr_warn("sec '%s': failed to find program at insn #%d for CO-RE offset relocation #%d\n",
5654 sec_name, insn_idx, i);
5655 err = -EINVAL;
5656 goto out;
5657 }
5658
			/* no need to apply CO-RE relocation if the program is
			 * not going to be loaded
			 */
5661 if (!prog->load)
5662 continue;
5663
5664 err = bpf_core_apply_relo(prog, rec, i, obj->btf, cand_cache);
5665 if (err) {
5666 pr_warn("prog '%s': relo #%d: failed to relocate: %d\n",
5667 prog->name, i, err);
5668 goto out;
5669 }
5670 }
5671 }
5672
5673out:
	/* obj->btf_vmlinux and module BTFs are freed after object load */
5675 btf__free(obj->btf_vmlinux_override);
5676 obj->btf_vmlinux_override = NULL;
5677
5678 if (!IS_ERR_OR_NULL(cand_cache)) {
5679 hashmap__for_each_entry(cand_cache, entry, i) {
5680 bpf_core_free_cands(entry->value);
5681 }
5682 hashmap__free(cand_cache);
5683 }
5684 return err;
5685}
5686
/* Relocate data references within program code:
 *  - map references;
 *  - global variable references;
 *  - extern references.
 */
5692static int
5693bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
5694{
5695 int i;
5696
5697 for (i = 0; i < prog->nr_reloc; i++) {
5698 struct reloc_desc *relo = &prog->reloc_desc[i];
5699 struct bpf_insn *insn = &prog->insns[relo->insn_idx];
5700 struct extern_desc *ext;
5701
5702 switch (relo->type) {
5703 case RELO_LD64:
5704 if (obj->gen_loader) {
5705 insn[0].src_reg = BPF_PSEUDO_MAP_IDX;
5706 insn[0].imm = relo->map_idx;
5707 } else {
5708 insn[0].src_reg = BPF_PSEUDO_MAP_FD;
5709 insn[0].imm = obj->maps[relo->map_idx].fd;
5710 }
5711 break;
5712 case RELO_DATA:
5713 insn[1].imm = insn[0].imm + relo->sym_off;
5714 if (obj->gen_loader) {
5715 insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
5716 insn[0].imm = relo->map_idx;
5717 } else {
5718 const struct bpf_map *map = &obj->maps[relo->map_idx];
5719
5720 if (map->skipped) {
5721 pr_warn("prog '%s': relo #%d: kernel doesn't support global data\n",
5722 prog->name, i);
5723 return -ENOTSUP;
5724 }
5725 insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
5726 insn[0].imm = obj->maps[relo->map_idx].fd;
5727 }
5728 break;
5729 case RELO_EXTERN_VAR:
5730 ext = &obj->externs[relo->sym_off];
5731 if (ext->type == EXT_KCFG) {
5732 if (obj->gen_loader) {
5733 insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
5734 insn[0].imm = obj->kconfig_map_idx;
5735 } else {
5736 insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
5737 insn[0].imm = obj->maps[obj->kconfig_map_idx].fd;
5738 }
5739 insn[1].imm = ext->kcfg.data_off;
5740 } else {
5741 if (ext->ksym.type_id && ext->is_set) {
5742 insn[0].src_reg = BPF_PSEUDO_BTF_ID;
5743 insn[0].imm = ext->ksym.kernel_btf_id;
5744 insn[1].imm = ext->ksym.kernel_btf_obj_fd;
5745 } else {
5746 insn[0].imm = (__u32)ext->ksym.addr;
5747 insn[1].imm = ext->ksym.addr >> 32;
5748 }
5749 }
5750 break;
5751 case RELO_EXTERN_FUNC:
5752 ext = &obj->externs[relo->sym_off];
5753 insn[0].src_reg = BPF_PSEUDO_KFUNC_CALL;
5754 if (ext->is_set) {
5755 insn[0].imm = ext->ksym.kernel_btf_id;
5756 insn[0].off = ext->ksym.btf_fd_idx;
5757 } else {
5758 insn[0].imm = 0;
5759 insn[0].off = 0;
5760 }
5761 break;
5762 case RELO_SUBPROG_ADDR:
5763 if (insn[0].src_reg != BPF_PSEUDO_FUNC) {
5764 pr_warn("prog '%s': relo #%d: bad insn\n",
5765 prog->name, i);
5766 return -EINVAL;
5767 }
			/* handled already */
5769 break;
5770 case RELO_CALL:
			/* handled already */
5772 break;
5773 case RELO_CORE:
			/* will be handled by bpf_program_record_relos() */
5775 break;
5776 default:
5777 pr_warn("prog '%s': relo #%d: bad relo type %d\n",
5778 prog->name, i, relo->type);
5779 return -EINVAL;
5780 }
5781 }
5782
5783 return 0;
5784}
5785
5786static int adjust_prog_btf_ext_info(const struct bpf_object *obj,
5787 const struct bpf_program *prog,
5788 const struct btf_ext_info *ext_info,
5789 void **prog_info, __u32 *prog_rec_cnt,
5790 __u32 *prog_rec_sz)
5791{
5792 void *copy_start = NULL, *copy_end = NULL;
5793 void *rec, *rec_end, *new_prog_info;
5794 const struct btf_ext_info_sec *sec;
5795 size_t old_sz, new_sz;
5796 const char *sec_name;
5797 int i, off_adj;
5798
5799 for_each_btf_ext_sec(ext_info, sec) {
5800 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
5801 if (!sec_name)
5802 return -EINVAL;
5803 if (strcmp(sec_name, prog->sec_name) != 0)
5804 continue;
5805
5806 for_each_btf_ext_rec(ext_info, sec, i, rec) {
5807 __u32 insn_off = *(__u32 *)rec / BPF_INSN_SZ;
5808
5809 if (insn_off < prog->sec_insn_off)
5810 continue;
5811 if (insn_off >= prog->sec_insn_off + prog->sec_insn_cnt)
5812 break;
5813
5814 if (!copy_start)
5815 copy_start = rec;
5816 copy_end = rec + ext_info->rec_size;
5817 }
5818
5819 if (!copy_start)
5820 return -ENOENT;
5821
		/* append func/line info of a given (sub-)program to the main
		 * program func/line info
		 */
5825 old_sz = (size_t)(*prog_rec_cnt) * ext_info->rec_size;
5826 new_sz = old_sz + (copy_end - copy_start);
5827 new_prog_info = realloc(*prog_info, new_sz);
5828 if (!new_prog_info)
5829 return -ENOMEM;
5830 *prog_info = new_prog_info;
5831 *prog_rec_cnt = new_sz / ext_info->rec_size;
5832 memcpy(new_prog_info + old_sz, copy_start, copy_end - copy_start);
5833
		/* Kernel instruction offsets are in units of 8-byte
		 * instructions, while .BTF.ext instruction offsets generated
		 * by Clang are in units of bytes. So convert Clang offsets
		 * into kernel offsets and adjust offset according to program
		 * relocated position.
		 */
5840 off_adj = prog->sub_insn_off - prog->sec_insn_off;
5841 rec = new_prog_info + old_sz;
5842 rec_end = new_prog_info + new_sz;
5843 for (; rec < rec_end; rec += ext_info->rec_size) {
5844 __u32 *insn_off = rec;
5845
5846 *insn_off = *insn_off / BPF_INSN_SZ + off_adj;
5847 }
5848 *prog_rec_sz = ext_info->rec_size;
5849 return 0;
5850 }
5851
5852 return -ENOENT;
5853}
5854
5855static int
5856reloc_prog_func_and_line_info(const struct bpf_object *obj,
5857 struct bpf_program *main_prog,
5858 const struct bpf_program *prog)
5859{
5860 int err;
5861
	/* no .BTF.ext relocation if .BTF.ext is missing or kernel doesn't
	 * support func/line info
	 */
5865 if (!obj->btf_ext || !kernel_supports(obj, FEAT_BTF_FUNC))
5866 return 0;
5867
	/* only append subprog's func info if the main program itself has
	 * func info (otherwise .BTF.ext func info was skipped entirely)
	 */
5871 if (main_prog != prog && !main_prog->func_info)
5872 goto line_info;
5873
5874 err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->func_info,
5875 &main_prog->func_info,
5876 &main_prog->func_info_cnt,
5877 &main_prog->func_info_rec_size);
5878 if (err) {
5879 if (err != -ENOENT) {
5880 pr_warn("prog '%s': error relocating .BTF.ext function info: %d\n",
5881 prog->name, err);
5882 return err;
5883 }
5884 if (main_prog->func_info) {
			/*
			 * Some info has already been found but has problem
			 * in the last btf_ext reloc. Must have to error out.
			 */
5889 pr_warn("prog '%s': missing .BTF.ext function info.\n", prog->name);
5890 return err;
5891 }
		/* Have problem loading the very first info. Ignore the rest. */
5893 pr_warn("prog '%s': missing .BTF.ext function info for the main program, skipping all of .BTF.ext func info.\n",
5894 prog->name);
5895 }
5896
5897line_info:
	/* don't relocate line info if main program's relocation failed */
5899 if (main_prog != prog && !main_prog->line_info)
5900 return 0;
5901
5902 err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->line_info,
5903 &main_prog->line_info,
5904 &main_prog->line_info_cnt,
5905 &main_prog->line_info_rec_size);
5906 if (err) {
5907 if (err != -ENOENT) {
5908 pr_warn("prog '%s': error relocating .BTF.ext line info: %d\n",
5909 prog->name, err);
5910 return err;
5911 }
5912 if (main_prog->line_info) {
			/*
			 * Some info has already been found but has problem
			 * in the last btf_ext reloc. Must have to error out.
			 */
5917 pr_warn("prog '%s': missing .BTF.ext line info.\n", prog->name);
5918 return err;
5919 }
		/* Have problem loading the very first info. Ignore the rest. */
5921 pr_warn("prog '%s': missing .BTF.ext line info for the main program, skipping all of .BTF.ext line info.\n",
5922 prog->name);
5923 }
5924 return 0;
5925}
5926
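/* bsearch() comparator to look up a relocation by instruction index */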
5927static int cmp_relo_by_insn_idx(const void *key, const void *elem)
5928{
5929 size_t insn_idx = *(const size_t *)key;
5930 const struct reloc_desc *relo = elem;
5931
5932 if (insn_idx == relo->insn_idx)
5933 return 0;
5934 return insn_idx < relo->insn_idx ? -1 : 1;
5935}
5936
5937static struct reloc_desc *find_prog_insn_relo(const struct bpf_program *prog, size_t insn_idx)
5938{
5939 if (!prog->nr_reloc)
5940 return NULL;
5941 return bsearch(&insn_idx, prog->reloc_desc, prog->nr_reloc,
5942 sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx);
5943}
5944
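/* append subprog's relocations to the main program's, shifting each
 * insn_idx by the offset at which the subprog was appended
 */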
5945static int append_subprog_relos(struct bpf_program *main_prog, struct bpf_program *subprog)
5946{
5947 int new_cnt = main_prog->nr_reloc + subprog->nr_reloc;
5948 struct reloc_desc *relos;
5949 int i;
5950
5951 if (main_prog == subprog)
5952 return 0;
5953 relos = libbpf_reallocarray(main_prog->reloc_desc, new_cnt, sizeof(*relos));
5954 if (!relos)
5955 return -ENOMEM;
5956 if (subprog->nr_reloc)
5957 memcpy(relos + main_prog->nr_reloc, subprog->reloc_desc,
5958 sizeof(*relos) * subprog->nr_reloc);
5959
5960 for (i = main_prog->nr_reloc; i < new_cnt; i++)
5961 relos[i].insn_idx += subprog->sub_insn_off;
5962
	/* after insn_idx adjustment, subprog's relocations now correspond to
	 * positions within the main program's instruction array
	 */
5965 main_prog->reloc_desc = relos;
5966 main_prog->nr_reloc = new_cnt;
5967 return 0;
5968}
5969
5970static int
5971bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
5972 struct bpf_program *prog)
5973{
5974 size_t sub_insn_idx, insn_idx, new_cnt;
5975 struct bpf_program *subprog;
5976 struct bpf_insn *insns, *insn;
5977 struct reloc_desc *relo;
5978 int err;
5979
5980 err = reloc_prog_func_and_line_info(obj, main_prog, prog);
5981 if (err)
5982 return err;
5983
5984 for (insn_idx = 0; insn_idx < prog->sec_insn_cnt; insn_idx++) {
5985 insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
5986 if (!insn_is_subprog_call(insn) && !insn_is_pseudo_func(insn))
5987 continue;
5988
5989 relo = find_prog_insn_relo(prog, insn_idx);
5990 if (relo && relo->type == RELO_EXTERN_FUNC)
			/* kfunc relocations will be handled later
			 * in bpf_object__relocate_data()
			 */
5994 continue;
5995 if (relo && relo->type != RELO_CALL && relo->type != RELO_SUBPROG_ADDR) {
5996 pr_warn("prog '%s': unexpected relo for insn #%zu, type %d\n",
5997 prog->name, insn_idx, relo->type);
5998 return -LIBBPF_ERRNO__RELOC;
5999 }
6000 if (relo) {
			/* sub-program instruction index is a combination of
			 * an offset of a symbol pointed to by relocation and
			 * call instruction's imm field; for global functions,
			 * call always has imm = -1, but for static functions
			 * relocation is against STT_SECTION and insn->imm
			 * points to a start of a static function
			 *
			 * for subprog addr relocation, the relo->sym_off + insn->imm is
			 * the byte offset in the corresponding section.
			 */
6011 if (relo->type == RELO_CALL)
6012 sub_insn_idx = relo->sym_off / BPF_INSN_SZ + insn->imm + 1;
6013 else
6014 sub_insn_idx = (relo->sym_off + insn->imm) / BPF_INSN_SZ;
6015 } else if (insn_is_pseudo_func(insn)) {
			/*
			 * RELO_SUBPROG_ADDR relo is always emitted even if
			 * both functions are in the same section, so it
			 * shouldn't reach here.
			 */
6020 pr_warn("prog '%s': missing subprog addr relo for insn #%zu\n",
6021 prog->name, insn_idx);
6022 return -LIBBPF_ERRNO__RELOC;
6023 } else {
			/* if subprogram call is to a static function within
			 * the same ELF section, there won't be any relocation
			 * emitted, but it also means there is no additional
			 * offset necessary, insns->imm is relative to
			 * instruction's original position within the section
			 */
6030 sub_insn_idx = prog->sec_insn_off + insn_idx + insn->imm + 1;
6031 }
6032
		/* we enforce that sub-programs should be in .text section */
6034 subprog = find_prog_by_sec_insn(obj, obj->efile.text_shndx, sub_insn_idx);
6035 if (!subprog) {
6036 pr_warn("prog '%s': no .text section found yet sub-program call exists\n",
6037 prog->name);
6038 return -LIBBPF_ERRNO__RELOC;
6039 }
6040
		/* if it's the first call instruction calling into this
		 * subprogram (meaning this subprog hasn't been processed
		 * yet) within the context of current main program:
		 *   - append it at the end of main program's instructions blob;
		 *   - process it recursively, while current program is put on hold;
		 *   - if that subprogram calls some other not yet processed
		 *     subprogram, same thing will happen to it as well.
		 */
6051 if (subprog->sub_insn_off == 0) {
6052 subprog->sub_insn_off = main_prog->insns_cnt;
6053
6054 new_cnt = main_prog->insns_cnt + subprog->insns_cnt;
6055 insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns));
6056 if (!insns) {
6057 pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name);
6058 return -ENOMEM;
6059 }
6060 main_prog->insns = insns;
6061 main_prog->insns_cnt = new_cnt;
6062
6063 memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns,
6064 subprog->insns_cnt * sizeof(*insns));
6065
6066 pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n",
6067 main_prog->name, subprog->insns_cnt, subprog->name);
6068
			/* The subprog insns are now appended. Append its relos too. */
6070 err = append_subprog_relos(main_prog, subprog);
6071 if (err)
6072 return err;
6073 err = bpf_object__reloc_code(obj, main_prog, subprog);
6074 if (err)
6075 return err;
6076 }
6077
		/* main_prog->insns memory could have been re-allocated, so
		 * calculate pointer again
		 */
6081 insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
6082
		/* calculate correct instruction position within current main
		 * prog; each main prog can have a different set of
		 * subprograms appended (potentially in different order as
		 * well), so position of any subprog can be different for
		 * different main programs
		 */
6087 insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1;
6088
6089 pr_debug("prog '%s': insn #%zu relocated, imm %d points to subprog '%s' (now at %zu offset)\n",
6090 prog->name, insn_idx, insn->imm, subprog->name, subprog->sub_insn_off);
6091 }
6092
6093 return 0;
6094}
6095
/*
 * Relocate sub-program calls and append used subprograms to main programs.
 *
 * The algorithm operates as follows. Each entry-point BPF program (main
 * prog) is processed separately. For every subprog (a non-entry function
 * callable from entry progs or other subprogs) sub_insn_off is reset to
 * zero, indicating that the subprog has not yet been appended and relocated
 * within the current main prog. Once appended, sub_insn_off points at the
 * position within the current main prog where the subprog's code starts;
 * that offset is then used to relocate all call instructions jumping into
 * the subprog.
 *
 * We start with the main program and process all call instructions. If a
 * call targets a subprog whose sub_insn_off is still zero, the subprog's
 * instructions are appended at the end of the main prog's instruction
 * array, the main prog is "put on hold", and the newly appended subprog is
 * processed recursively. If that subprog calls into yet another unprocessed
 * subprog, it gets appended as well (always into the *main* prog's
 * instruction array; a subprog's own instructions stay untouched so they
 * can be reused by other main progs). Once every subprog reachable from a
 * main prog has been appended, all final positions are known and call
 * instructions are rewritten with correct relative offsets.
 *
 * Subprogs never called from a given main prog keep sub_insn_off == 0 and
 * are not appended to it, but may still be pulled in by other main progs.
 */
6177static int
6178bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog)
6179{
6180 struct bpf_program *subprog;
6181 int i, err;
6182
	/* mark all subprogs as not relocated (yet) within the context of
	 * current main program
	 */
6186 for (i = 0; i < obj->nr_programs; i++) {
6187 subprog = &obj->programs[i];
6188 if (!prog_is_subprog(obj, subprog))
6189 continue;
6190
6191 subprog->sub_insn_off = 0;
6192 }
6193
6194 err = bpf_object__reloc_code(obj, prog, prog);
6195 if (err)
6196 return err;
6197
6198
6199 return 0;
6200}
6201
6202static void
6203bpf_object__free_relocs(struct bpf_object *obj)
6204{
6205 struct bpf_program *prog;
6206 int i;
6207
	/* free up relocation descriptors */
6209 for (i = 0; i < obj->nr_programs; i++) {
6210 prog = &obj->programs[i];
6211 zfree(&prog->reloc_desc);
6212 prog->nr_reloc = 0;
6213 }
6214}
6215
6216static int cmp_relocs(const void *_a, const void *_b)
6217{
6218 const struct reloc_desc *a = _a;
6219 const struct reloc_desc *b = _b;
6220
6221 if (a->insn_idx != b->insn_idx)
6222 return a->insn_idx < b->insn_idx ? -1 : 1;
6223
	/* no two relocations should have the same insn_idx, but ... */
6225 if (a->type != b->type)
6226 return a->type < b->type ? -1 : 1;
6227
6228 return 0;
6229}
6230
6231static void bpf_object__sort_relos(struct bpf_object *obj)
6232{
6233 int i;
6234
6235 for (i = 0; i < obj->nr_programs; i++) {
6236 struct bpf_program *p = &obj->programs[i];
6237
6238 if (!p->nr_reloc)
6239 continue;
6240
6241 qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs);
6242 }
6243}
6244
6245static int
6246bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
6247{
6248 struct bpf_program *prog;
6249 size_t i, j;
6250 int err;
6251
6252 if (obj->btf_ext) {
6253 err = bpf_object__relocate_core(obj, targ_btf_path);
6254 if (err) {
6255 pr_warn("failed to perform CO-RE relocations: %d\n",
6256 err);
6257 return err;
6258 }
6259 if (obj->gen_loader)
6260 bpf_object__sort_relos(obj);
6261 }
6262
	/* Before relocating calls pre-process relocations and mark
	 * few ld_imm64 instructions that points to subprogs.
	 * Otherwise bpf_object__reloc_code() later would have to consider
	 * all ld_imm64 insns as relocation candidates. That would hurt
	 * performance dramatically, because amount of relocations is
	 * typically significantly smaller than amount of all instructions.
	 */
6270 for (i = 0; i < obj->nr_programs; i++) {
6271 prog = &obj->programs[i];
6272 for (j = 0; j < prog->nr_reloc; j++) {
6273 struct reloc_desc *relo = &prog->reloc_desc[j];
6274 struct bpf_insn *insn = &prog->insns[relo->insn_idx];
6275
			/* mark the insn, so it's recognized by insn_is_pseudo_func() */
6277 if (relo->type == RELO_SUBPROG_ADDR)
6278 insn[0].src_reg = BPF_PSEUDO_FUNC;
6279 }
6280 }
6281
6282
	/* relocate subprogram calls and append used subprograms to main
	 * programs; each copy of subprogram code needs to be relocated
	 * differently for each main program, because its code location might
	 * have changed.
	 * Append subprog relos to main programs to allow data relos to be
	 * processed after text is completely relocated.
	 */
6289 for (i = 0; i < obj->nr_programs; i++) {
6290 prog = &obj->programs[i];
		/* sub-program's sub-calls are relocated within the context of
		 * its main program only
		 */
6294 if (prog_is_subprog(obj, prog))
6295 continue;
6296 if (!prog->load)
6297 continue;
6298
6299 err = bpf_object__relocate_calls(obj, prog);
6300 if (err) {
6301 pr_warn("prog '%s': failed to relocate calls: %d\n",
6302 prog->name, err);
6303 return err;
6304 }
6305 }
6306
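	/* Process data relos for main programs */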
6307 for (i = 0; i < obj->nr_programs; i++) {
6308 prog = &obj->programs[i];
6309 if (prog_is_subprog(obj, prog))
6310 continue;
6311 if (!prog->load)
6312 continue;
6313 err = bpf_object__relocate_data(obj, prog);
6314 if (err) {
6315 pr_warn("prog '%s': failed to relocate data references: %d\n",
6316 prog->name, err);
6317 return err;
6318 }
6319 }
6320 if (!obj->gen_loader)
6321 bpf_object__free_relocs(obj);
6322 return 0;
6323}
6324
6325static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
6326 Elf64_Shdr *shdr, Elf_Data *data);
6327
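/* Collect relocations against the .maps section: these reference either
 * other maps (map-in-map) or entry-point programs (PROG_ARRAY) and are
 * recorded in map->init_slots for slot initialization at load time.
 */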
6328static int bpf_object__collect_map_relos(struct bpf_object *obj,
6329 Elf64_Shdr *shdr, Elf_Data *data)
6330{
6331 const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *);
6332 int i, j, nrels, new_sz;
6333 const struct btf_var_secinfo *vi = NULL;
6334 const struct btf_type *sec, *var, *def;
6335 struct bpf_map *map = NULL, *targ_map = NULL;
6336 struct bpf_program *targ_prog = NULL;
6337 bool is_prog_array, is_map_in_map;
6338 const struct btf_member *member;
6339 const char *name, *mname, *type;
6340 unsigned int moff;
6341 Elf64_Sym *sym;
6342 Elf64_Rel *rel;
6343 void *tmp;
6344
6345 if (!obj->efile.btf_maps_sec_btf_id || !obj->btf)
6346 return -EINVAL;
6347 sec = btf__type_by_id(obj->btf, obj->efile.btf_maps_sec_btf_id);
6348 if (!sec)
6349 return -EINVAL;
6350
6351 nrels = shdr->sh_size / shdr->sh_entsize;
6352 for (i = 0; i < nrels; i++) {
6353 rel = elf_rel_by_idx(data, i);
6354 if (!rel) {
6355 pr_warn(".maps relo #%d: failed to get ELF relo\n", i);
6356 return -LIBBPF_ERRNO__FORMAT;
6357 }
6358
6359 sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info));
6360 if (!sym) {
6361 pr_warn(".maps relo #%d: symbol %zx not found\n",
6362 i, (size_t)ELF64_R_SYM(rel->r_info));
6363 return -LIBBPF_ERRNO__FORMAT;
6364 }
6365 name = elf_sym_str(obj, sym->st_name) ?: "<?>";
6366
6367 pr_debug(".maps relo #%d: for %zd value %zd rel->r_offset %zu name %d ('%s')\n",
6368 i, (ssize_t)(rel->r_info >> 32), (size_t)sym->st_value,
6369 (size_t)rel->r_offset, sym->st_name, name);
6370
6371 for (j = 0; j < obj->nr_maps; j++) {
6372 map = &obj->maps[j];
6373 if (map->sec_idx != obj->efile.btf_maps_shndx)
6374 continue;
6375
6376 vi = btf_var_secinfos(sec) + map->btf_var_idx;
6377 if (vi->offset <= rel->r_offset &&
6378 rel->r_offset + bpf_ptr_sz <= vi->offset + vi->size)
6379 break;
6380 }
6381 if (j == obj->nr_maps) {
6382 pr_warn(".maps relo #%d: cannot find map '%s' at rel->r_offset %zu\n",
6383 i, name, (size_t)rel->r_offset);
6384 return -EINVAL;
6385 }
6386
6387 is_map_in_map = bpf_map_type__is_map_in_map(map->def.type);
6388 is_prog_array = map->def.type == BPF_MAP_TYPE_PROG_ARRAY;
6389 type = is_map_in_map ? "map" : "prog";
6390 if (is_map_in_map) {
6391 if (sym->st_shndx != obj->efile.btf_maps_shndx) {
6392 pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n",
6393 i, name);
6394 return -LIBBPF_ERRNO__RELOC;
6395 }
6396 if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS &&
6397 map->def.key_size != sizeof(int)) {
6398 pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n",
6399 i, map->name, sizeof(int));
6400 return -EINVAL;
6401 }
6402 targ_map = bpf_object__find_map_by_name(obj, name);
6403 if (!targ_map) {
6404 pr_warn(".maps relo #%d: '%s' isn't a valid map reference\n",
6405 i, name);
6406 return -ESRCH;
6407 }
6408 } else if (is_prog_array) {
6409 targ_prog = bpf_object__find_program_by_name(obj, name);
6410 if (!targ_prog) {
6411 pr_warn(".maps relo #%d: '%s' isn't a valid program reference\n",
6412 i, name);
6413 return -ESRCH;
6414 }
6415 if (targ_prog->sec_idx != sym->st_shndx ||
			    targ_prog->sec_insn_off * BPF_INSN_SZ != sym->st_value ||
6417 prog_is_subprog(obj, targ_prog)) {
6418 pr_warn(".maps relo #%d: '%s' isn't an entry-point program\n",
6419 i, name);
6420 return -LIBBPF_ERRNO__RELOC;
6421 }
6422 } else {
6423 return -EINVAL;
6424 }
6425
6426 var = btf__type_by_id(obj->btf, vi->type);
6427 def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
6428 if (btf_vlen(def) == 0)
6429 return -EINVAL;
6430 member = btf_members(def) + btf_vlen(def) - 1;
6431 mname = btf__name_by_offset(obj->btf, member->name_off);
6432 if (strcmp(mname, "values"))
6433 return -EINVAL;
6434
6435 moff = btf_member_bit_offset(def, btf_vlen(def) - 1) / 8;
6436 if (rel->r_offset - vi->offset < moff)
6437 return -EINVAL;
6438
6439 moff = rel->r_offset - vi->offset - moff;
6440
		/* here we use BPF pointer size, which is always 64 bit, as we
		 * are parsing ELF that was built for BPF target
		 */
6443 if (moff % bpf_ptr_sz)
6444 return -EINVAL;
6445 moff /= bpf_ptr_sz;
6446 if (moff >= map->init_slots_sz) {
6447 new_sz = moff + 1;
6448 tmp = libbpf_reallocarray(map->init_slots, new_sz, host_ptr_sz);
6449 if (!tmp)
6450 return -ENOMEM;
6451 map->init_slots = tmp;
6452 memset(map->init_slots + map->init_slots_sz, 0,
6453 (new_sz - map->init_slots_sz) * host_ptr_sz);
6454 map->init_slots_sz = new_sz;
6455 }
6456 map->init_slots[moff] = is_map_in_map ? (void *)targ_map : (void *)targ_prog;
6457
6458 pr_debug(".maps relo #%d: map '%s' slot [%d] points to %s '%s'\n",
6459 i, map->name, moff, type, name);
6460 }
6461
6462 return 0;
6463}
6464
6465static int bpf_object__collect_relos(struct bpf_object *obj)
6466{
6467 int i, err;
6468
6469 for (i = 0; i < obj->efile.sec_cnt; i++) {
6470 struct elf_sec_desc *sec_desc = &obj->efile.secs[i];
6471 Elf64_Shdr *shdr;
6472 Elf_Data *data;
6473 int idx;
6474
6475 if (sec_desc->sec_type != SEC_RELO)
6476 continue;
6477
6478 shdr = sec_desc->shdr;
6479 data = sec_desc->data;
6480 idx = shdr->sh_info;
6481
6482 if (shdr->sh_type != SHT_REL) {
6483 pr_warn("internal error at %d\n", __LINE__);
6484 return -LIBBPF_ERRNO__INTERNAL;
6485 }
6486
6487 if (idx == obj->efile.st_ops_shndx)
6488 err = bpf_object__collect_st_ops_relos(obj, shdr, data);
6489 else if (idx == obj->efile.btf_maps_shndx)
6490 err = bpf_object__collect_map_relos(obj, shdr, data);
6491 else
6492 err = bpf_object__collect_prog_relos(obj, shdr, data);
6493 if (err)
6494 return err;
6495 }
6496
6497 bpf_object__sort_relos(obj);
6498 return 0;
6499}
6500
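/* a helper call is a BPF_JMP|BPF_CALL insn with an immediate (BPF_K)
 * operand and zero src_reg/dst_reg, as opposed to kfunc or subprog calls
 */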
6501static bool insn_is_helper_call(struct bpf_insn *insn, enum bpf_func_id *func_id)
6502{
6503 if (BPF_CLASS(insn->code) == BPF_JMP &&
6504 BPF_OP(insn->code) == BPF_CALL &&
6505 BPF_SRC(insn->code) == BPF_K &&
6506 insn->src_reg == 0 &&
6507 insn->dst_reg == 0) {
6508 *func_id = insn->imm;
6509 return true;
6510 }
6511 return false;
6512}
6513
6514static int bpf_object__sanitize_prog(struct bpf_object *obj, struct bpf_program *prog)
6515{
6516 struct bpf_insn *insn = prog->insns;
6517 enum bpf_func_id func_id;
6518 int i;
6519
6520 if (obj->gen_loader)
6521 return 0;
6522
6523 for (i = 0; i < prog->insns_cnt; i++, insn++) {
6524 if (!insn_is_helper_call(insn, &func_id))
6525 continue;
6526
		/* on kernels that don't yet support
		 * bpf_probe_read_{kernel,user}[_str] helpers, fall back
		 * to the older bpf_probe_read()/bpf_probe_read_str()
		 * helpers
		 */
6531 switch (func_id) {
6532 case BPF_FUNC_probe_read_kernel:
6533 case BPF_FUNC_probe_read_user:
6534 if (!kernel_supports(obj, FEAT_PROBE_READ_KERN))
6535 insn->imm = BPF_FUNC_probe_read;
6536 break;
6537 case BPF_FUNC_probe_read_kernel_str:
6538 case BPF_FUNC_probe_read_user_str:
6539 if (!kernel_supports(obj, FEAT_PROBE_READ_KERN))
6540 insn->imm = BPF_FUNC_probe_read_str;
6541 break;
6542 default:
6543 break;
6544 }
6545 }
6546 return 0;
6547}
6548
6549static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name,
6550 int *btf_obj_fd, int *btf_type_id);
6551
/* called right before program load; adjusts load opts based on the section
 * definition's flags passed in as the cookie
 */
6553static int libbpf_preload_prog(struct bpf_program *prog,
6554 struct bpf_prog_load_opts *opts, long cookie)
6555{
6556 enum sec_def_flags def = cookie;
6557
	/* old kernels might not support specifying expected_attach_type */
6559 if ((def & SEC_EXP_ATTACH_OPT) && !kernel_supports(prog->obj, FEAT_EXP_ATTACH_TYPE))
6560 opts->expected_attach_type = 0;
6561
6562 if (def & SEC_SLEEPABLE)
6563 opts->prog_flags |= BPF_F_SLEEPABLE;
6564
6565 if ((prog->type == BPF_PROG_TYPE_TRACING ||
6566 prog->type == BPF_PROG_TYPE_LSM ||
6567 prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) {
6568 int btf_obj_fd = 0, btf_type_id = 0, err;
6569 const char *attach_name;
6570
6571 attach_name = strchr(prog->sec_name, '/') + 1;
6572 err = libbpf_find_attach_btf_id(prog, attach_name, &btf_obj_fd, &btf_type_id);
6573 if (err)
6574 return err;
6575
		/* cache resolved BTF FD and BTF type ID in the prog */
6577 prog->attach_btf_obj_fd = btf_obj_fd;
6578 prog->attach_btf_id = btf_type_id;
6579
		/* but by now libbpf common logic is not utilizing
		 * prog->attach_btf_obj_fd/prog->attach_btf_id anymore because
		 * this callback is called after opts were populated by
		 * libbpf, so this callback has to update opts explicitly here
		 */
6585 opts->attach_btf_obj_fd = btf_obj_fd;
6586 opts->attach_btf_id = btf_type_id;
6587 }
6588 return 0;
6589}
6590
6591static int bpf_object_load_prog_instance(struct bpf_object *obj, struct bpf_program *prog,
6592 struct bpf_insn *insns, int insns_cnt,
6593 const char *license, __u32 kern_version,
6594 int *prog_fd)
6595{
6596 LIBBPF_OPTS(bpf_prog_load_opts, load_attr);
6597 const char *prog_name = NULL;
6598 char *cp, errmsg[STRERR_BUFSIZE];
6599 size_t log_buf_size = 0;
6600 char *log_buf = NULL, *tmp;
6601 int btf_fd, ret, err;
6602 bool own_log_buf = true;
6603 __u32 log_level = prog->log_level;
6604
6605 if (prog->type == BPF_PROG_TYPE_UNSPEC) {
		/*
		 * The program type must be set.  Most likely we couldn't find a proper
		 * section definition at load time, and thus we didn't infer the type.
		 */
6610 pr_warn("prog '%s': missing BPF prog type, check ELF section name '%s'\n",
6611 prog->name, prog->sec_name);
6612 return -EINVAL;
6613 }
6614
6615 if (!insns || !insns_cnt)
6616 return -EINVAL;
6617
6618 load_attr.expected_attach_type = prog->expected_attach_type;
6619 if (kernel_supports(obj, FEAT_PROG_NAME))
6620 prog_name = prog->name;
6621 load_attr.attach_prog_fd = prog->attach_prog_fd;
6622 load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd;
6623 load_attr.attach_btf_id = prog->attach_btf_id;
6624 load_attr.kern_version = kern_version;
6625 load_attr.prog_ifindex = prog->prog_ifindex;
6626
	/* specify func_info/line_info only if kernel supports them */
6628 btf_fd = bpf_object__btf_fd(obj);
6629 if (btf_fd >= 0 && kernel_supports(obj, FEAT_BTF_FUNC)) {
6630 load_attr.prog_btf_fd = btf_fd;
6631 load_attr.func_info = prog->func_info;
6632 load_attr.func_info_rec_size = prog->func_info_rec_size;
6633 load_attr.func_info_cnt = prog->func_info_cnt;
6634 load_attr.line_info = prog->line_info;
6635 load_attr.line_info_rec_size = prog->line_info_rec_size;
6636 load_attr.line_info_cnt = prog->line_info_cnt;
6637 }
6638 load_attr.log_level = log_level;
6639 load_attr.prog_flags = prog->prog_flags;
6640 load_attr.fd_array = obj->fd_array;
6641
6642 /* let section-specific callback adjust load attributes */
6643 if (prog->sec_def && prog->sec_def->preload_fn) {
6644 err = prog->sec_def->preload_fn(prog, &load_attr, prog->sec_def->cookie);
6645 if (err < 0) {
6646 pr_warn("prog '%s': failed to prepare load attributes: %d\n",
6647 prog->name, err);
6648 return err;
6649 }
6650 }
6651
6652 if (obj->gen_loader) {
6653 bpf_gen__prog_load(obj->gen_loader, prog->type, prog->name,
6654 license, insns, insns_cnt, &load_attr,
6655 prog - obj->programs);
6656 *prog_fd = -1;
6657 return 0;
6658 }
6659
6660retry_load:
6661 /* if log_level is 0, we won't request a verifier log on the first
6662  * attempt, even if a custom log buffer was provided; if loading
6663  * fails, we bump log_level to 1 and retry with either the
6664  * user-provided buffer or an internally allocated one (grown on
6665  * ENOSPC below) to report what went wrong */
6666 if (log_level) {
6667 if (prog->log_buf) {
6668 log_buf = prog->log_buf;
6669 log_buf_size = prog->log_size;
6670 own_log_buf = false;
6671 } else if (obj->log_buf) {
6672 log_buf = obj->log_buf;
6673 log_buf_size = obj->log_size;
6674 own_log_buf = false;
6675 } else {
6676 log_buf_size = max((size_t)BPF_LOG_BUF_SIZE, log_buf_size * 2);
6677 tmp = realloc(log_buf, log_buf_size);
6678 if (!tmp) {
6679 ret = -ENOMEM;
6680 goto out;
6681 }
6682 log_buf = tmp;
6683 log_buf[0] = '\0';
6684 own_log_buf = true;
6685 }
6686 }
6687
6688 load_attr.log_buf = log_buf;
6689 load_attr.log_size = log_buf_size;
6690 load_attr.log_level = log_level;
6691
6692 ret = bpf_prog_load(prog->type, prog_name, license, insns, insns_cnt, &load_attr);
6693 if (ret >= 0) {
6694 if (log_level && own_log_buf) {
6695 pr_debug("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n",
6696 prog->name, log_buf);
6697 }
6698
6699 if (obj->has_rodata && kernel_supports(obj, FEAT_PROG_BIND_MAP)) {
6700 struct bpf_map *map;
6701 int i;
6702
6703 for (i = 0; i < obj->nr_maps; i++) {
6704 map = &prog->obj->maps[i];
6705 if (map->libbpf_type != LIBBPF_MAP_RODATA)
6706 continue;
6707
6708 if (bpf_prog_bind_map(ret, bpf_map__fd(map), NULL)) {
6709 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
6710 pr_warn("prog '%s': failed to bind map '%s': %s\n",
6711 prog->name, map->real_name, cp);
6712 /* don't fail hard if we can't bind .rodata map */
6713 }
6714 }
6715 }
6716
6717 *prog_fd = ret;
6718 ret = 0;
6719 goto out;
6720 }
6721
6722 if (log_level == 0) {
6723 log_level = 1;
6724 goto retry_load;
6725 }
6726
6727 /* grow our own log buffer and retry on ENOSPC; growth is capped
6728  * by the log_buf_size <= UINT_MAX / 2 check, so the doubling in
6729  * the allocation above can never overflow the kernel's 32-bit
6730  * log_size field. A user-supplied log buffer is never resized,
6731  * so in that case the truncated log is reported as-is.
6732  */
6733 if (own_log_buf && errno == ENOSPC && log_buf_size <= UINT_MAX / 2)
6734 goto retry_load;
6735
6736 ret = -errno;
6737 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
6738 pr_warn("prog '%s': BPF program load failed: %s\n", prog->name, cp);
6739 pr_perm_msg(ret);
6740
6741 if (own_log_buf && log_buf && log_buf[0] != '\0') {
6742 pr_warn("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n",
6743 prog->name, log_buf);
6744 }
6745 if (insns_cnt >= BPF_MAXINSNS) {
6746 pr_warn("prog '%s': program too large (%d insns), at most %d insns\n",
6747 prog->name, insns_cnt, BPF_MAXINSNS);
6748 }
6749
6750out:
6751 if (own_log_buf)
6752 free(log_buf);
6753 return ret;
6754}
6755
6756static int bpf_program_record_relos(struct bpf_program *prog)
6757{
6758 struct bpf_object *obj = prog->obj;
6759 int i;
6760
6761 for (i = 0; i < prog->nr_reloc; i++) {
6762 struct reloc_desc *relo = &prog->reloc_desc[i];
6763 struct extern_desc *ext = &obj->externs[relo->sym_off];
6764
6765 switch (relo->type) {
6766 case RELO_EXTERN_VAR:
6767 if (ext->type != EXT_KSYM)
6768 continue;
6769 bpf_gen__record_extern(obj->gen_loader, ext->name,
6770 ext->is_weak, !ext->ksym.type_id,
6771 BTF_KIND_VAR, relo->insn_idx);
6772 break;
6773 case RELO_EXTERN_FUNC:
6774 bpf_gen__record_extern(obj->gen_loader, ext->name,
6775 ext->is_weak, false, BTF_KIND_FUNC,
6776 relo->insn_idx);
6777 break;
6778 case RELO_CORE: {
6779 struct bpf_core_relo cr = {
6780 .insn_off = relo->insn_idx * 8,
6781 .type_id = relo->core_relo->type_id,
6782 .access_str_off = relo->core_relo->access_str_off,
6783 .kind = relo->core_relo->kind,
6784 };
6785
6786 bpf_gen__record_relo_core(obj->gen_loader, &cr);
6787 break;
6788 }
6789 default:
6790 continue;
6791 }
6792 }
6793 return 0;
6794}
6795
6796static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog,
6797 const char *license, __u32 kern_ver)
6798{
6799 int err = 0, fd, i;
6800
6801 if (obj->loaded) {
6802 pr_warn("prog '%s': can't load after object was loaded\n", prog->name);
6803 return libbpf_err(-EINVAL);
6804 }
6805
6806 if (prog->instances.nr < 0 || !prog->instances.fds) {
6807 if (prog->preprocessor) {
6808 pr_warn("Internal error: can't load program '%s'\n",
6809 prog->name);
6810 return libbpf_err(-LIBBPF_ERRNO__INTERNAL);
6811 }
6812
6813 prog->instances.fds = malloc(sizeof(int));
6814 if (!prog->instances.fds) {
6815 pr_warn("Not enough memory for BPF fds\n");
6816 return libbpf_err(-ENOMEM);
6817 }
6818 prog->instances.nr = 1;
6819 prog->instances.fds[0] = -1;
6820 }
6821
6822 if (!prog->preprocessor) {
6823 if (prog->instances.nr != 1) {
6824 pr_warn("prog '%s': inconsistent nr(%d) != 1\n",
6825 prog->name, prog->instances.nr);
6826 }
6827 if (obj->gen_loader)
6828 bpf_program_record_relos(prog);
6829 err = bpf_object_load_prog_instance(obj, prog,
6830 prog->insns, prog->insns_cnt,
6831 license, kern_ver, &fd);
6832 if (!err)
6833 prog->instances.fds[0] = fd;
6834 goto out;
6835 }
6836
6837 for (i = 0; i < prog->instances.nr; i++) {
6838 struct bpf_prog_prep_result result;
6839 bpf_program_prep_t preprocessor = prog->preprocessor;
6840
6841 memset(&result, 0, sizeof(result));
6842 err = preprocessor(prog, i, prog->insns,
6843 prog->insns_cnt, &result);
6844 if (err) {
6845 pr_warn("Preprocessing the %dth instance of program '%s' failed\n",
6846 i, prog->name);
6847 goto out;
6848 }
6849
6850 if (!result.new_insn_ptr || !result.new_insn_cnt) {
6851 pr_debug("Skip loading the %dth instance of program '%s'\n",
6852 i, prog->name);
6853 prog->instances.fds[i] = -1;
6854 if (result.pfd)
6855 *result.pfd = -1;
6856 continue;
6857 }
6858
6859 err = bpf_object_load_prog_instance(obj, prog,
6860 result.new_insn_ptr, result.new_insn_cnt,
6861 license, kern_ver, &fd);
6862 if (err) {
6863 pr_warn("Loading the %dth instance of program '%s' failed\n",
6864 i, prog->name);
6865 goto out;
6866 }
6867
6868 if (result.pfd)
6869 *result.pfd = fd;
6870 prog->instances.fds[i] = fd;
6871 }
6872out:
6873 if (err)
6874 pr_warn("failed to load program '%s'\n", prog->name);
6875 return libbpf_err(err);
6876}
6877
6878int bpf_program__load(struct bpf_program *prog, const char *license, __u32 kern_ver)
6879{
6880 return bpf_object_load_prog(prog->obj, prog, license, kern_ver);
6881}
6882
6883static int
6884bpf_object__load_progs(struct bpf_object *obj, int log_level)
6885{
6886 struct bpf_program *prog;
6887 size_t i;
6888 int err;
6889
6890 for (i = 0; i < obj->nr_programs; i++) {
6891 prog = &obj->programs[i];
6892 err = bpf_object__sanitize_prog(obj, prog);
6893 if (err)
6894 return err;
6895 }
6896
6897 for (i = 0; i < obj->nr_programs; i++) {
6898 prog = &obj->programs[i];
6899 if (prog_is_subprog(obj, prog))
6900 continue;
6901 if (!prog->load) {
6902 pr_debug("prog '%s': skipped loading\n", prog->name);
6903 continue;
6904 }
6905 prog->log_level |= log_level;
6906 err = bpf_object_load_prog(obj, prog, obj->license, obj->kern_version);
6907 if (err)
6908 return err;
6909 }
6910 if (obj->gen_loader)
6911 bpf_object__free_relocs(obj);
6912 return 0;
6913}
6914
6915static const struct bpf_sec_def *find_sec_def(const char *sec_name);
6916
6917static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object_open_opts *opts)
6918{
6919 struct bpf_program *prog;
6920 int err;
6921
6922 bpf_object__for_each_program(prog, obj) {
6923 prog->sec_def = find_sec_def(prog->sec_name);
6924 if (!prog->sec_def) {
6925 /* couldn't guess, but user might manually specify */
6926 pr_debug("prog '%s': unrecognized ELF section name '%s'\n",
6927 prog->name, prog->sec_name);
6928 continue;
6929 }
6930
6931 bpf_program__set_type(prog, prog->sec_def->prog_type);
6932 bpf_program__set_expected_attach_type(prog, prog->sec_def->expected_attach_type);
6933
6934#pragma GCC diagnostic push
6935#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
6936 if (prog->sec_def->prog_type == BPF_PROG_TYPE_TRACING ||
6937 prog->sec_def->prog_type == BPF_PROG_TYPE_EXT)
6938 prog->attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
6939#pragma GCC diagnostic pop
6940
6941 /* section definitions may provide an init callback that is
6942  * invoked right after the bpf_program is initialized
6943  */
6944 if (prog->sec_def->init_fn) {
6945 err = prog->sec_def->init_fn(prog, prog->sec_def->cookie);
6946 if (err < 0) {
6947 pr_warn("prog '%s': failed to initialize: %d\n",
6948 prog->name, err);
6949 return err;
6950 }
6951 }
6952 }
6953
6954 return 0;
6955}
6956
6957static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf, size_t obj_buf_sz,
6958 const struct bpf_object_open_opts *opts)
6959{
6960 const char *obj_name, *kconfig, *btf_tmp_path;
6961 struct bpf_object *obj;
6962 char tmp_name[64];
6963 int err;
6964 char *log_buf;
6965 size_t log_size;
6966 __u32 log_level;
6967
6968 if (elf_version(EV_CURRENT) == EV_NONE) {
6969 pr_warn("failed to init libelf for %s\n",
6970 path ? : "(mem buf)");
6971 return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
6972 }
6973
6974 if (!OPTS_VALID(opts, bpf_object_open_opts))
6975 return ERR_PTR(-EINVAL);
6976
6977 obj_name = OPTS_GET(opts, object_name, NULL);
6978 if (obj_buf) {
6979 if (!obj_name) {
6980 snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
6981 (unsigned long)obj_buf,
6982 (unsigned long)obj_buf_sz);
6983 obj_name = tmp_name;
6984 }
6985 path = obj_name;
6986 pr_debug("loading object '%s' from buffer\n", obj_name);
6987 }
6988
6989 log_buf = OPTS_GET(opts, kernel_log_buf, NULL);
6990 log_size = OPTS_GET(opts, kernel_log_size, 0);
6991 log_level = OPTS_GET(opts, kernel_log_level, 0);
6992 if (log_size > UINT_MAX)
6993 return ERR_PTR(-EINVAL);
6994 if (log_size && !log_buf)
6995 return ERR_PTR(-EINVAL);
6996
6997 obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
6998 if (IS_ERR(obj))
6999 return obj;
7000
7001 obj->log_buf = log_buf;
7002 obj->log_size = log_size;
7003 obj->log_level = log_level;
7004
7005 btf_tmp_path = OPTS_GET(opts, btf_custom_path, NULL);
7006 if (btf_tmp_path) {
7007 if (strlen(btf_tmp_path) >= PATH_MAX) {
7008 err = -ENAMETOOLONG;
7009 goto out;
7010 }
7011 obj->btf_custom_path = strdup(btf_tmp_path);
7012 if (!obj->btf_custom_path) {
7013 err = -ENOMEM;
7014 goto out;
7015 }
7016 }
7017
7018 kconfig = OPTS_GET(opts, kconfig, NULL);
7019 if (kconfig) {
7020 obj->kconfig = strdup(kconfig);
7021 if (!obj->kconfig) {
7022 err = -ENOMEM;
7023 goto out;
7024 }
7025 }
7026
7027 err = bpf_object__elf_init(obj);
7028 err = err ? : bpf_object__check_endianness(obj);
7029 err = err ? : bpf_object__elf_collect(obj);
7030 err = err ? : bpf_object__collect_externs(obj);
7031 err = err ? : bpf_object__finalize_btf(obj);
7032 err = err ? : bpf_object__init_maps(obj, opts);
7033 err = err ? : bpf_object_init_progs(obj, opts);
7034 err = err ? : bpf_object__collect_relos(obj);
7035 if (err)
7036 goto out;
7037
7038 bpf_object__elf_finish(obj);
7039
7040 return obj;
7041out:
7042 bpf_object__close(obj);
7043 return ERR_PTR(err);
7044}
7045
7046static struct bpf_object *
7047__bpf_object__open_xattr(struct bpf_object_open_attr *attr, int flags)
7048{
7049 DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
7050 .relaxed_maps = flags & MAPS_RELAX_COMPAT,
7051 );
7052
7053 /* param validation */
7054 if (!attr->file)
7055 return NULL;
7056
7057 pr_debug("loading %s\n", attr->file);
7058 return bpf_object_open(attr->file, NULL, 0, &opts);
7059}
7060
7061struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
7062{
7063 return libbpf_ptr(__bpf_object__open_xattr(attr, 0));
7064}
7065
7066struct bpf_object *bpf_object__open(const char *path)
7067{
7068 struct bpf_object_open_attr attr = {
7069 .file = path,
7070 .prog_type = BPF_PROG_TYPE_UNSPEC,
7071 };
7072
7073 return libbpf_ptr(__bpf_object__open_xattr(&attr, 0));
7074}
7075
7076struct bpf_object *
7077bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts)
7078{
7079 if (!path)
7080 return libbpf_err_ptr(-EINVAL);
7081
7082 pr_debug("loading %s\n", path);
7083
7084 return libbpf_ptr(bpf_object_open(path, NULL, 0, opts));
7085}
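
/* Illustrative usage sketch (not part of libbpf itself): the typical
 * open -> load -> attach flow built from the APIs above, assuming libbpf
 * 1.0 error conventions (libbpf_set_strict_mode(LIBBPF_STRICT_ALL)), so
 * failed pointer-returning calls yield NULL and set errno.
 * "minimal.bpf.o" and "handle_tp" are hypothetical names.
 *
 *	struct bpf_object *obj;
 *	struct bpf_program *prog;
 *	struct bpf_link *link;
 *
 *	obj = bpf_object__open_file("minimal.bpf.o", NULL);
 *	if (!obj)
 *		return -errno;
 *	if (bpf_object__load(obj)) {
 *		bpf_object__close(obj);
 *		return -errno;
 *	}
 *	prog = bpf_object__find_program_by_name(obj, "handle_tp");
 *	link = prog ? bpf_program__attach(prog) : NULL;
 *	...
 *	bpf_link__destroy(link);
 *	bpf_object__close(obj);
 */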
7086
7087struct bpf_object *
7088bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
7089 const struct bpf_object_open_opts *opts)
7090{
7091 if (!obj_buf || obj_buf_sz == 0)
7092 return libbpf_err_ptr(-EINVAL);
7093
7094 return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, opts));
7095}
7096
7097struct bpf_object *
7098bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
7099 const char *name)
7100{
7101 DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
7102 .object_name = name,
7103 /* wrong default, but backwards-compatible */
7104 .relaxed_maps = true,
7105 );
7106
7107 /* returning NULL is wrong, but backwards-compatible */
7108 if (!obj_buf || obj_buf_sz == 0)
7109 return errno = EINVAL, NULL;
7110
7111 return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, &opts));
7112}
7113
7114static int bpf_object_unload(struct bpf_object *obj)
7115{
7116 size_t i;
7117
7118 if (!obj)
7119 return libbpf_err(-EINVAL);
7120
7121 for (i = 0; i < obj->nr_maps; i++) {
7122 zclose(obj->maps[i].fd);
7123 if (obj->maps[i].st_ops)
7124 zfree(&obj->maps[i].st_ops->kern_vdata);
7125 }
7126
7127 for (i = 0; i < obj->nr_programs; i++)
7128 bpf_program__unload(&obj->programs[i]);
7129
7130 return 0;
7131}
7132
7133int bpf_object__unload(struct bpf_object *obj) __attribute__((alias("bpf_object_unload")));
7134
7135static int bpf_object__sanitize_maps(struct bpf_object *obj)
7136{
7137 struct bpf_map *m;
7138
7139 bpf_object__for_each_map(m, obj) {
7140 if (!bpf_map__is_internal(m))
7141 continue;
7142 if (!kernel_supports(obj, FEAT_ARRAY_MMAP))
7143 m->def.map_flags ^= BPF_F_MMAPABLE;
7144 }
7145
7146 return 0;
7147}
7148
7149static int bpf_object__read_kallsyms_file(struct bpf_object *obj)
7150{
7151 char sym_type, sym_name[500];
7152 unsigned long long sym_addr;
7153 const struct btf_type *t;
7154 struct extern_desc *ext;
7155 int ret, err = 0;
7156 FILE *f;
7157
7158 f = fopen("/proc/kallsyms", "r");
7159 if (!f) {
7160 err = -errno;
7161 pr_warn("failed to open /proc/kallsyms: %d\n", err);
7162 return err;
7163 }
7164
7165 while (true) {
7166 ret = fscanf(f, "%llx %c %499s%*[^\n]\n",
7167 &sym_addr, &sym_type, sym_name);
7168 if (ret == EOF && feof(f))
7169 break;
7170 if (ret != 3) {
7171 pr_warn("failed to read kallsyms entry: %d\n", ret);
7172 err = -EINVAL;
7173 goto out;
7174 }
7175
7176 ext = find_extern_by_name(obj, sym_name);
7177 if (!ext || ext->type != EXT_KSYM)
7178 continue;
7179
7180 t = btf__type_by_id(obj->btf, ext->btf_id);
7181 if (!btf_is_var(t))
7182 continue;
7183
7184 if (ext->is_set && ext->ksym.addr != sym_addr) {
7185 pr_warn("extern (ksym) '%s' resolution is ambiguous: 0x%llx or 0x%llx\n",
7186 sym_name, ext->ksym.addr, sym_addr);
7187 err = -EINVAL;
7188 goto out;
7189 }
7190 if (!ext->is_set) {
7191 ext->is_set = true;
7192 ext->ksym.addr = sym_addr;
7193 pr_debug("extern (ksym) %s=0x%llx\n", sym_name, sym_addr);
7194 }
7195 }
7196
7197out:
7198 fclose(f);
7199 return err;
7200}
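
/* For reference, /proc/kallsyms lines have the form below (entries are
 * hypothetical; the trailing [module] part is optional):
 *
 *	ffffffff81001120 T do_one_initcall
 *	ffffffffc01b3000 t some_mod_func	[some_mod]
 *
 * which is exactly what the "%llx %c %499s%*[^\n]\n" scan format above
 * parses: address, symbol type, symbol name; the rest of the line is
 * discarded.
 */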
7201
7202static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
7203 __u16 kind, struct btf **res_btf,
7204 struct module_btf **res_mod_btf)
7205{
7206 struct module_btf *mod_btf;
7207 struct btf *btf;
7208 int i, id, err;
7209
7210 btf = obj->btf_vmlinux;
7211 mod_btf = NULL;
7212 id = btf__find_by_name_kind(btf, ksym_name, kind);
7213
7214 if (id == -ENOENT) {
7215 err = load_module_btfs(obj);
7216 if (err)
7217 return err;
7218
7219 for (i = 0; i < obj->btf_module_cnt; i++) {
7220 /* not in vmlinux BTF, scan module BTFs in load order */
7221 mod_btf = &obj->btf_modules[i];
7222 btf = mod_btf->btf;
7223 id = btf__find_by_name_kind_own(btf, ksym_name, kind);
7224 if (id != -ENOENT)
7225 break;
7226 }
7227 }
7228 if (id <= 0)
7229 return -ESRCH;
7230
7231 *res_btf = btf;
7232 *res_mod_btf = mod_btf;
7233 return id;
7234}
7235
7236static int bpf_object__resolve_ksym_var_btf_id(struct bpf_object *obj,
7237 struct extern_desc *ext)
7238{
7239 const struct btf_type *targ_var, *targ_type;
7240 __u32 targ_type_id, local_type_id;
7241 struct module_btf *mod_btf = NULL;
7242 const char *targ_var_name;
7243 struct btf *btf = NULL;
7244 int id, err;
7245
7246 id = find_ksym_btf_id(obj, ext->name, BTF_KIND_VAR, &btf, &mod_btf);
7247 if (id < 0) {
7248 if (id == -ESRCH && ext->is_weak)
7249 return 0;
7250 pr_warn("extern (var ksym) '%s': not found in kernel BTF\n",
7251 ext->name);
7252 return id;
7253 }
7254
7255 /* find local type_id */
7256 local_type_id = ext->ksym.type_id;
7257
7258 /* find target type_id */
7259 targ_var = btf__type_by_id(btf, id);
7260 targ_var_name = btf__name_by_offset(btf, targ_var->name_off);
7261 targ_type = skip_mods_and_typedefs(btf, targ_var->type, &targ_type_id);
7262
7263 err = bpf_core_types_are_compat(obj->btf, local_type_id,
7264 btf, targ_type_id);
7265 if (err <= 0) {
7266 const struct btf_type *local_type;
7267 const char *targ_name, *local_name;
7268
7269 local_type = btf__type_by_id(obj->btf, local_type_id);
7270 local_name = btf__name_by_offset(obj->btf, local_type->name_off);
7271 targ_name = btf__name_by_offset(btf, targ_type->name_off);
7272
7273 pr_warn("extern (var ksym) '%s': incompatible types, expected [%d] %s %s, but kernel has [%d] %s %s\n",
7274 ext->name, local_type_id,
7275 btf_kind_str(local_type), local_name, targ_type_id,
7276 btf_kind_str(targ_type), targ_name);
7277 return -EINVAL;
7278 }
7279
7280 ext->is_set = true;
7281 ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0;
7282 ext->ksym.kernel_btf_id = id;
7283 pr_debug("extern (var ksym) '%s': resolved to [%d] %s %s\n",
7284 ext->name, id, btf_kind_str(targ_var), targ_var_name);
7285
7286 return 0;
7287}
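
/* Sketch of the BPF C side this serves: a typed ksym extern such as
 *
 *	extern const struct rq runqueues __ksym;
 *
 * ("runqueues" is used purely for illustration). The extern's local BTF
 * type is checked for compatibility against the variable's type in
 * vmlinux or module BTF, as done above.
 */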
7288
7289static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj,
7290 struct extern_desc *ext)
7291{
7292 int local_func_proto_id, kfunc_proto_id, kfunc_id;
7293 struct module_btf *mod_btf = NULL;
7294 const struct btf_type *kern_func;
7295 struct btf *kern_btf = NULL;
7296 int ret;
7297
7298 local_func_proto_id = ext->ksym.type_id;
7299
7300 kfunc_id = find_ksym_btf_id(obj, ext->name, BTF_KIND_FUNC, &kern_btf, &mod_btf);
7301 if (kfunc_id < 0) {
7302 if (kfunc_id == -ESRCH && ext->is_weak)
7303 return 0;
7304 pr_warn("extern (func ksym) '%s': not found in kernel or module BTFs\n",
7305 ext->name);
7306 return kfunc_id;
7307 }
7308
7309 kern_func = btf__type_by_id(kern_btf, kfunc_id);
7310 kfunc_proto_id = kern_func->type;
7311
7312 ret = bpf_core_types_are_compat(obj->btf, local_func_proto_id,
7313 kern_btf, kfunc_proto_id);
7314 if (ret <= 0) {
7315 pr_warn("extern (func ksym) '%s': func_proto [%d] incompatible with kernel [%d]\n",
7316 ext->name, local_func_proto_id, kfunc_proto_id);
7317 return -EINVAL;
7318 }
7319
7320 /* put module BTF fd into fd_array, if it's not there yet */
7321 if (mod_btf && !mod_btf->fd_array_idx) {
7322 /* insn->off is s16, so the fd_array index must fit in it */
7323 if (obj->fd_array_cnt == INT16_MAX) {
7324 pr_warn("extern (func ksym) '%s': module BTF fd index %d too big to fit in bpf_insn offset\n",
7325 ext->name, mod_btf->fd_array_idx);
7326 return -E2BIG;
7327 }
7328 /* index 0 is reserved and never used for a module BTF fd */
7329 if (!obj->fd_array_cnt)
7330 obj->fd_array_cnt = 1;
7331
7332 ret = libbpf_ensure_mem((void **)&obj->fd_array, &obj->fd_array_cap, sizeof(int),
7333 obj->fd_array_cnt + 1);
7334 if (ret)
7335 return ret;
7336 mod_btf->fd_array_idx = obj->fd_array_cnt;
7337
7338 obj->fd_array[obj->fd_array_cnt++] = mod_btf->fd;
7339 }
7340
7341 ext->is_set = true;
7342 ext->ksym.kernel_btf_id = kfunc_id;
7343 ext->ksym.btf_fd_idx = mod_btf ? mod_btf->fd_array_idx : 0;
7344 pr_debug("extern (func ksym) '%s': resolved to kernel [%d]\n",
7345 ext->name, kfunc_id);
7346
7347 return 0;
7348}
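
/* The corresponding BPF C shape for kfunc externs (function name is
 * hypothetical):
 *
 *	extern struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
 *
 * The extern's FUNC_PROTO is matched against the kernel function's
 * prototype, and for module kfuncs the module BTF fd is routed through
 * obj->fd_array as set up above.
 */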
7349
7350static int bpf_object__resolve_ksyms_btf_id(struct bpf_object *obj)
7351{
7352 const struct btf_type *t;
7353 struct extern_desc *ext;
7354 int i, err;
7355
7356 for (i = 0; i < obj->nr_extern; i++) {
7357 ext = &obj->externs[i];
7358 if (ext->type != EXT_KSYM || !ext->ksym.type_id)
7359 continue;
7360
7361 if (obj->gen_loader) {
7362 ext->is_set = true;
7363 ext->ksym.kernel_btf_obj_fd = 0;
7364 ext->ksym.kernel_btf_id = 0;
7365 continue;
7366 }
7367 t = btf__type_by_id(obj->btf, ext->btf_id);
7368 if (btf_is_var(t))
7369 err = bpf_object__resolve_ksym_var_btf_id(obj, ext);
7370 else
7371 err = bpf_object__resolve_ksym_func_btf_id(obj, ext);
7372 if (err)
7373 return err;
7374 }
7375 return 0;
7376}
7377
7378static int bpf_object__resolve_externs(struct bpf_object *obj,
7379 const char *extra_kconfig)
7380{
7381 bool need_config = false, need_kallsyms = false;
7382 bool need_vmlinux_btf = false;
7383 struct extern_desc *ext;
7384 void *kcfg_data = NULL;
7385 int err, i;
7386
7387 if (obj->nr_extern == 0)
7388 return 0;
7389
7390 if (obj->kconfig_map_idx >= 0)
7391 kcfg_data = obj->maps[obj->kconfig_map_idx].mmaped;
7392
7393 for (i = 0; i < obj->nr_extern; i++) {
7394 ext = &obj->externs[i];
7395
7396 if (ext->type == EXT_KCFG &&
7397 strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) {
7398 void *ext_val = kcfg_data + ext->kcfg.data_off;
7399 __u32 kver = get_kernel_version();
7400
7401 if (!kver) {
7402 pr_warn("failed to get kernel version\n");
7403 return -EINVAL;
7404 }
7405 err = set_kcfg_value_num(ext, ext_val, kver);
7406 if (err)
7407 return err;
7408 pr_debug("extern (kcfg) %s=0x%x\n", ext->name, kver);
7409 } else if (ext->type == EXT_KCFG && str_has_pfx(ext->name, "CONFIG_")) {
7410 need_config = true;
7411 } else if (ext->type == EXT_KSYM) {
7412 if (ext->ksym.type_id)
7413 need_vmlinux_btf = true;
7414 else
7415 need_kallsyms = true;
7416 } else {
7417 pr_warn("unrecognized extern '%s'\n", ext->name);
7418 return -EINVAL;
7419 }
7420 }
7421 if (need_config && extra_kconfig) {
7422 err = bpf_object__read_kconfig_mem(obj, extra_kconfig, kcfg_data);
7423 if (err)
7424 return -EINVAL;
7425 need_config = false;
7426 for (i = 0; i < obj->nr_extern; i++) {
7427 ext = &obj->externs[i];
7428 if (ext->type == EXT_KCFG && !ext->is_set) {
7429 need_config = true;
7430 break;
7431 }
7432 }
7433 }
7434 if (need_config) {
7435 err = bpf_object__read_kconfig_file(obj, kcfg_data);
7436 if (err)
7437 return -EINVAL;
7438 }
7439 if (need_kallsyms) {
7440 err = bpf_object__read_kallsyms_file(obj);
7441 if (err)
7442 return -EINVAL;
7443 }
7444 if (need_vmlinux_btf) {
7445 err = bpf_object__resolve_ksyms_btf_id(obj);
7446 if (err)
7447 return -EINVAL;
7448 }
7449 for (i = 0; i < obj->nr_extern; i++) {
7450 ext = &obj->externs[i];
7451
7452 if (!ext->is_set && !ext->is_weak) {
7453 pr_warn("extern %s (strong) not resolved\n", ext->name);
7454 return -ESRCH;
7455 } else if (!ext->is_set) {
7456 pr_debug("extern %s (weak) not resolved, defaulting to zero\n",
7457 ext->name);
7458 }
7459 }
7460
7461 return 0;
7462}
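
/* The extern flavors resolved above, as they would appear in BPF C code
 * (all names illustrative):
 *
 *	extern __u32 LINUX_KERNEL_VERSION __kconfig;	(kernel version)
 *	extern int CONFIG_HZ __kconfig;			(Kconfig value)
 *	extern const int some_kernel_var __ksym;	(kernel symbol)
 */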
7463
7464static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const char *target_btf_path)
7465{
7466 int err, i;
7467
7468 if (!obj)
7469 return libbpf_err(-EINVAL);
7470
7471 if (obj->loaded) {
7472 pr_warn("object '%s': load can't be attempted twice\n", obj->name);
7473 return libbpf_err(-EINVAL);
7474 }
7475
7476 if (obj->gen_loader)
7477 bpf_gen__init(obj->gen_loader, extra_log_level, obj->nr_programs, obj->nr_maps);
7478
7479 err = bpf_object__probe_loading(obj);
7480 err = err ? : bpf_object__load_vmlinux_btf(obj, false);
7481 err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
7482 err = err ? : bpf_object__sanitize_and_load_btf(obj);
7483 err = err ? : bpf_object__sanitize_maps(obj);
7484 err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
7485 err = err ? : bpf_object__create_maps(obj);
7486 err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path);
7487 err = err ? : bpf_object__load_progs(obj, extra_log_level);
7488 err = err ? : bpf_object_init_prog_arrays(obj);
7489
7490 if (obj->gen_loader) {
7491 /* reset FDs */
7492 if (obj->btf)
7493 btf__set_fd(obj->btf, -1);
7494 for (i = 0; i < obj->nr_maps; i++)
7495 obj->maps[i].fd = -1;
7496 if (!err)
7497 err = bpf_gen__finish(obj->gen_loader, obj->nr_programs, obj->nr_maps);
7498 }
7499
7500 /* clean up fd_array */
7501 zfree(&obj->fd_array);
7502
7503 /* clean up module BTFs */
7504 for (i = 0; i < obj->btf_module_cnt; i++) {
7505 close(obj->btf_modules[i].fd);
7506 btf__free(obj->btf_modules[i].btf);
7507 free(obj->btf_modules[i].name);
7508 }
7509 free(obj->btf_modules);
7510
7511 /* clean up vmlinux BTF */
7512 btf__free(obj->btf_vmlinux);
7513 obj->btf_vmlinux = NULL;
7514
7515 obj->loaded = true;
7516
7517 if (err)
7518 goto out;
7519
7520 return 0;
7521out:
7522 /* unpin any maps that were auto-pinned during load */
7523 for (i = 0; i < obj->nr_maps; i++)
7524 if (obj->maps[i].pinned && !obj->maps[i].reused)
7525 bpf_map__unpin(&obj->maps[i], NULL);
7526
7527 bpf_object_unload(obj);
7528 pr_warn("failed to load object '%s'\n", obj->path);
7529 return libbpf_err(err);
7530}
7531
7532int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
7533{
7534 return bpf_object_load(attr->obj, attr->log_level, attr->target_btf_path);
7535}
7536
7537int bpf_object__load(struct bpf_object *obj)
7538{
7539 return bpf_object_load(obj, 0, NULL);
7540}
7541
7542static int make_parent_dir(const char *path)
7543{
7544 char *cp, errmsg[STRERR_BUFSIZE];
7545 char *dname, *dir;
7546 int err = 0;
7547
7548 dname = strdup(path);
7549 if (dname == NULL)
7550 return -ENOMEM;
7551
7552 dir = dirname(dname);
7553 if (mkdir(dir, 0700) && errno != EEXIST)
7554 err = -errno;
7555
7556 free(dname);
7557 if (err) {
7558 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
7559 pr_warn("failed to mkdir %s: %s\n", path, cp);
7560 }
7561 return err;
7562}
7563
7564static int check_path(const char *path)
7565{
7566 char *cp, errmsg[STRERR_BUFSIZE];
7567 struct statfs st_fs;
7568 char *dname, *dir;
7569 int err = 0;
7570
7571 if (path == NULL)
7572 return -EINVAL;
7573
7574 dname = strdup(path);
7575 if (dname == NULL)
7576 return -ENOMEM;
7577
7578 dir = dirname(dname);
7579 if (statfs(dir, &st_fs)) {
7580 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
7581 pr_warn("failed to statfs %s: %s\n", dir, cp);
7582 err = -errno;
7583 }
7584 free(dname);
7585
7586 if (!err && st_fs.f_type != BPF_FS_MAGIC) {
7587 pr_warn("specified path %s is not on BPF FS\n", path);
7588 err = -EINVAL;
7589 }
7590
7591 return err;
7592}
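
/* Note: pin paths must reside on a BPF FS mount, which check_path()
 * verifies via statfs() and BPF_FS_MAGIC. Typical setup (shell,
 * illustrative):
 *
 *	mount -t bpf bpf /sys/fs/bpf
 */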
7593
7594static int bpf_program_pin_instance(struct bpf_program *prog, const char *path, int instance)
7595{
7596 char *cp, errmsg[STRERR_BUFSIZE];
7597 int err;
7598
7599 err = make_parent_dir(path);
7600 if (err)
7601 return libbpf_err(err);
7602
7603 err = check_path(path);
7604 if (err)
7605 return libbpf_err(err);
7606
7607 if (prog == NULL) {
7608 pr_warn("invalid program pointer\n");
7609 return libbpf_err(-EINVAL);
7610 }
7611
7612 if (instance < 0 || instance >= prog->instances.nr) {
7613 pr_warn("invalid prog instance %d of prog %s (max %d)\n",
7614 instance, prog->name, prog->instances.nr);
7615 return libbpf_err(-EINVAL);
7616 }
7617
7618 if (bpf_obj_pin(prog->instances.fds[instance], path)) {
7619 err = -errno;
7620 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
7621 pr_warn("failed to pin program: %s\n", cp);
7622 return libbpf_err(err);
7623 }
7624 pr_debug("pinned program '%s'\n", path);
7625
7626 return 0;
7627}
7628
7629static int bpf_program_unpin_instance(struct bpf_program *prog, const char *path, int instance)
7630{
7631 int err;
7632
7633 err = check_path(path);
7634 if (err)
7635 return libbpf_err(err);
7636
7637 if (prog == NULL) {
7638 pr_warn("invalid program pointer\n");
7639 return libbpf_err(-EINVAL);
7640 }
7641
7642 if (instance < 0 || instance >= prog->instances.nr) {
7643 pr_warn("invalid prog instance %d of prog %s (max %d)\n",
7644 instance, prog->name, prog->instances.nr);
7645 return libbpf_err(-EINVAL);
7646 }
7647
7648 err = unlink(path);
7649 if (err != 0)
7650 return libbpf_err(-errno);
7651
7652 pr_debug("unpinned program '%s'\n", path);
7653
7654 return 0;
7655}
7656
7657__attribute__((alias("bpf_program_pin_instance")))
7658int bpf_program__pin_instance(struct bpf_program *prog, const char *path, int instance);
7659
7660__attribute__((alias("bpf_program_unpin_instance")))
7661int bpf_program__unpin_instance(struct bpf_program *prog, const char *path, int instance);
7662
7663int bpf_program__pin(struct bpf_program *prog, const char *path)
7664{
7665 int i, err;
7666
7667 err = make_parent_dir(path);
7668 if (err)
7669 return libbpf_err(err);
7670
7671 err = check_path(path);
7672 if (err)
7673 return libbpf_err(err);
7674
7675 if (prog == NULL) {
7676 pr_warn("invalid program pointer\n");
7677 return libbpf_err(-EINVAL);
7678 }
7679
7680 if (prog->instances.nr <= 0) {
7681 pr_warn("no instances of prog %s to pin\n", prog->name);
7682 return libbpf_err(-EINVAL);
7683 }
7684
7685 if (prog->instances.nr == 1) {
7686 /* don't create subdirs when pinning a single instance */
7687 return bpf_program_pin_instance(prog, path, 0);
7688 }
7689
7690 for (i = 0; i < prog->instances.nr; i++) {
7691 char buf[PATH_MAX];
7692 int len;
7693
7694 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
7695 if (len < 0) {
7696 err = -EINVAL;
7697 goto err_unpin;
7698 } else if (len >= PATH_MAX) {
7699 err = -ENAMETOOLONG;
7700 goto err_unpin;
7701 }
7702
7703 err = bpf_program_pin_instance(prog, buf, i);
7704 if (err)
7705 goto err_unpin;
7706 }
7707
7708 return 0;
7709
7710err_unpin:
7711 for (i = i - 1; i >= 0; i--) {
7712 char buf[PATH_MAX];
7713 int len;
7714
7715 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
7716 if (len < 0)
7717 continue;
7718 else if (len >= PATH_MAX)
7719 continue;
7720
7721 bpf_program_unpin_instance(prog, buf, i);
7722 }
7723
7724 rmdir(path);
7725
7726 return libbpf_err(err);
7727}
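
/* Illustrative pinning sketch (assumes a loaded object and libbpf 1.0
 * error conventions; names and path are hypothetical):
 *
 *	struct bpf_program *prog;
 *
 *	prog = bpf_object__find_program_by_name(obj, "handle_tp");
 *	if (!prog || bpf_program__pin(prog, "/sys/fs/bpf/handle_tp"))
 *		return -errno;
 *
 * The pinned file keeps the program alive after the process exits;
 * bpf_program__unpin() below removes it.
 */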
7728
7729int bpf_program__unpin(struct bpf_program *prog, const char *path)
7730{
7731 int i, err;
7732
7733 err = check_path(path);
7734 if (err)
7735 return libbpf_err(err);
7736
7737 if (prog == NULL) {
7738 pr_warn("invalid program pointer\n");
7739 return libbpf_err(-EINVAL);
7740 }
7741
7742 if (prog->instances.nr <= 0) {
7743 pr_warn("no instances of prog %s to unpin\n", prog->name);
7744 return libbpf_err(-EINVAL);
7745 }
7746
7747 if (prog->instances.nr == 1) {
7748 /* don't create subdirs when unpinning a single instance */
7749 return bpf_program_unpin_instance(prog, path, 0);
7750 }
7751
7752 for (i = 0; i < prog->instances.nr; i++) {
7753 char buf[PATH_MAX];
7754 int len;
7755
7756 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
7757 if (len < 0)
7758 return libbpf_err(-EINVAL);
7759 else if (len >= PATH_MAX)
7760 return libbpf_err(-ENAMETOOLONG);
7761
7762 err = bpf_program_unpin_instance(prog, buf, i);
7763 if (err)
7764 return err;
7765 }
7766
7767 err = rmdir(path);
7768 if (err)
7769 return libbpf_err(-errno);
7770
7771 return 0;
7772}
7773
7774int bpf_map__pin(struct bpf_map *map, const char *path)
7775{
7776 char *cp, errmsg[STRERR_BUFSIZE];
7777 int err;
7778
7779 if (map == NULL) {
7780 pr_warn("invalid map pointer\n");
7781 return libbpf_err(-EINVAL);
7782 }
7783
7784 if (map->pin_path) {
7785 if (path && strcmp(path, map->pin_path)) {
7786 pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
7787 bpf_map__name(map), map->pin_path, path);
7788 return libbpf_err(-EINVAL);
7789 } else if (map->pinned) {
7790 pr_debug("map '%s' already pinned at '%s'; not re-pinning\n",
7791 bpf_map__name(map), map->pin_path);
7792 return 0;
7793 }
7794 } else {
7795 if (!path) {
7796 pr_warn("missing a path to pin map '%s' at\n",
7797 bpf_map__name(map));
7798 return libbpf_err(-EINVAL);
7799 } else if (map->pinned) {
7800 pr_warn("map '%s' already pinned\n", bpf_map__name(map));
7801 return libbpf_err(-EEXIST);
7802 }
7803
7804 map->pin_path = strdup(path);
7805 if (!map->pin_path) {
7806 err = -errno;
7807 goto out_err;
7808 }
7809 }
7810
7811 err = make_parent_dir(map->pin_path);
7812 if (err)
7813 return libbpf_err(err);
7814
7815 err = check_path(map->pin_path);
7816 if (err)
7817 return libbpf_err(err);
7818
7819 if (bpf_obj_pin(map->fd, map->pin_path)) {
7820 err = -errno;
7821 goto out_err;
7822 }
7823
7824 map->pinned = true;
7825 pr_debug("pinned map '%s'\n", map->pin_path);
7826
7827 return 0;
7828
7829out_err:
7830 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
7831 pr_warn("failed to pin map: %s\n", cp);
7832 return libbpf_err(err);
7833}
7834
7835int bpf_map__unpin(struct bpf_map *map, const char *path)
7836{
7837 int err;
7838
7839 if (map == NULL) {
7840 pr_warn("invalid map pointer\n");
7841 return libbpf_err(-EINVAL);
7842 }
7843
7844 if (map->pin_path) {
7845 if (path && strcmp(path, map->pin_path)) {
7846 pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
7847 bpf_map__name(map), map->pin_path, path);
7848 return libbpf_err(-EINVAL);
7849 }
7850 path = map->pin_path;
7851 } else if (!path) {
7852 pr_warn("no path to unpin map '%s' from\n",
7853 bpf_map__name(map));
7854 return libbpf_err(-EINVAL);
7855 }
7856
7857 err = check_path(path);
7858 if (err)
7859 return libbpf_err(err);
7860
7861 err = unlink(path);
7862 if (err != 0)
7863 return libbpf_err(-errno);
7864
7865 map->pinned = false;
7866 pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path);
7867
7868 return 0;
7869}
7870
7871int bpf_map__set_pin_path(struct bpf_map *map, const char *path)
7872{
7873 char *new = NULL;
7874
7875 if (path) {
7876 new = strdup(path);
7877 if (!new)
7878 return libbpf_err(-errno);
7879 }
7880
7881 free(map->pin_path);
7882 map->pin_path = new;
7883 return 0;
7884}
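
/* Illustrative auto-pinning sketch: a pin path set before load makes
 * bpf_object__load() pin the map at that path, or reuse a compatible map
 * already pinned there (map name and path are hypothetical):
 *
 *	struct bpf_map *map;
 *
 *	map = bpf_object__find_map_by_name(obj, "my_map");
 *	if (!map || bpf_map__set_pin_path(map, "/sys/fs/bpf/my_map"))
 *		return -EINVAL;
 */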
7885
7886const char *bpf_map__get_pin_path(const struct bpf_map *map)
7887{
7888 return map->pin_path;
7889}
7890
7891const char *bpf_map__pin_path(const struct bpf_map *map)
7892{
7893 return map->pin_path;
7894}
7895
7896bool bpf_map__is_pinned(const struct bpf_map *map)
7897{
7898 return map->pinned;
7899}
7900
7901static void sanitize_pin_path(char *s)
7902{
7903 /* bpffs disallows periods in path names, so e.g. ".rodata" is pinned as "_rodata" */
7904 while (*s) {
7905 if (*s == '.')
7906 *s = '_';
7907 s++;
7908 }
7909}
7910
7911int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
7912{
7913 struct bpf_map *map;
7914 int err;
7915
7916 if (!obj)
7917 return libbpf_err(-ENOENT);
7918
7919 if (!obj->loaded) {
7920 pr_warn("object not yet loaded; load it first\n");
7921 return libbpf_err(-ENOENT);
7922 }
7923
7924 bpf_object__for_each_map(map, obj) {
7925 char *pin_path = NULL;
7926 char buf[PATH_MAX];
7927
7928 if (map->skipped)
7929 continue;
7930
7931 if (path) {
7932 int len;
7933
7934 len = snprintf(buf, PATH_MAX, "%s/%s", path,
7935 bpf_map__name(map));
7936 if (len < 0) {
7937 err = -EINVAL;
7938 goto err_unpin_maps;
7939 } else if (len >= PATH_MAX) {
7940 err = -ENAMETOOLONG;
7941 goto err_unpin_maps;
7942 }
7943 sanitize_pin_path(buf);
7944 pin_path = buf;
7945 } else if (!map->pin_path) {
7946 continue;
7947 }
7948
7949 err = bpf_map__pin(map, pin_path);
7950 if (err)
7951 goto err_unpin_maps;
7952 }
7953
7954 return 0;
7955
7956err_unpin_maps:
7957 while ((map = bpf_object__prev_map(obj, map))) {
7958 if (!map->pin_path)
7959 continue;
7960
7961 bpf_map__unpin(map, NULL);
7962 }
7963
7964 return libbpf_err(err);
7965}
7966
7967int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
7968{
7969 struct bpf_map *map;
7970 int err;
7971
7972 if (!obj)
7973 return libbpf_err(-ENOENT);
7974
7975 bpf_object__for_each_map(map, obj) {
7976 char *pin_path = NULL;
7977 char buf[PATH_MAX];
7978
7979 if (path) {
7980 int len;
7981
7982 len = snprintf(buf, PATH_MAX, "%s/%s", path,
7983 bpf_map__name(map));
7984 if (len < 0)
7985 return libbpf_err(-EINVAL);
7986 else if (len >= PATH_MAX)
7987 return libbpf_err(-ENAMETOOLONG);
7988 sanitize_pin_path(buf);
7989 pin_path = buf;
7990 } else if (!map->pin_path) {
7991 continue;
7992 }
7993
7994 err = bpf_map__unpin(map, pin_path);
7995 if (err)
7996 return libbpf_err(err);
7997 }
7998
7999 return 0;
8000}
8001
8002int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
8003{
8004 struct bpf_program *prog;
8005 int err;
8006
8007 if (!obj)
8008 return libbpf_err(-ENOENT);
8009
8010 if (!obj->loaded) {
8011 pr_warn("object not yet loaded; load it first\n");
8012 return libbpf_err(-ENOENT);
8013 }
8014
8015 bpf_object__for_each_program(prog, obj) {
8016 char buf[PATH_MAX];
8017 int len;
8018
8019 len = snprintf(buf, PATH_MAX, "%s/%s", path,
8020 prog->pin_name);
8021 if (len < 0) {
8022 err = -EINVAL;
8023 goto err_unpin_programs;
8024 } else if (len >= PATH_MAX) {
8025 err = -ENAMETOOLONG;
8026 goto err_unpin_programs;
8027 }
8028
8029 err = bpf_program__pin(prog, buf);
8030 if (err)
8031 goto err_unpin_programs;
8032 }
8033
8034 return 0;
8035
8036err_unpin_programs:
8037 while ((prog = bpf_object__prev_program(obj, prog))) {
8038 char buf[PATH_MAX];
8039 int len;
8040
8041 len = snprintf(buf, PATH_MAX, "%s/%s", path,
8042 prog->pin_name);
8043 if (len < 0)
8044 continue;
8045 else if (len >= PATH_MAX)
8046 continue;
8047
8048 bpf_program__unpin(prog, buf);
8049 }
8050
8051 return libbpf_err(err);
8052}
8053
8054int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
8055{
8056 struct bpf_program *prog;
8057 int err;
8058
8059 if (!obj)
8060 return libbpf_err(-ENOENT);
8061
8062 bpf_object__for_each_program(prog, obj) {
8063 char buf[PATH_MAX];
8064 int len;
8065
8066 len = snprintf(buf, PATH_MAX, "%s/%s", path,
8067 prog->pin_name);
8068 if (len < 0)
8069 return libbpf_err(-EINVAL);
8070 else if (len >= PATH_MAX)
8071 return libbpf_err(-ENAMETOOLONG);
8072
8073 err = bpf_program__unpin(prog, buf);
8074 if (err)
8075 return libbpf_err(err);
8076 }
8077
8078 return 0;
8079}
8080
8081int bpf_object__pin(struct bpf_object *obj, const char *path)
8082{
8083 int err;
8084
8085 err = bpf_object__pin_maps(obj, path);
8086 if (err)
8087 return libbpf_err(err);
8088
8089 err = bpf_object__pin_programs(obj, path);
8090 if (err) {
8091 bpf_object__unpin_maps(obj, path);
8092 return libbpf_err(err);
8093 }
8094
8095 return 0;
8096}
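
/* Resulting bpffs layout for, e.g., bpf_object__pin(obj, "/sys/fs/bpf/myobj")
 * (path hypothetical): each map is pinned at /sys/fs/bpf/myobj/<map_name>
 * and each program at /sys/fs/bpf/myobj/<prog_pin_name>, per the two
 * helpers composed above.
 */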
8097
8098static void bpf_map__destroy(struct bpf_map *map)
8099{
8100 if (map->clear_priv)
8101 map->clear_priv(map, map->priv);
8102 map->priv = NULL;
8103 map->clear_priv = NULL;
8104
8105 if (map->inner_map) {
8106 bpf_map__destroy(map->inner_map);
8107 zfree(&map->inner_map);
8108 }
8109
8110 zfree(&map->init_slots);
8111 map->init_slots_sz = 0;
8112
8113 if (map->mmaped) {
8114 munmap(map->mmaped, bpf_map_mmap_sz(map));
8115 map->mmaped = NULL;
8116 }
8117
8118 if (map->st_ops) {
8119 zfree(&map->st_ops->data);
8120 zfree(&map->st_ops->progs);
8121 zfree(&map->st_ops->kern_func_off);
8122 zfree(&map->st_ops);
8123 }
8124
8125 zfree(&map->name);
8126 zfree(&map->real_name);
8127 zfree(&map->pin_path);
8128
8129 if (map->fd >= 0)
8130 zclose(map->fd);
8131}
8132
8133void bpf_object__close(struct bpf_object *obj)
8134{
8135 size_t i;
8136
8137 if (IS_ERR_OR_NULL(obj))
8138 return;
8139
8140 if (obj->clear_priv)
8141 obj->clear_priv(obj, obj->priv);
8142
8143 bpf_gen__free(obj->gen_loader);
8144 bpf_object__elf_finish(obj);
8145 bpf_object_unload(obj);
8146 btf__free(obj->btf);
8147 btf_ext__free(obj->btf_ext);
8148
8149 for (i = 0; i < obj->nr_maps; i++)
8150 bpf_map__destroy(&obj->maps[i]);
8151
8152 zfree(&obj->btf_custom_path);
8153 zfree(&obj->kconfig);
8154 zfree(&obj->externs);
8155 obj->nr_extern = 0;
8156
8157 zfree(&obj->maps);
8158 obj->nr_maps = 0;
8159
8160 if (obj->programs && obj->nr_programs) {
8161 for (i = 0; i < obj->nr_programs; i++)
8162 bpf_program__exit(&obj->programs[i]);
8163 }
8164 zfree(&obj->programs);
8165
8166 list_del(&obj->list);
8167 free(obj);
8168}
8169
8170struct bpf_object *
8171bpf_object__next(struct bpf_object *prev)
8172{
8173 struct bpf_object *next;
8174 bool strict = (libbpf_mode & LIBBPF_STRICT_NO_OBJECT_LIST);
8175
8176 if (strict)
8177 return NULL;
8178
8179 if (!prev)
8180 next = list_first_entry(&bpf_objects_list,
8181 struct bpf_object,
8182 list);
8183 else
8184 next = list_next_entry(prev, list);
8185
8186 /* empty list is noticed here, so no need to check on entry */
8187 if (&next->list == &bpf_objects_list)
8188 return NULL;
8189
8190 return next;
8191}
8192
8193const char *bpf_object__name(const struct bpf_object *obj)
8194{
8195 return obj ? obj->name : libbpf_err_ptr(-EINVAL);
8196}
8197
8198unsigned int bpf_object__kversion(const struct bpf_object *obj)
8199{
8200 return obj ? obj->kern_version : 0;
8201}
8202
8203struct btf *bpf_object__btf(const struct bpf_object *obj)
8204{
8205 return obj ? obj->btf : NULL;
8206}
8207
8208int bpf_object__btf_fd(const struct bpf_object *obj)
8209{
8210 return obj->btf ? btf__fd(obj->btf) : -1;
8211}
8212
8213int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version)
8214{
8215 if (obj->loaded)
8216 return libbpf_err(-EINVAL);
8217
8218 obj->kern_version = kern_version;
8219
8220 return 0;
8221}
8222
8223int bpf_object__set_priv(struct bpf_object *obj, void *priv,
8224 bpf_object_clear_priv_t clear_priv)
8225{
8226 if (obj->priv && obj->clear_priv)
8227 obj->clear_priv(obj, obj->priv);
8228
8229 obj->priv = priv;
8230 obj->clear_priv = clear_priv;
8231 return 0;
8232}
8233
8234void *bpf_object__priv(const struct bpf_object *obj)
8235{
8236 return obj ? obj->priv : libbpf_err_ptr(-EINVAL);
8237}
8238
8239int bpf_object__gen_loader(struct bpf_object *obj, struct gen_loader_opts *opts)
8240{
8241 struct bpf_gen *gen;
8242
8243 if (!opts)
8244 return -EFAULT;
8245 if (!OPTS_VALID(opts, gen_loader_opts))
8246 return -EINVAL;
8247 gen = calloc(1, sizeof(*gen));
8248 if (!gen)
8249 return -ENOMEM;
8250 gen->opts = opts;
8251 obj->gen_loader = gen;
8252 return 0;
8253}
8254
8255static struct bpf_program *
8256__bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
8257 bool forward)
8258{
8259 size_t nr_programs = obj->nr_programs;
8260 ssize_t idx;
8261
8262 if (!nr_programs)
8263 return NULL;
8264
8265 if (!p)
8266 /* iterate from the beginning (or the end, when going backwards) */
8267 return forward ? &obj->programs[0] :
8268 &obj->programs[nr_programs - 1];
8269
8270 if (p->obj != obj) {
8271 pr_warn("error: program handler doesn't match object\n");
8272 return errno = EINVAL, NULL;
8273 }
8274
8275 idx = (p - obj->programs) + (forward ? 1 : -1);
8276 if (idx >= obj->nr_programs || idx < 0)
8277 return NULL;
8278 return &obj->programs[idx];
8279}
8280
8281struct bpf_program *
8282bpf_program__next(struct bpf_program *prev, const struct bpf_object *obj)
8283{
8284 return bpf_object__next_program(obj, prev);
8285}
8286
8287struct bpf_program *
8288bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prev)
8289{
8290 struct bpf_program *prog = prev;
8291
8292 do {
8293 prog = __bpf_program__iter(prog, obj, true);
8294 } while (prog && prog_is_subprog(obj, prog));
8295
8296 return prog;
8297}
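
/* Typical iteration goes through the bpf_object__for_each_program()
 * macro from libbpf.h, which is built on bpf_object__next_program():
 *
 *	struct bpf_program *prog;
 *
 *	bpf_object__for_each_program(prog, obj)
 *		bpf_program__set_log_level(prog, 1);
 */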
8298
8299struct bpf_program *
8300bpf_program__prev(struct bpf_program *next, const struct bpf_object *obj)
8301{
8302 return bpf_object__prev_program(obj, next);
8303}
8304
8305struct bpf_program *
8306bpf_object__prev_program(const struct bpf_object *obj, struct bpf_program *next)
8307{
8308 struct bpf_program *prog = next;
8309
8310 do {
8311 prog = __bpf_program__iter(prog, obj, false);
8312 } while (prog && prog_is_subprog(obj, prog));
8313
8314 return prog;
8315}
8316
8317int bpf_program__set_priv(struct bpf_program *prog, void *priv,
8318 bpf_program_clear_priv_t clear_priv)
8319{
8320 if (prog->priv && prog->clear_priv)
8321 prog->clear_priv(prog, prog->priv);
8322
8323 prog->priv = priv;
8324 prog->clear_priv = clear_priv;
8325 return 0;
8326}
8327
8328void *bpf_program__priv(const struct bpf_program *prog)
8329{
8330 return prog ? prog->priv : libbpf_err_ptr(-EINVAL);
8331}
8332
8333void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
8334{
8335 prog->prog_ifindex = ifindex;
8336}
8337
8338const char *bpf_program__name(const struct bpf_program *prog)
8339{
8340 return prog->name;
8341}
8342
8343const char *bpf_program__section_name(const struct bpf_program *prog)
8344{
8345 return prog->sec_name;
8346}
8347
8348const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy)
8349{
8350 const char *title;
8351
8352 title = prog->sec_name;
8353 if (needs_copy) {
8354 title = strdup(title);
8355 if (!title) {
8356 pr_warn("failed to strdup program title\n");
8357 return libbpf_err_ptr(-ENOMEM);
8358 }
8359 }
8360
8361 return title;
8362}
8363
8364bool bpf_program__autoload(const struct bpf_program *prog)
8365{
8366 return prog->load;
8367}
8368
8369int bpf_program__set_autoload(struct bpf_program *prog, bool autoload)
8370{
8371 if (prog->obj->loaded)
8372 return libbpf_err(-EINVAL);
8373
8374 prog->load = autoload;
8375 return 0;
8376}
8377
8378static int bpf_program_nth_fd(const struct bpf_program *prog, int n);
8379
8380int bpf_program__fd(const struct bpf_program *prog)
8381{
8382 return bpf_program_nth_fd(prog, 0);
8383}
8384
8385size_t bpf_program__size(const struct bpf_program *prog)
8386{
8387 return prog->insns_cnt * BPF_INSN_SZ;
8388}
8389
8390const struct bpf_insn *bpf_program__insns(const struct bpf_program *prog)
8391{
8392 return prog->insns;
8393}
8394
8395size_t bpf_program__insn_cnt(const struct bpf_program *prog)
8396{
8397 return prog->insns_cnt;
8398}
8399
8400int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
8401 bpf_program_prep_t prep)
8402{
8403 int *instances_fds;
8404
8405 if (nr_instances <= 0 || !prep)
8406 return libbpf_err(-EINVAL);
8407
8408 if (prog->instances.nr > 0 || prog->instances.fds) {
8409 pr_warn("Can't set pre-processor after loading\n");
8410 return libbpf_err(-EINVAL);
8411 }
8412
8413 instances_fds = malloc(sizeof(int) * nr_instances);
8414 if (!instances_fds) {
8415 pr_warn("failed to allocate memory for instance fds\n");
8416 return libbpf_err(-ENOMEM);
8417 }
8418
8419 /* fill all fds with -1: every byte of -1 is 0xff, so memset works */
8420 memset(instances_fds, -1, sizeof(int) * nr_instances);
8421
8422 prog->instances.nr = nr_instances;
8423 prog->instances.fds = instances_fds;
8424 prog->preprocessor = prep;
8425 return 0;
8426}
8427
8428__attribute__((alias("bpf_program_nth_fd")))
8429int bpf_program__nth_fd(const struct bpf_program *prog, int n);
8430
8431static int bpf_program_nth_fd(const struct bpf_program *prog, int n)
8432{
8433 int fd;
8434
8435 if (!prog)
8436 return libbpf_err(-EINVAL);
8437
8438 if (n >= prog->instances.nr || n < 0) {
8439 pr_warn("Can't get the %dth fd from program %s: only %d instances\n",
8440 n, prog->name, prog->instances.nr);
8441 return libbpf_err(-EINVAL);
8442 }
8443
8444 fd = prog->instances.fds[n];
8445 if (fd < 0) {
8446 pr_warn("%dth instance of program '%s' is invalid\n",
8447 n, prog->name);
8448 return libbpf_err(-ENOENT);
8449 }
8450
8451 return fd;
8452}
8453
8454enum bpf_prog_type bpf_program__get_type(const struct bpf_program *prog)
8455{
8456 return prog->type;
8457}
8458
8459void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
8460{
8461 prog->type = type;
8462}
8463
8464static bool bpf_program__is_type(const struct bpf_program *prog,
8465 enum bpf_prog_type type)
8466{
8467 return prog ? (prog->type == type) : false;
8468}
8469
8470#define BPF_PROG_TYPE_FNS(NAME, TYPE) \
8471int bpf_program__set_##NAME(struct bpf_program *prog) \
8472{ \
8473 if (!prog) \
8474 return libbpf_err(-EINVAL); \
8475 bpf_program__set_type(prog, TYPE); \
8476 return 0; \
8477} \
8478 \
8479bool bpf_program__is_##NAME(const struct bpf_program *prog) \
8480{ \
8481 return bpf_program__is_type(prog, TYPE); \
8482} \
8483
8484BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
8485BPF_PROG_TYPE_FNS(lsm, BPF_PROG_TYPE_LSM);
8486BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
8487BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
8488BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
8489BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
8490BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
8491BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
8492BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
8493BPF_PROG_TYPE_FNS(tracing, BPF_PROG_TYPE_TRACING);
8494BPF_PROG_TYPE_FNS(struct_ops, BPF_PROG_TYPE_STRUCT_OPS);
8495BPF_PROG_TYPE_FNS(extension, BPF_PROG_TYPE_EXT);
8496BPF_PROG_TYPE_FNS(sk_lookup, BPF_PROG_TYPE_SK_LOOKUP);
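
/* For reference, each BPF_PROG_TYPE_FNS() instantiation above expands to
 * a setter/predicate pair; e.g. the socket_filter line yields:
 *
 *	int bpf_program__set_socket_filter(struct bpf_program *prog);
 *	bool bpf_program__is_socket_filter(const struct bpf_program *prog);
 */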
8497
8498enum bpf_attach_type
8499bpf_program__get_expected_attach_type(const struct bpf_program *prog)
8500{
8501 return prog->expected_attach_type;
8502}
8503
8504void bpf_program__set_expected_attach_type(struct bpf_program *prog,
8505 enum bpf_attach_type type)
8506{
8507 prog->expected_attach_type = type;
8508}
8509
8510__u32 bpf_program__flags(const struct bpf_program *prog)
8511{
8512 return prog->prog_flags;
8513}
8514
8515int bpf_program__set_flags(struct bpf_program *prog, __u32 flags)
8516{
8517 if (prog->obj->loaded)
8518 return libbpf_err(-EBUSY);
8519
8520 prog->prog_flags = flags;
8521 return 0;
8522}
8523
8524__u32 bpf_program__log_level(const struct bpf_program *prog)
8525{
8526 return prog->log_level;
8527}
8528
8529int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_level)
8530{
8531 if (prog->obj->loaded)
8532 return libbpf_err(-EBUSY);
8533
8534 prog->log_level = log_level;
8535 return 0;
8536}
8537
8538const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_size)
8539{
8540 *log_size = prog->log_size;
8541 return prog->log_buf;
8542}
8543
8544int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size)
8545{
8546 if (log_size && !log_buf)
8547 return -EINVAL;
8548 if (log_size > UINT_MAX)
8549 return -EINVAL;
8550 if (prog->obj->loaded)
8551 return -EBUSY;
8552
8553 prog->log_buf = log_buf;
8554 prog->log_size = log_size;
8555 return 0;
8556}
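
/* Illustrative use of the per-program log knobs above (the buffer size is
 * an arbitrary choice):
 *
 *	static char verifier_log[1024 * 1024];
 *
 *	bpf_program__set_log_buf(prog, verifier_log, sizeof(verifier_log));
 *	bpf_program__set_log_level(prog, 1);
 *
 * After a failed bpf_object__load(), verifier_log holds the kernel
 * verifier's output for this program.
 */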
8557
8558#define SEC_DEF(sec_pfx, ptype, atype, flags, ...) { \
8559 .sec = sec_pfx, \
8560 .prog_type = BPF_PROG_TYPE_##ptype, \
8561 .expected_attach_type = atype, \
8562 .cookie = (long)(flags), \
8563 .preload_fn = libbpf_preload_prog, \
8564 __VA_ARGS__ \
8565}
8566
8567static struct bpf_link *attach_kprobe(const struct bpf_program *prog, long cookie);
8568static struct bpf_link *attach_tp(const struct bpf_program *prog, long cookie);
8569static struct bpf_link *attach_raw_tp(const struct bpf_program *prog, long cookie);
8570static struct bpf_link *attach_trace(const struct bpf_program *prog, long cookie);
8571static struct bpf_link *attach_lsm(const struct bpf_program *prog, long cookie);
8572static struct bpf_link *attach_iter(const struct bpf_program *prog, long cookie);
8573
8574static const struct bpf_sec_def section_defs[] = {
8575 SEC_DEF("socket", SOCKET_FILTER, 0, SEC_NONE | SEC_SLOPPY_PFX),
8576 SEC_DEF("sk_reuseport/migrate", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
8577 SEC_DEF("sk_reuseport", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
8578 SEC_DEF("kprobe/", KPROBE, 0, SEC_NONE, attach_kprobe),
8579 SEC_DEF("uprobe/", KPROBE, 0, SEC_NONE),
8580 SEC_DEF("kretprobe/", KPROBE, 0, SEC_NONE, attach_kprobe),
8581 SEC_DEF("uretprobe/", KPROBE, 0, SEC_NONE),
8582 SEC_DEF("tc", SCHED_CLS, 0, SEC_NONE),
8583 SEC_DEF("classifier", SCHED_CLS, 0, SEC_NONE | SEC_SLOPPY_PFX),
8584 SEC_DEF("action", SCHED_ACT, 0, SEC_NONE | SEC_SLOPPY_PFX),
8585 SEC_DEF("tracepoint/", TRACEPOINT, 0, SEC_NONE, attach_tp),
8586 SEC_DEF("tp/", TRACEPOINT, 0, SEC_NONE, attach_tp),
8587 SEC_DEF("raw_tracepoint/", RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp),
8588 SEC_DEF("raw_tp/", RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp),
8589 SEC_DEF("raw_tracepoint.w/", RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp),
8590 SEC_DEF("raw_tp.w/", RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp),
8591 SEC_DEF("tp_btf/", TRACING, BPF_TRACE_RAW_TP, SEC_ATTACH_BTF, attach_trace),
8592 SEC_DEF("fentry/", TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF, attach_trace),
8593 SEC_DEF("fmod_ret/", TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF, attach_trace),
8594 SEC_DEF("fexit/", TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF, attach_trace),
8595 SEC_DEF("fentry.s/", TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
8596 SEC_DEF("fmod_ret.s/", TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
8597 SEC_DEF("fexit.s/", TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
8598 SEC_DEF("freplace/", EXT, 0, SEC_ATTACH_BTF, attach_trace),
8599 SEC_DEF("lsm/", LSM, BPF_LSM_MAC, SEC_ATTACH_BTF, attach_lsm),
8600 SEC_DEF("lsm.s/", LSM, BPF_LSM_MAC, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_lsm),
8601 SEC_DEF("iter/", TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF, attach_iter),
8602 SEC_DEF("syscall", SYSCALL, 0, SEC_SLEEPABLE),
8603 SEC_DEF("xdp_devmap/", XDP, BPF_XDP_DEVMAP, SEC_ATTACHABLE),
8604 SEC_DEF("xdp_cpumap/", XDP, BPF_XDP_CPUMAP, SEC_ATTACHABLE),
8605 SEC_DEF("xdp", XDP, BPF_XDP, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
8606 SEC_DEF("perf_event", PERF_EVENT, 0, SEC_NONE | SEC_SLOPPY_PFX),
8607 SEC_DEF("lwt_in", LWT_IN, 0, SEC_NONE | SEC_SLOPPY_PFX),
8608 SEC_DEF("lwt_out", LWT_OUT, 0, SEC_NONE | SEC_SLOPPY_PFX),
8609 SEC_DEF("lwt_xmit", LWT_XMIT, 0, SEC_NONE | SEC_SLOPPY_PFX),
8610 SEC_DEF("lwt_seg6local", LWT_SEG6LOCAL, 0, SEC_NONE | SEC_SLOPPY_PFX),
8611 SEC_DEF("cgroup_skb/ingress", CGROUP_SKB, BPF_CGROUP_INET_INGRESS, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
8612 SEC_DEF("cgroup_skb/egress", CGROUP_SKB, BPF_CGROUP_INET_EGRESS, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
8613 SEC_DEF("cgroup/skb", CGROUP_SKB, 0, SEC_NONE | SEC_SLOPPY_PFX),
8614 SEC_DEF("cgroup/sock_create", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
8615 SEC_DEF("cgroup/sock_release", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_RELEASE, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
8616 SEC_DEF("cgroup/sock", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
8617 SEC_DEF("cgroup/post_bind4", CGROUP_SOCK, BPF_CGROUP_INET4_POST_BIND, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
8618 SEC_DEF("cgroup/post_bind6", CGROUP_SOCK, BPF_CGROUP_INET6_POST_BIND, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
8619 SEC_DEF("cgroup/dev", CGROUP_DEVICE, BPF_CGROUP_DEVICE, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
8620 SEC_DEF("sockops", SOCK_OPS, BPF_CGROUP_SOCK_OPS, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
8621 SEC_DEF("sk_skb/stream_parser", SK_SKB, BPF_SK_SKB_STREAM_PARSER, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
8622 SEC_DEF("sk_skb/stream_verdict",SK_SKB, BPF_SK_SKB_STREAM_VERDICT, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
8623 SEC_DEF("sk_skb", SK_SKB, 0, SEC_NONE | SEC_SLOPPY_PFX),
8624 SEC_DEF("sk_msg", SK_MSG, BPF_SK_MSG_VERDICT, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
8625 SEC_DEF("lirc_mode2", LIRC_MODE2, BPF_LIRC_MODE2, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
8626 SEC_DEF("flow_dissector", FLOW_DISSECTOR, BPF_FLOW_DISSECTOR, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
8627 SEC_DEF("cgroup/bind4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_BIND, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
8628 SEC_DEF("cgroup/bind6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_BIND, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
8629 SEC_DEF("cgroup/connect4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_CONNECT, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
8630 SEC_DEF("cgroup/connect6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_CONNECT, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
8631 SEC_DEF("cgroup/sendmsg4", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_SENDMSG, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
8632 SEC_DEF("cgroup/sendmsg6", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_SENDMSG, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
8633 SEC_DEF("cgroup/recvmsg4", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_RECVMSG, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
8634 SEC_DEF("cgroup/recvmsg6", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_RECVMSG, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
8635 SEC_DEF("cgroup/getpeername4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETPEERNAME, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
8636 SEC_DEF("cgroup/getpeername6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETPEERNAME, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
8637 SEC_DEF("cgroup/getsockname4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETSOCKNAME, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
8638 SEC_DEF("cgroup/getsockname6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETSOCKNAME, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
8639 SEC_DEF("cgroup/sysctl", CGROUP_SYSCTL, BPF_CGROUP_SYSCTL, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
8640 SEC_DEF("cgroup/getsockopt", CGROUP_SOCKOPT, BPF_CGROUP_GETSOCKOPT, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
8641 SEC_DEF("cgroup/setsockopt", CGROUP_SOCKOPT, BPF_CGROUP_SETSOCKOPT, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
8642 SEC_DEF("struct_ops+", STRUCT_OPS, 0, SEC_NONE),
8643 SEC_DEF("sk_lookup", SK_LOOKUP, BPF_SK_LOOKUP, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
8644};
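
/* Examples of ELF section names matched by the table above (attach
 * targets are hypothetical):
 *
 *	SEC("xdp")                 -> BPF_PROG_TYPE_XDP
 *	SEC("kprobe/do_sys_open")  -> BPF_PROG_TYPE_KPROBE, attach_kprobe()
 *	SEC("tp_btf/sched_switch") -> BPF_PROG_TYPE_TRACING + BPF_TRACE_RAW_TP
 *	SEC("cgroup/connect4")     -> BPF_PROG_TYPE_CGROUP_SOCK_ADDR
 */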
8645
8646#define MAX_TYPE_NAME_SIZE 32
8647
8648static const struct bpf_sec_def *find_sec_def(const char *sec_name)
8649{
8650 const struct bpf_sec_def *sec_def;
8651 enum sec_def_flags sec_flags;
8652 int i, n = ARRAY_SIZE(section_defs), len;
8653 bool strict = libbpf_mode & LIBBPF_STRICT_SEC_NAME;
8654
8655 for (i = 0; i < n; i++) {
8656 sec_def = &section_defs[i];
8657 sec_flags = sec_def->cookie;
8658 len = strlen(sec_def->sec);
8659
8660 /* "type/" definitions match any "type/<extras>" section name */
8661 if (sec_def->sec[len - 1] == '/') {
8662 if (str_has_pfx(sec_name, sec_def->sec))
8663 return sec_def;
8664 continue;
8665 }
8666
8667 /* "type+" means it can be either exact SEC("type") or
8668  * well-formed SEC("type/extras") with a proper '/' separator
8669  */
8670 if (sec_def->sec[len - 1] == '+') {
8671 len--;
8672 /* not even a prefix */
8673 if (strncmp(sec_name, sec_def->sec, len) != 0)
8674 continue;
8675 /* exact match or followed by '/' separator */
8676 if (sec_name[len] == '\0' || sec_name[len] == '/')
8677 return sec_def;
8678 continue;
8679 }
8680
8681
8682
8683
8684
8685
8686 if ((sec_flags & SEC_SLOPPY_PFX) && !strict) {
8687 if (str_has_pfx(sec_name, sec_def->sec))
8688 return sec_def;
8689 continue;
8690 }
8691
8692
8693
8694
8695 if (strcmp(sec_name, sec_def->sec) == 0)
8696 return sec_def;
8697 }
8698 return NULL;
8699}
8700
static char *libbpf_get_type_names(bool attach_type)
{
	int i, len = ARRAY_SIZE(section_defs) * MAX_TYPE_NAME_SIZE;
	char *buf;

	buf = malloc(len);
	if (!buf)
		return NULL;

	buf[0] = '\0';

	for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
		const struct bpf_sec_def *sec_def = &section_defs[i];

		if (attach_type) {
			if (sec_def->preload_fn != libbpf_preload_prog)
				continue;

			if (!(sec_def->cookie & SEC_ATTACHABLE))
				continue;
		}

		if (strlen(buf) + strlen(section_defs[i].sec) + 2 > len) {
			free(buf);
			return NULL;
		}
		strcat(buf, " ");
		strcat(buf, section_defs[i].sec);
	}

	return buf;
}
8733
8734int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
8735 enum bpf_attach_type *expected_attach_type)
8736{
8737 const struct bpf_sec_def *sec_def;
8738 char *type_names;
8739
8740 if (!name)
8741 return libbpf_err(-EINVAL);
8742
8743 sec_def = find_sec_def(name);
8744 if (sec_def) {
8745 *prog_type = sec_def->prog_type;
8746 *expected_attach_type = sec_def->expected_attach_type;
8747 return 0;
8748 }
8749
8750 pr_debug("failed to guess program type from ELF section '%s'\n", name);
8751 type_names = libbpf_get_type_names(false);
8752 if (type_names != NULL) {
8753 pr_debug("supported section(type) names are:%s\n", type_names);
8754 free(type_names);
8755 }
8756
8757 return libbpf_err(-ESRCH);
8758}
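
/* Usage sketch for the section-name lookup above; "cgroup/sysctl" is an
 * illustrative entry taken from section_defs:
 *
 *	enum bpf_prog_type prog_type;
 *	enum bpf_attach_type attach_type;
 *
 *	if (!libbpf_prog_type_by_name("cgroup/sysctl", &prog_type, &attach_type)) {
 *		// prog_type == BPF_PROG_TYPE_CGROUP_SYSCTL
 *		// attach_type == BPF_CGROUP_SYSCTL
 *	}
 */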
8759
8760static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
8761 size_t offset)
8762{
8763 struct bpf_map *map;
8764 size_t i;
8765
8766 for (i = 0; i < obj->nr_maps; i++) {
8767 map = &obj->maps[i];
8768 if (!bpf_map__is_struct_ops(map))
8769 continue;
8770 if (map->sec_offset <= offset &&
8771 offset - map->sec_offset < map->def.value_size)
8772 return map;
8773 }
8774
8775 return NULL;
8776}
8777
/* Collect struct_ops relocations from ELF and populate st_ops->progs[] */
8779static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
8780 Elf64_Shdr *shdr, Elf_Data *data)
8781{
8782 const struct btf_member *member;
8783 struct bpf_struct_ops *st_ops;
8784 struct bpf_program *prog;
8785 unsigned int shdr_idx;
8786 const struct btf *btf;
8787 struct bpf_map *map;
8788 unsigned int moff, insn_idx;
8789 const char *name;
8790 __u32 member_idx;
8791 Elf64_Sym *sym;
8792 Elf64_Rel *rel;
8793 int i, nrels;
8794
8795 btf = obj->btf;
8796 nrels = shdr->sh_size / shdr->sh_entsize;
8797 for (i = 0; i < nrels; i++) {
8798 rel = elf_rel_by_idx(data, i);
8799 if (!rel) {
8800 pr_warn("struct_ops reloc: failed to get %d reloc\n", i);
8801 return -LIBBPF_ERRNO__FORMAT;
8802 }
8803
8804 sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info));
8805 if (!sym) {
8806 pr_warn("struct_ops reloc: symbol %zx not found\n",
8807 (size_t)ELF64_R_SYM(rel->r_info));
8808 return -LIBBPF_ERRNO__FORMAT;
8809 }
8810
8811 name = elf_sym_str(obj, sym->st_name) ?: "<?>";
8812 map = find_struct_ops_map_by_offset(obj, rel->r_offset);
8813 if (!map) {
8814 pr_warn("struct_ops reloc: cannot find map at rel->r_offset %zu\n",
8815 (size_t)rel->r_offset);
8816 return -EINVAL;
8817 }
8818
8819 moff = rel->r_offset - map->sec_offset;
8820 shdr_idx = sym->st_shndx;
8821 st_ops = map->st_ops;
8822 pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel->r_offset %zu map->sec_offset %zu name %d (\'%s\')\n",
8823 map->name,
8824 (long long)(rel->r_info >> 32),
8825 (long long)sym->st_value,
8826 shdr_idx, (size_t)rel->r_offset,
8827 map->sec_offset, sym->st_name, name);
8828
8829 if (shdr_idx >= SHN_LORESERVE) {
8830 pr_warn("struct_ops reloc %s: rel->r_offset %zu shdr_idx %u unsupported non-static function\n",
8831 map->name, (size_t)rel->r_offset, shdr_idx);
8832 return -LIBBPF_ERRNO__RELOC;
8833 }
8834 if (sym->st_value % BPF_INSN_SZ) {
8835 pr_warn("struct_ops reloc %s: invalid target program offset %llu\n",
8836 map->name, (unsigned long long)sym->st_value);
8837 return -LIBBPF_ERRNO__FORMAT;
8838 }
8839 insn_idx = sym->st_value / BPF_INSN_SZ;
8840
8841 member = find_member_by_offset(st_ops->type, moff * 8);
8842 if (!member) {
8843 pr_warn("struct_ops reloc %s: cannot find member at moff %u\n",
8844 map->name, moff);
8845 return -EINVAL;
8846 }
8847 member_idx = member - btf_members(st_ops->type);
8848 name = btf__name_by_offset(btf, member->name_off);
8849
8850 if (!resolve_func_ptr(btf, member->type, NULL)) {
8851 pr_warn("struct_ops reloc %s: cannot relocate non func ptr %s\n",
8852 map->name, name);
8853 return -EINVAL;
8854 }
8855
8856 prog = find_prog_by_sec_insn(obj, shdr_idx, insn_idx);
8857 if (!prog) {
8858 pr_warn("struct_ops reloc %s: cannot find prog at shdr_idx %u to relocate func ptr %s\n",
8859 map->name, shdr_idx, name);
8860 return -EINVAL;
8861 }
8862
		/* prevent the use of BPF prog with invalid type */
8864 if (prog->type != BPF_PROG_TYPE_STRUCT_OPS) {
8865 pr_warn("struct_ops reloc %s: prog %s is not struct_ops BPF program\n",
8866 map->name, prog->name);
8867 return -EINVAL;
8868 }
8869
		/* if we haven't yet processed this BPF program, record proper
		 * attach_btf_id and member_idx
		 */
8873 if (!prog->attach_btf_id) {
8874 prog->attach_btf_id = st_ops->type_id;
8875 prog->expected_attach_type = member_idx;
8876 }
8877
		/* struct_ops BPF prog can be re-used between multiple
		 * .struct_ops as long as it's the same struct_ops struct
		 * definition and the same function pointer field
		 */
8882 if (prog->attach_btf_id != st_ops->type_id ||
8883 prog->expected_attach_type != member_idx) {
8884 pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n",
8885 map->name, prog->name, prog->sec_name, prog->type,
8886 prog->attach_btf_id, prog->expected_attach_type, name);
8887 return -EINVAL;
8888 }
8889
8890 st_ops->progs[member_idx] = prog;
8891 }
8892
8893 return 0;
8894}
8895
8896#define BTF_TRACE_PREFIX "btf_trace_"
8897#define BTF_LSM_PREFIX "bpf_lsm_"
8898#define BTF_ITER_PREFIX "bpf_iter_"
8899#define BTF_MAX_NAME_SIZE 128
8900
8901void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type,
8902 const char **prefix, int *kind)
8903{
8904 switch (attach_type) {
8905 case BPF_TRACE_RAW_TP:
8906 *prefix = BTF_TRACE_PREFIX;
8907 *kind = BTF_KIND_TYPEDEF;
8908 break;
8909 case BPF_LSM_MAC:
8910 *prefix = BTF_LSM_PREFIX;
8911 *kind = BTF_KIND_FUNC;
8912 break;
8913 case BPF_TRACE_ITER:
8914 *prefix = BTF_ITER_PREFIX;
8915 *kind = BTF_KIND_FUNC;
8916 break;
8917 default:
8918 *prefix = "";
8919 *kind = BTF_KIND_FUNC;
8920 }
8921}
8922
8923static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
8924 const char *name, __u32 kind)
8925{
8926 char btf_type_name[BTF_MAX_NAME_SIZE];
8927 int ret;
8928
8929 ret = snprintf(btf_type_name, sizeof(btf_type_name),
8930 "%s%s", prefix, name);
8931
	/* snprintf returns the number of characters written excluding the
	 * terminating null. So, if >= BTF_MAX_NAME_SIZE are written, it
	 * indicates truncation.
	 */
8935 if (ret < 0 || ret >= sizeof(btf_type_name))
8936 return -ENAMETOOLONG;
8937 return btf__find_by_name_kind(btf, btf_type_name, kind);
8938}
8939
8940static inline int find_attach_btf_id(struct btf *btf, const char *name,
8941 enum bpf_attach_type attach_type)
8942{
8943 const char *prefix;
8944 int kind;
8945
8946 btf_get_kernel_prefix_kind(attach_type, &prefix, &kind);
8947 return find_btf_by_prefix_kind(btf, prefix, name, kind);
8948}
8949
8950int libbpf_find_vmlinux_btf_id(const char *name,
8951 enum bpf_attach_type attach_type)
8952{
8953 struct btf *btf;
8954 int err;
8955
8956 btf = btf__load_vmlinux_btf();
8957 err = libbpf_get_error(btf);
8958 if (err) {
8959 pr_warn("vmlinux BTF is not found\n");
8960 return libbpf_err(err);
8961 }
8962
8963 err = find_attach_btf_id(btf, name, attach_type);
8964 if (err <= 0)
8965 pr_warn("%s is not found in vmlinux BTF\n", name);
8966
8967 btf__free(btf);
8968 return libbpf_err(err);
8969}
8970
8971static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
8972{
8973 struct bpf_prog_info info = {};
8974 __u32 info_len = sizeof(info);
8975 struct btf *btf;
8976 int err;
8977
8978 err = bpf_obj_get_info_by_fd(attach_prog_fd, &info, &info_len);
8979 if (err) {
8980 pr_warn("failed bpf_obj_get_info_by_fd for FD %d: %d\n",
8981 attach_prog_fd, err);
8982 return err;
8983 }
8984
8985 err = -EINVAL;
8986 if (!info.btf_id) {
8987 pr_warn("The target program doesn't have BTF\n");
8988 goto out;
8989 }
8990 btf = btf__load_from_kernel_by_id(info.btf_id);
8991 err = libbpf_get_error(btf);
8992 if (err) {
8993 pr_warn("Failed to get BTF %d of the program: %d\n", info.btf_id, err);
8994 goto out;
8995 }
8996 err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
8997 btf__free(btf);
8998 if (err <= 0) {
8999 pr_warn("%s is not found in prog's BTF\n", name);
9000 goto out;
9001 }
9002out:
9003 return err;
9004}
9005
9006static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name,
9007 enum bpf_attach_type attach_type,
9008 int *btf_obj_fd, int *btf_type_id)
9009{
9010 int ret, i;
9011
9012 ret = find_attach_btf_id(obj->btf_vmlinux, attach_name, attach_type);
9013 if (ret > 0) {
9014 *btf_obj_fd = 0;
9015 *btf_type_id = ret;
9016 return 0;
9017 }
9018 if (ret != -ENOENT)
9019 return ret;
9020
9021 ret = load_module_btfs(obj);
9022 if (ret)
9023 return ret;
9024
9025 for (i = 0; i < obj->btf_module_cnt; i++) {
9026 const struct module_btf *mod = &obj->btf_modules[i];
9027
9028 ret = find_attach_btf_id(mod->btf, attach_name, attach_type);
9029 if (ret > 0) {
9030 *btf_obj_fd = mod->fd;
9031 *btf_type_id = ret;
9032 return 0;
9033 }
9034 if (ret == -ENOENT)
9035 continue;
9036
9037 return ret;
9038 }
9039
9040 return -ESRCH;
9041}
9042
9043static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name,
9044 int *btf_obj_fd, int *btf_type_id)
9045{
9046 enum bpf_attach_type attach_type = prog->expected_attach_type;
9047 __u32 attach_prog_fd = prog->attach_prog_fd;
9048 int err = 0;
9049
	/* BPF program's BTF ID */
9051 if (attach_prog_fd) {
9052 err = libbpf_find_prog_btf_id(attach_name, attach_prog_fd);
9053 if (err < 0) {
9054 pr_warn("failed to find BPF program (FD %d) BTF ID for '%s': %d\n",
9055 attach_prog_fd, attach_name, err);
9056 return err;
9057 }
9058 *btf_obj_fd = 0;
9059 *btf_type_id = err;
9060 return 0;
9061 }
9062
	/* kernel/module BTF ID */
9064 if (prog->obj->gen_loader) {
9065 bpf_gen__record_attach_target(prog->obj->gen_loader, attach_name, attach_type);
9066 *btf_obj_fd = 0;
9067 *btf_type_id = 1;
9068 } else {
9069 err = find_kernel_btf_id(prog->obj, attach_name, attach_type, btf_obj_fd, btf_type_id);
9070 }
9071 if (err) {
9072 pr_warn("failed to find kernel BTF type ID of '%s': %d\n", attach_name, err);
9073 return err;
9074 }
9075 return 0;
9076}
9077
9078int libbpf_attach_type_by_name(const char *name,
9079 enum bpf_attach_type *attach_type)
9080{
9081 char *type_names;
9082 const struct bpf_sec_def *sec_def;
9083
9084 if (!name)
9085 return libbpf_err(-EINVAL);
9086
9087 sec_def = find_sec_def(name);
9088 if (!sec_def) {
9089 pr_debug("failed to guess attach type based on ELF section name '%s'\n", name);
9090 type_names = libbpf_get_type_names(true);
9091 if (type_names != NULL) {
9092 pr_debug("attachable section(type) names are:%s\n", type_names);
9093 free(type_names);
9094 }
9095
9096 return libbpf_err(-EINVAL);
9097 }
9098
9099 if (sec_def->preload_fn != libbpf_preload_prog)
9100 return libbpf_err(-EINVAL);
9101 if (!(sec_def->cookie & SEC_ATTACHABLE))
9102 return libbpf_err(-EINVAL);
9103
9104 *attach_type = sec_def->expected_attach_type;
9105 return 0;
9106}
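
/* Companion sketch for the attach-type lookup above; "cgroup/bind4" comes
 * from the section table:
 *
 *	enum bpf_attach_type at;
 *
 *	if (!libbpf_attach_type_by_name("cgroup/bind4", &at))
 *		// at == BPF_CGROUP_INET4_BIND
 */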
9107
9108int bpf_map__fd(const struct bpf_map *map)
9109{
9110 return map ? map->fd : libbpf_err(-EINVAL);
9111}
9112
9113const struct bpf_map_def *bpf_map__def(const struct bpf_map *map)
9114{
9115 return map ? &map->def : libbpf_err_ptr(-EINVAL);
9116}
9117
9118static bool map_uses_real_name(const struct bpf_map *map)
9119{
	/* Internal .data and .rodata maps normally carry a "canonical"
	 * "<obj>.data"-style name. Custom .data.* and .rodata.* sections,
	 * however, have to be referred to by their real ELF section name,
	 * which is what this check detects.
	 */
9126 if (map->libbpf_type == LIBBPF_MAP_DATA && strcmp(map->real_name, DATA_SEC) != 0)
9127 return true;
9128 if (map->libbpf_type == LIBBPF_MAP_RODATA && strcmp(map->real_name, RODATA_SEC) != 0)
9129 return true;
9130 return false;
9131}
9132
9133const char *bpf_map__name(const struct bpf_map *map)
9134{
9135 if (!map)
9136 return NULL;
9137
9138 if (map_uses_real_name(map))
9139 return map->real_name;
9140
9141 return map->name;
9142}
9143
9144enum bpf_map_type bpf_map__type(const struct bpf_map *map)
9145{
9146 return map->def.type;
9147}
9148
9149int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type)
9150{
9151 if (map->fd >= 0)
9152 return libbpf_err(-EBUSY);
9153 map->def.type = type;
9154 return 0;
9155}
9156
9157__u32 bpf_map__map_flags(const struct bpf_map *map)
9158{
9159 return map->def.map_flags;
9160}
9161
9162int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags)
9163{
9164 if (map->fd >= 0)
9165 return libbpf_err(-EBUSY);
9166 map->def.map_flags = flags;
9167 return 0;
9168}
9169
9170__u64 bpf_map__map_extra(const struct bpf_map *map)
9171{
9172 return map->map_extra;
9173}
9174
9175int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra)
9176{
9177 if (map->fd >= 0)
9178 return libbpf_err(-EBUSY);
9179 map->map_extra = map_extra;
9180 return 0;
9181}
9182
9183__u32 bpf_map__numa_node(const struct bpf_map *map)
9184{
9185 return map->numa_node;
9186}
9187
9188int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node)
9189{
9190 if (map->fd >= 0)
9191 return libbpf_err(-EBUSY);
9192 map->numa_node = numa_node;
9193 return 0;
9194}
9195
9196__u32 bpf_map__key_size(const struct bpf_map *map)
9197{
9198 return map->def.key_size;
9199}
9200
9201int bpf_map__set_key_size(struct bpf_map *map, __u32 size)
9202{
9203 if (map->fd >= 0)
9204 return libbpf_err(-EBUSY);
9205 map->def.key_size = size;
9206 return 0;
9207}
9208
9209__u32 bpf_map__value_size(const struct bpf_map *map)
9210{
9211 return map->def.value_size;
9212}
9213
9214int bpf_map__set_value_size(struct bpf_map *map, __u32 size)
9215{
9216 if (map->fd >= 0)
9217 return libbpf_err(-EBUSY);
9218 map->def.value_size = size;
9219 return 0;
9220}
9221
9222__u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
9223{
9224 return map ? map->btf_key_type_id : 0;
9225}
9226
9227__u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
9228{
9229 return map ? map->btf_value_type_id : 0;
9230}
9231
9232int bpf_map__set_priv(struct bpf_map *map, void *priv,
9233 bpf_map_clear_priv_t clear_priv)
9234{
9235 if (!map)
9236 return libbpf_err(-EINVAL);
9237
9238 if (map->priv) {
9239 if (map->clear_priv)
9240 map->clear_priv(map, map->priv);
9241 }
9242
9243 map->priv = priv;
9244 map->clear_priv = clear_priv;
9245 return 0;
9246}
9247
9248void *bpf_map__priv(const struct bpf_map *map)
9249{
9250 return map ? map->priv : libbpf_err_ptr(-EINVAL);
9251}
9252
9253int bpf_map__set_initial_value(struct bpf_map *map,
9254 const void *data, size_t size)
9255{
9256 if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG ||
9257 size != map->def.value_size || map->fd >= 0)
9258 return libbpf_err(-EINVAL);
9259
9260 memcpy(map->mmaped, data, size);
9261 return 0;
9262}
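
/* Sketch of pre-load initialization of an internal (e.g., .rodata) map.
 * "obj" and the config struct are hypothetical; the size passed in must
 * match the map's value size exactly and the object must not be loaded yet:
 *
 *	struct { int debug_level; } cfg = { .debug_level = 2 };
 *	struct bpf_map *map = bpf_object__find_map_by_name(obj, ".rodata");
 *
 *	if (map)
 *		bpf_map__set_initial_value(map, &cfg, sizeof(cfg));
 */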
9263
9264const void *bpf_map__initial_value(struct bpf_map *map, size_t *psize)
9265{
9266 if (!map->mmaped)
9267 return NULL;
9268 *psize = map->def.value_size;
9269 return map->mmaped;
9270}
9271
9272bool bpf_map__is_offload_neutral(const struct bpf_map *map)
9273{
9274 return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
9275}
9276
9277bool bpf_map__is_internal(const struct bpf_map *map)
9278{
9279 return map->libbpf_type != LIBBPF_MAP_UNSPEC;
9280}
9281
9282__u32 bpf_map__ifindex(const struct bpf_map *map)
9283{
9284 return map->map_ifindex;
9285}
9286
9287int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
9288{
9289 if (map->fd >= 0)
9290 return libbpf_err(-EBUSY);
9291 map->map_ifindex = ifindex;
9292 return 0;
9293}
9294
9295int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
9296{
9297 if (!bpf_map_type__is_map_in_map(map->def.type)) {
9298 pr_warn("error: unsupported map type\n");
9299 return libbpf_err(-EINVAL);
9300 }
9301 if (map->inner_map_fd != -1) {
9302 pr_warn("error: inner_map_fd already specified\n");
9303 return libbpf_err(-EINVAL);
9304 }
9305 if (map->inner_map) {
9306 bpf_map__destroy(map->inner_map);
9307 zfree(&map->inner_map);
9308 }
9309 map->inner_map_fd = fd;
9310 return 0;
9311}
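
/* Sketch: supplying a prototype inner map for a map-in-map before load;
 * the map names are hypothetical:
 *
 *	struct bpf_map *outer = bpf_object__find_map_by_name(obj, "outer_map");
 *	int inner_fd = bpf_map__fd(bpf_object__find_map_by_name(obj, "proto"));
 *
 *	bpf_map__set_inner_map_fd(outer, inner_fd);
 */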
9312
9313static struct bpf_map *
9314__bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
9315{
9316 ssize_t idx;
9317 struct bpf_map *s, *e;
9318
9319 if (!obj || !obj->maps)
9320 return errno = EINVAL, NULL;
9321
9322 s = obj->maps;
9323 e = obj->maps + obj->nr_maps;
9324
9325 if ((m < s) || (m >= e)) {
9326 pr_warn("error in %s: map handler doesn't belong to object\n",
9327 __func__);
9328 return errno = EINVAL, NULL;
9329 }
9330
9331 idx = (m - obj->maps) + i;
9332 if (idx >= obj->nr_maps || idx < 0)
9333 return NULL;
9334 return &obj->maps[idx];
9335}
9336
9337struct bpf_map *
9338bpf_map__next(const struct bpf_map *prev, const struct bpf_object *obj)
9339{
9340 return bpf_object__next_map(obj, prev);
9341}
9342
9343struct bpf_map *
9344bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
9345{
9346 if (prev == NULL)
9347 return obj->maps;
9348
9349 return __bpf_map__iter(prev, obj, 1);
9350}
9351
9352struct bpf_map *
9353bpf_map__prev(const struct bpf_map *next, const struct bpf_object *obj)
9354{
9355 return bpf_object__prev_map(obj, next);
9356}
9357
9358struct bpf_map *
9359bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *next)
9360{
9361 if (next == NULL) {
9362 if (!obj->nr_maps)
9363 return NULL;
9364 return obj->maps + obj->nr_maps - 1;
9365 }
9366
9367 return __bpf_map__iter(next, obj, -1);
9368}
9369
9370struct bpf_map *
9371bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name)
9372{
9373 struct bpf_map *pos;
9374
9375 bpf_object__for_each_map(pos, obj) {
		/* if it's a special internal map name (which always starts
		 * with dot) then check if that special name matches the
		 * real map name (ELF section name)
		 */
9380 if (name[0] == '.') {
9381 if (pos->real_name && strcmp(pos->real_name, name) == 0)
9382 return pos;
9383 continue;
9384 }
9385
9386 if (map_uses_real_name(pos)) {
9387 if (strcmp(pos->real_name, name) == 0)
9388 return pos;
9389 continue;
9390 }
9391 if (strcmp(pos->name, name) == 0)
9392 return pos;
9393 }
9394 return errno = ENOENT, NULL;
9395}
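
/* Lookup sketch: both the real (ELF section) name of internal maps and the
 * user-given name of regular maps work; "obj" is hypothetical:
 *
 *	struct bpf_map *m1 = bpf_object__find_map_by_name(obj, ".rodata");
 *	struct bpf_map *m2 = bpf_object__find_map_by_name(obj, "my_hash_map");
 */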
9396
9397int
9398bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name)
9399{
9400 return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
9401}
9402
9403struct bpf_map *
9404bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
9405{
9406 return libbpf_err_ptr(-ENOTSUP);
9407}
9408
9409long libbpf_get_error(const void *ptr)
9410{
9411 if (!IS_ERR_OR_NULL(ptr))
9412 return 0;
9413
9414 if (IS_ERR(ptr))
9415 errno = -PTR_ERR(ptr);
9416
9417
9418
9419
9420
9421
9422 return -errno;
9423}
9424
9425__attribute__((alias("bpf_prog_load_xattr2")))
9426int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
9427 struct bpf_object **pobj, int *prog_fd);
9428
9429static int bpf_prog_load_xattr2(const struct bpf_prog_load_attr *attr,
9430 struct bpf_object **pobj, int *prog_fd)
9431{
9432 struct bpf_object_open_attr open_attr = {};
9433 struct bpf_program *prog, *first_prog = NULL;
9434 struct bpf_object *obj;
9435 struct bpf_map *map;
9436 int err;
9437
9438 if (!attr)
9439 return libbpf_err(-EINVAL);
9440 if (!attr->file)
9441 return libbpf_err(-EINVAL);
9442
9443 open_attr.file = attr->file;
9444 open_attr.prog_type = attr->prog_type;
9445
9446 obj = bpf_object__open_xattr(&open_attr);
9447 err = libbpf_get_error(obj);
9448 if (err)
9449 return libbpf_err(-ENOENT);
9450
9451 bpf_object__for_each_program(prog, obj) {
9452 enum bpf_attach_type attach_type = attr->expected_attach_type;
9453
		/*
		 * to preserve backwards compatibility, bpf_prog_load treats
		 * attr->prog_type, if specified, as an override to whatever
		 * bpf_object__open guessed
		 */
9458 if (attr->prog_type != BPF_PROG_TYPE_UNSPEC) {
9459 bpf_program__set_type(prog, attr->prog_type);
9460 bpf_program__set_expected_attach_type(prog,
9461 attach_type);
9462 }
9463 if (bpf_program__get_type(prog) == BPF_PROG_TYPE_UNSPEC) {
			/*
			 * the section name didn't identify a program type and
			 * the caller didn't provide a fallback type, so the
			 * file can't be loaded
			 */
9468 bpf_object__close(obj);
9469 return libbpf_err(-EINVAL);
9470 }
9471
9472 prog->prog_ifindex = attr->ifindex;
9473 prog->log_level = attr->log_level;
9474 prog->prog_flags |= attr->prog_flags;
9475 if (!first_prog)
9476 first_prog = prog;
9477 }
9478
9479 bpf_object__for_each_map(map, obj) {
9480 if (!bpf_map__is_offload_neutral(map))
9481 map->map_ifindex = attr->ifindex;
9482 }
9483
9484 if (!first_prog) {
9485 pr_warn("object file doesn't contain bpf program\n");
9486 bpf_object__close(obj);
9487 return libbpf_err(-ENOENT);
9488 }
9489
9490 err = bpf_object__load(obj);
9491 if (err) {
9492 bpf_object__close(obj);
9493 return libbpf_err(err);
9494 }
9495
9496 *pobj = obj;
9497 *prog_fd = bpf_program__fd(first_prog);
9498 return 0;
9499}
9500
9501COMPAT_VERSION(bpf_prog_load_deprecated, bpf_prog_load, LIBBPF_0.0.1)
9502int bpf_prog_load_deprecated(const char *file, enum bpf_prog_type type,
9503 struct bpf_object **pobj, int *prog_fd)
9504{
9505 struct bpf_prog_load_attr attr;
9506
9507 memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
9508 attr.file = file;
9509 attr.prog_type = type;
9510 attr.expected_attach_type = 0;
9511
9512 return bpf_prog_load_xattr2(&attr, pobj, prog_fd);
9513}
9514
9515struct bpf_link {
9516 int (*detach)(struct bpf_link *link);
9517 void (*dealloc)(struct bpf_link *link);
9518 char *pin_path;
9519 int fd;
9520 bool disconnected;
9521};
9522
/* Replace link's underlying BPF program with the new one */
9524int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog)
9525{
9526 int ret;
9527
9528 ret = bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), NULL);
9529 return libbpf_err_errno(ret);
9530}
9531
/* Release "ownership" of underlying BPF resource (typically, a BPF program
 * attached to some BPF hook, e.g., tracepoint, kprobe, etc). Disconnected
 * links, when destroyed through bpf_link__destroy(), won't attempt to
 * detach/unregister that BPF resource. This is useful in situations where,
 * say, an attached BPF program has to outlive the userspace process that
 * attached it. Depending on the type of BPF program, extra steps (like
 * pinning the BPF program in BPF FS) might be necessary to prevent process
 * exit from triggering automatic detachment and cleanup inside the kernel.
 */
9542void bpf_link__disconnect(struct bpf_link *link)
9543{
9544 link->disconnected = true;
9545}
9546
9547int bpf_link__destroy(struct bpf_link *link)
9548{
9549 int err = 0;
9550
9551 if (IS_ERR_OR_NULL(link))
9552 return 0;
9553
9554 if (!link->disconnected && link->detach)
9555 err = link->detach(link);
9556 if (link->pin_path)
9557 free(link->pin_path);
9558 if (link->dealloc)
9559 link->dealloc(link);
9560 else
9561 free(link);
9562
9563 return libbpf_err(err);
9564}
9565
9566int bpf_link__fd(const struct bpf_link *link)
9567{
9568 return link->fd;
9569}
9570
9571const char *bpf_link__pin_path(const struct bpf_link *link)
9572{
9573 return link->pin_path;
9574}
9575
9576static int bpf_link__detach_fd(struct bpf_link *link)
9577{
9578 return libbpf_err_errno(close(link->fd));
9579}
9580
9581struct bpf_link *bpf_link__open(const char *path)
9582{
9583 struct bpf_link *link;
9584 int fd;
9585
9586 fd = bpf_obj_get(path);
9587 if (fd < 0) {
9588 fd = -errno;
9589 pr_warn("failed to open link at %s: %d\n", path, fd);
9590 return libbpf_err_ptr(fd);
9591 }
9592
9593 link = calloc(1, sizeof(*link));
9594 if (!link) {
9595 close(fd);
9596 return libbpf_err_ptr(-ENOMEM);
9597 }
9598 link->detach = &bpf_link__detach_fd;
9599 link->fd = fd;
9600
9601 link->pin_path = strdup(path);
9602 if (!link->pin_path) {
9603 bpf_link__destroy(link);
9604 return libbpf_err_ptr(-ENOMEM);
9605 }
9606
9607 return link;
9608}
9609
9610int bpf_link__detach(struct bpf_link *link)
9611{
9612 return bpf_link_detach(link->fd) ? -errno : 0;
9613}
9614
9615int bpf_link__pin(struct bpf_link *link, const char *path)
9616{
9617 int err;
9618
9619 if (link->pin_path)
9620 return libbpf_err(-EBUSY);
9621 err = make_parent_dir(path);
9622 if (err)
9623 return libbpf_err(err);
9624 err = check_path(path);
9625 if (err)
9626 return libbpf_err(err);
9627
9628 link->pin_path = strdup(path);
9629 if (!link->pin_path)
9630 return libbpf_err(-ENOMEM);
9631
9632 if (bpf_obj_pin(link->fd, link->pin_path)) {
9633 err = -errno;
9634 zfree(&link->pin_path);
9635 return libbpf_err(err);
9636 }
9637
9638 pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path);
9639 return 0;
9640}
9641
9642int bpf_link__unpin(struct bpf_link *link)
9643{
9644 int err;
9645
9646 if (!link->pin_path)
9647 return libbpf_err(-EINVAL);
9648
9649 err = unlink(link->pin_path);
9650 if (err != 0)
9651 return -errno;
9652
9653 pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path);
9654 zfree(&link->pin_path);
9655 return 0;
9656}
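
/* Pinning sketch: keep a link alive past process exit by pinning it in
 * BPF FS, then remove the pin later; the path is illustrative:
 *
 *	if (!bpf_link__pin(link, "/sys/fs/bpf/my_link")) {
 *		bpf_link__disconnect(link);	// don't detach on destroy
 *		bpf_link__destroy(link);
 *	}
 *	// later, possibly from another process:
 *	link = bpf_link__open("/sys/fs/bpf/my_link");
 *	bpf_link__unpin(link);
 */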
9657
9658struct bpf_link_perf {
9659 struct bpf_link link;
9660 int perf_event_fd;
	/* name and flavor of the legacy kprobe/uprobe event, if one was used,
	 * so it can be removed on detach
	 */
9662 char *legacy_probe_name;
9663 bool legacy_is_kprobe;
9664 bool legacy_is_retprobe;
9665};
9666
9667static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe);
9668static int remove_uprobe_event_legacy(const char *probe_name, bool retprobe);
9669
9670static int bpf_link_perf_detach(struct bpf_link *link)
9671{
9672 struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
9673 int err = 0;
9674
9675 if (ioctl(perf_link->perf_event_fd, PERF_EVENT_IOC_DISABLE, 0) < 0)
9676 err = -errno;
9677
9678 if (perf_link->perf_event_fd != link->fd)
9679 close(perf_link->perf_event_fd);
9680 close(link->fd);
9681
	/* legacy uprobe/kprobe needs to be removed after perf event fd closure */
9683 if (perf_link->legacy_probe_name) {
9684 if (perf_link->legacy_is_kprobe) {
9685 err = remove_kprobe_event_legacy(perf_link->legacy_probe_name,
9686 perf_link->legacy_is_retprobe);
9687 } else {
9688 err = remove_uprobe_event_legacy(perf_link->legacy_probe_name,
9689 perf_link->legacy_is_retprobe);
9690 }
9691 }
9692
9693 return err;
9694}
9695
9696static void bpf_link_perf_dealloc(struct bpf_link *link)
9697{
9698 struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
9699
9700 free(perf_link->legacy_probe_name);
9701 free(perf_link);
9702}
9703
9704struct bpf_link *bpf_program__attach_perf_event_opts(const struct bpf_program *prog, int pfd,
9705 const struct bpf_perf_event_opts *opts)
9706{
9707 char errmsg[STRERR_BUFSIZE];
9708 struct bpf_link_perf *link;
9709 int prog_fd, link_fd = -1, err;
9710
9711 if (!OPTS_VALID(opts, bpf_perf_event_opts))
9712 return libbpf_err_ptr(-EINVAL);
9713
9714 if (pfd < 0) {
9715 pr_warn("prog '%s': invalid perf event FD %d\n",
9716 prog->name, pfd);
9717 return libbpf_err_ptr(-EINVAL);
9718 }
9719 prog_fd = bpf_program__fd(prog);
9720 if (prog_fd < 0) {
9721 pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n",
9722 prog->name);
9723 return libbpf_err_ptr(-EINVAL);
9724 }
9725
9726 link = calloc(1, sizeof(*link));
9727 if (!link)
9728 return libbpf_err_ptr(-ENOMEM);
9729 link->link.detach = &bpf_link_perf_detach;
9730 link->link.dealloc = &bpf_link_perf_dealloc;
9731 link->perf_event_fd = pfd;
9732
9733 if (kernel_supports(prog->obj, FEAT_PERF_LINK)) {
9734 DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_opts,
9735 .perf_event.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0));
9736
9737 link_fd = bpf_link_create(prog_fd, pfd, BPF_PERF_EVENT, &link_opts);
9738 if (link_fd < 0) {
9739 err = -errno;
9740 pr_warn("prog '%s': failed to create BPF link for perf_event FD %d: %d (%s)\n",
9741 prog->name, pfd,
9742 err, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9743 goto err_out;
9744 }
9745 link->link.fd = link_fd;
9746 } else {
9747 if (OPTS_GET(opts, bpf_cookie, 0)) {
9748 pr_warn("prog '%s': user context value is not supported\n", prog->name);
9749 err = -EOPNOTSUPP;
9750 goto err_out;
9751 }
9752
9753 if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
9754 err = -errno;
9755 pr_warn("prog '%s': failed to attach to perf_event FD %d: %s\n",
9756 prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9757 if (err == -EPROTO)
9758 pr_warn("prog '%s': try add PERF_SAMPLE_CALLCHAIN to or remove exclude_callchain_[kernel|user] from pfd %d\n",
9759 prog->name, pfd);
9760 goto err_out;
9761 }
9762 link->link.fd = pfd;
9763 }
9764 if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
9765 err = -errno;
9766 pr_warn("prog '%s': failed to enable perf_event FD %d: %s\n",
9767 prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9768 goto err_out;
9769 }
9770
9771 return &link->link;
9772err_out:
9773 if (link_fd >= 0)
9774 close(link_fd);
9775 free(link);
9776 return libbpf_err_ptr(err);
9777}
9778
9779struct bpf_link *bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd)
9780{
9781 return bpf_program__attach_perf_event_opts(prog, pfd, NULL);
9782}
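
/* Sketch: attaching a loaded program to a perf event FD obtained via
 * perf_event_open(); the sampling attrs and "prog" are illustrative:
 *
 *	struct perf_event_attr attr = {
 *		.size = sizeof(attr),
 *		.type = PERF_TYPE_SOFTWARE,
 *		.config = PERF_COUNT_SW_CPU_CLOCK,
 *		.sample_freq = 99,
 *		.freq = 1,
 *	};
 *	int pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1,
 *			  PERF_FLAG_FD_CLOEXEC);
 *	struct bpf_link *link = bpf_program__attach_perf_event(prog, pfd);
 */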
9783
/*
 * this function is expected to parse integer in the range of [0, 2^31-1] from
 * given file using scanf format string fmt. If actual parsed value is
 * negative, the result might be indistinguishable from error
 */
9789static int parse_uint_from_file(const char *file, const char *fmt)
9790{
9791 char buf[STRERR_BUFSIZE];
9792 int err, ret;
9793 FILE *f;
9794
9795 f = fopen(file, "r");
9796 if (!f) {
9797 err = -errno;
9798 pr_debug("failed to open '%s': %s\n", file,
9799 libbpf_strerror_r(err, buf, sizeof(buf)));
9800 return err;
9801 }
9802 err = fscanf(f, fmt, &ret);
9803 if (err != 1) {
9804 err = err == EOF ? -EIO : -errno;
9805 pr_debug("failed to parse '%s': %s\n", file,
9806 libbpf_strerror_r(err, buf, sizeof(buf)));
9807 fclose(f);
9808 return err;
9809 }
9810 fclose(f);
9811 return ret;
9812}
9813
9814static int determine_kprobe_perf_type(void)
9815{
9816 const char *file = "/sys/bus/event_source/devices/kprobe/type";
9817
9818 return parse_uint_from_file(file, "%d\n");
9819}
9820
9821static int determine_uprobe_perf_type(void)
9822{
9823 const char *file = "/sys/bus/event_source/devices/uprobe/type";
9824
9825 return parse_uint_from_file(file, "%d\n");
9826}
9827
9828static int determine_kprobe_retprobe_bit(void)
9829{
9830 const char *file = "/sys/bus/event_source/devices/kprobe/format/retprobe";
9831
9832 return parse_uint_from_file(file, "config:%d\n");
9833}
9834
9835static int determine_uprobe_retprobe_bit(void)
9836{
9837 const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe";
9838
9839 return parse_uint_from_file(file, "config:%d\n");
9840}
9841
9842#define PERF_UPROBE_REF_CTR_OFFSET_BITS 32
9843#define PERF_UPROBE_REF_CTR_OFFSET_SHIFT 32
9844
9845static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
9846 uint64_t offset, int pid, size_t ref_ctr_off)
9847{
9848 struct perf_event_attr attr = {};
9849 char errmsg[STRERR_BUFSIZE];
9850 int type, pfd, err;
9851
9852 if (ref_ctr_off >= (1ULL << PERF_UPROBE_REF_CTR_OFFSET_BITS))
9853 return -EINVAL;
9854
9855 type = uprobe ? determine_uprobe_perf_type()
9856 : determine_kprobe_perf_type();
9857 if (type < 0) {
9858 pr_warn("failed to determine %s perf type: %s\n",
9859 uprobe ? "uprobe" : "kprobe",
9860 libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
9861 return type;
9862 }
9863 if (retprobe) {
9864 int bit = uprobe ? determine_uprobe_retprobe_bit()
9865 : determine_kprobe_retprobe_bit();
9866
9867 if (bit < 0) {
9868 pr_warn("failed to determine %s retprobe bit: %s\n",
9869 uprobe ? "uprobe" : "kprobe",
9870 libbpf_strerror_r(bit, errmsg, sizeof(errmsg)));
9871 return bit;
9872 }
9873 attr.config |= 1 << bit;
9874 }
9875 attr.size = sizeof(attr);
9876 attr.type = type;
9877 attr.config |= (__u64)ref_ctr_off << PERF_UPROBE_REF_CTR_OFFSET_SHIFT;
9878 attr.config1 = ptr_to_u64(name);
9879 attr.config2 = offset;
9880
	/* pid filter is meaningful only for uprobes */
	pfd = syscall(__NR_perf_event_open, &attr,
		      pid < 0 ? -1 : pid /* pid */,
		      pid == -1 ? 0 : -1 /* cpu */,
		      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
9886 if (pfd < 0) {
9887 err = -errno;
9888 pr_warn("%s perf_event_open() failed: %s\n",
9889 uprobe ? "uprobe" : "kprobe",
9890 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9891 return err;
9892 }
9893 return pfd;
9894}
9895
9896static int append_to_file(const char *file, const char *fmt, ...)
9897{
9898 int fd, n, err = 0;
9899 va_list ap;
9900
9901 fd = open(file, O_WRONLY | O_APPEND | O_CLOEXEC, 0);
9902 if (fd < 0)
9903 return -errno;
9904
9905 va_start(ap, fmt);
9906 n = vdprintf(fd, fmt, ap);
9907 va_end(ap);
9908
9909 if (n < 0)
9910 err = -errno;
9911
9912 close(fd);
9913 return err;
9914}
9915
9916static void gen_kprobe_legacy_event_name(char *buf, size_t buf_sz,
9917 const char *kfunc_name, size_t offset)
9918{
9919 static int index = 0;
9920
9921 snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx_%d", getpid(), kfunc_name, offset,
9922 __sync_fetch_and_add(&index, 1));
9923}
9924
9925static int add_kprobe_event_legacy(const char *probe_name, bool retprobe,
9926 const char *kfunc_name, size_t offset)
9927{
9928 const char *file = "/sys/kernel/debug/tracing/kprobe_events";
9929
9930 return append_to_file(file, "%c:%s/%s %s+0x%zx",
9931 retprobe ? 'r' : 'p',
9932 retprobe ? "kretprobes" : "kprobes",
9933 probe_name, kfunc_name, offset);
9934}
9935
9936static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe)
9937{
9938 const char *file = "/sys/kernel/debug/tracing/kprobe_events";
9939
9940 return append_to_file(file, "-:%s/%s", retprobe ? "kretprobes" : "kprobes", probe_name);
9941}
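
/* For reference, add_kprobe_event_legacy() above appends tracefs lines of
 * the form (pid and function name are illustrative):
 *
 *	p:kprobes/libbpf_12345_do_sys_open_0x0_0 do_sys_open+0x0
 *	r:kretprobes/libbpf_12345_do_sys_open_0x0_1 do_sys_open+0x0
 *
 * and remove_kprobe_event_legacy() appends "-:kprobes/<name>" (or the
 * kretprobes variant) to delete them.
 */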
9942
9943static int determine_kprobe_perf_type_legacy(const char *probe_name, bool retprobe)
9944{
9945 char file[256];
9946
9947 snprintf(file, sizeof(file),
9948 "/sys/kernel/debug/tracing/events/%s/%s/id",
9949 retprobe ? "kretprobes" : "kprobes", probe_name);
9950
9951 return parse_uint_from_file(file, "%d\n");
9952}
9953
9954static int perf_event_kprobe_open_legacy(const char *probe_name, bool retprobe,
9955 const char *kfunc_name, size_t offset, int pid)
9956{
9957 struct perf_event_attr attr = {};
9958 char errmsg[STRERR_BUFSIZE];
9959 int type, pfd, err;
9960
9961 err = add_kprobe_event_legacy(probe_name, retprobe, kfunc_name, offset);
9962 if (err < 0) {
9963 pr_warn("failed to add legacy kprobe event for '%s+0x%zx': %s\n",
9964 kfunc_name, offset,
9965 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9966 return err;
9967 }
9968 type = determine_kprobe_perf_type_legacy(probe_name, retprobe);
9969 if (type < 0) {
9970 pr_warn("failed to determine legacy kprobe event id for '%s+0x%zx': %s\n",
9971 kfunc_name, offset,
9972 libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
9973 return type;
9974 }
9975 attr.size = sizeof(attr);
9976 attr.config = type;
9977 attr.type = PERF_TYPE_TRACEPOINT;
9978
	pfd = syscall(__NR_perf_event_open, &attr,
		      pid < 0 ? -1 : pid /* pid */,
		      pid == -1 ? 0 : -1 /* cpu */,
		      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
9983 if (pfd < 0) {
9984 err = -errno;
9985 pr_warn("legacy kprobe perf_event_open() failed: %s\n",
9986 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9987 return err;
9988 }
9989 return pfd;
9990}
9991
9992struct bpf_link *
9993bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
9994 const char *func_name,
9995 const struct bpf_kprobe_opts *opts)
9996{
9997 DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
9998 char errmsg[STRERR_BUFSIZE];
9999 char *legacy_probe = NULL;
10000 struct bpf_link *link;
10001 size_t offset;
10002 bool retprobe, legacy;
10003 int pfd, err;
10004
10005 if (!OPTS_VALID(opts, bpf_kprobe_opts))
10006 return libbpf_err_ptr(-EINVAL);
10007
10008 retprobe = OPTS_GET(opts, retprobe, false);
10009 offset = OPTS_GET(opts, offset, 0);
10010 pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
10011
10012 legacy = determine_kprobe_perf_type() < 0;
10013 if (!legacy) {
		pfd = perf_event_open_probe(false /* uprobe */, retprobe,
					    func_name, offset,
					    -1 /* pid */, 0 /* ref_ctr_off */);
10017 } else {
10018 char probe_name[256];
10019
10020 gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name),
10021 func_name, offset);
10022
10023 legacy_probe = strdup(probe_name);
10024 if (!legacy_probe)
10025 return libbpf_err_ptr(-ENOMEM);
10026
		pfd = perf_event_kprobe_open_legacy(legacy_probe, retprobe, func_name,
						    offset, -1 /* pid */);
10029 }
10030 if (pfd < 0) {
10031 err = -errno;
10032 pr_warn("prog '%s': failed to create %s '%s+0x%zx' perf event: %s\n",
10033 prog->name, retprobe ? "kretprobe" : "kprobe",
10034 func_name, offset,
10035 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10036 goto err_out;
10037 }
10038 link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
10039 err = libbpf_get_error(link);
10040 if (err) {
10041 close(pfd);
10042 pr_warn("prog '%s': failed to attach to %s '%s+0x%zx': %s\n",
10043 prog->name, retprobe ? "kretprobe" : "kprobe",
10044 func_name, offset,
10045 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10046 goto err_out;
10047 }
10048 if (legacy) {
10049 struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
10050
10051 perf_link->legacy_probe_name = legacy_probe;
10052 perf_link->legacy_is_kprobe = true;
10053 perf_link->legacy_is_retprobe = retprobe;
10054 }
10055
10056 return link;
10057err_out:
10058 free(legacy_probe);
10059 return libbpf_err_ptr(err);
10060}
10061
10062struct bpf_link *bpf_program__attach_kprobe(const struct bpf_program *prog,
10063 bool retprobe,
10064 const char *func_name)
10065{
10066 DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts,
10067 .retprobe = retprobe,
10068 );
10069
10070 return bpf_program__attach_kprobe_opts(prog, func_name, &opts);
10071}
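
/* Kprobe attach sketch ("prog" and the kernel function name are
 * illustrative; offset and bpf_cookie are optional):
 *
 *	DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts,
 *		.retprobe = false,
 *		.offset = 0,
 *	);
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_kprobe_opts(prog, "do_sys_open", &opts);
 *	if (libbpf_get_error(link))
 *		// handle error
 */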
10072
10073static struct bpf_link *attach_kprobe(const struct bpf_program *prog, long cookie)
10074{
10075 DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts);
10076 unsigned long offset = 0;
10077 struct bpf_link *link;
10078 const char *func_name;
10079 char *func;
10080 int n, err;
10081
10082 opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe/");
10083 if (opts.retprobe)
10084 func_name = prog->sec_name + sizeof("kretprobe/") - 1;
10085 else
10086 func_name = prog->sec_name + sizeof("kprobe/") - 1;
10087
10088 n = sscanf(func_name, "%m[a-zA-Z0-9_.]+%li", &func, &offset);
10089 if (n < 1) {
10090 err = -EINVAL;
10091 pr_warn("kprobe name is invalid: %s\n", func_name);
10092 return libbpf_err_ptr(err);
10093 }
10094 if (opts.retprobe && offset != 0) {
10095 free(func);
10096 err = -EINVAL;
10097 pr_warn("kretprobes do not support offset specification\n");
10098 return libbpf_err_ptr(err);
10099 }
10100
10101 opts.offset = offset;
10102 link = bpf_program__attach_kprobe_opts(prog, func, &opts);
10103 free(func);
10104 return link;
10105}
10106
10107static void gen_uprobe_legacy_event_name(char *buf, size_t buf_sz,
10108 const char *binary_path, uint64_t offset)
10109{
10110 int i;
10111
10112 snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx", getpid(), binary_path, (size_t)offset);
	/* sanitize binary_path in the probe name */
10115 for (i = 0; buf[i]; i++) {
10116 if (!isalnum(buf[i]))
10117 buf[i] = '_';
10118 }
10119}
10120
10121static inline int add_uprobe_event_legacy(const char *probe_name, bool retprobe,
10122 const char *binary_path, size_t offset)
10123{
10124 const char *file = "/sys/kernel/debug/tracing/uprobe_events";
10125
10126 return append_to_file(file, "%c:%s/%s %s:0x%zx",
10127 retprobe ? 'r' : 'p',
10128 retprobe ? "uretprobes" : "uprobes",
10129 probe_name, binary_path, offset);
10130}
10131
10132static inline int remove_uprobe_event_legacy(const char *probe_name, bool retprobe)
10133{
10134 const char *file = "/sys/kernel/debug/tracing/uprobe_events";
10135
10136 return append_to_file(file, "-:%s/%s", retprobe ? "uretprobes" : "uprobes", probe_name);
10137}
10138
10139static int determine_uprobe_perf_type_legacy(const char *probe_name, bool retprobe)
10140{
10141 char file[512];
10142
10143 snprintf(file, sizeof(file),
10144 "/sys/kernel/debug/tracing/events/%s/%s/id",
10145 retprobe ? "uretprobes" : "uprobes", probe_name);
10146
10147 return parse_uint_from_file(file, "%d\n");
10148}
10149
10150static int perf_event_uprobe_open_legacy(const char *probe_name, bool retprobe,
10151 const char *binary_path, size_t offset, int pid)
10152{
10153 struct perf_event_attr attr;
10154 int type, pfd, err;
10155
10156 err = add_uprobe_event_legacy(probe_name, retprobe, binary_path, offset);
10157 if (err < 0) {
10158 pr_warn("failed to add legacy uprobe event for %s:0x%zx: %d\n",
10159 binary_path, (size_t)offset, err);
10160 return err;
10161 }
10162 type = determine_uprobe_perf_type_legacy(probe_name, retprobe);
10163 if (type < 0) {
		pr_warn("failed to determine legacy uprobe event id for %s:0x%zx: %d\n",
			binary_path, offset, type);
10166 return type;
10167 }
10168
10169 memset(&attr, 0, sizeof(attr));
10170 attr.size = sizeof(attr);
10171 attr.config = type;
10172 attr.type = PERF_TYPE_TRACEPOINT;
10173
	pfd = syscall(__NR_perf_event_open, &attr,
		      pid < 0 ? -1 : pid /* pid */,
		      pid == -1 ? 0 : -1 /* cpu */,
		      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
10178 if (pfd < 0) {
10179 err = -errno;
10180 pr_warn("legacy uprobe perf_event_open() failed: %d\n", err);
10181 return err;
10182 }
10183 return pfd;
10184}
10185
10186LIBBPF_API struct bpf_link *
10187bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
10188 const char *binary_path, size_t func_offset,
10189 const struct bpf_uprobe_opts *opts)
10190{
10191 DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
10192 char errmsg[STRERR_BUFSIZE], *legacy_probe = NULL;
10193 struct bpf_link *link;
10194 size_t ref_ctr_off;
10195 int pfd, err;
10196 bool retprobe, legacy;
10197
10198 if (!OPTS_VALID(opts, bpf_uprobe_opts))
10199 return libbpf_err_ptr(-EINVAL);
10200
10201 retprobe = OPTS_GET(opts, retprobe, false);
10202 ref_ctr_off = OPTS_GET(opts, ref_ctr_offset, 0);
10203 pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
10204
10205 legacy = determine_uprobe_perf_type() < 0;
10206 if (!legacy) {
		pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path,
					    func_offset, pid, ref_ctr_off);
10209 } else {
10210 char probe_name[512];
10211
10212 if (ref_ctr_off)
10213 return libbpf_err_ptr(-EINVAL);
10214
10215 gen_uprobe_legacy_event_name(probe_name, sizeof(probe_name),
10216 binary_path, func_offset);
10217
10218 legacy_probe = strdup(probe_name);
10219 if (!legacy_probe)
10220 return libbpf_err_ptr(-ENOMEM);
10221
10222 pfd = perf_event_uprobe_open_legacy(legacy_probe, retprobe,
10223 binary_path, func_offset, pid);
10224 }
10225 if (pfd < 0) {
10226 err = -errno;
10227 pr_warn("prog '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
10228 prog->name, retprobe ? "uretprobe" : "uprobe",
10229 binary_path, func_offset,
10230 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10231 goto err_out;
10232 }
10233
10234 link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
10235 err = libbpf_get_error(link);
10236 if (err) {
10237 close(pfd);
10238 pr_warn("prog '%s': failed to attach to %s '%s:0x%zx': %s\n",
10239 prog->name, retprobe ? "uretprobe" : "uprobe",
10240 binary_path, func_offset,
10241 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10242 goto err_out;
10243 }
10244 if (legacy) {
10245 struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
10246
10247 perf_link->legacy_probe_name = legacy_probe;
10248 perf_link->legacy_is_kprobe = false;
10249 perf_link->legacy_is_retprobe = retprobe;
10250 }
10251 return link;
10252err_out:
10253 free(legacy_probe);
10254 return libbpf_err_ptr(err);
}
10257
10258struct bpf_link *bpf_program__attach_uprobe(const struct bpf_program *prog,
10259 bool retprobe, pid_t pid,
10260 const char *binary_path,
10261 size_t func_offset)
10262{
10263 DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts, .retprobe = retprobe);
10264
10265 return bpf_program__attach_uprobe_opts(prog, pid, binary_path, func_offset, &opts);
10266}
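
/* Uprobe attach sketch; the binary path and offset are illustrative, and
 * the offset must point at an instruction (typically a function entry)
 * within the binary. pid -1 means "any process":
 *
 *	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts, .retprobe = true);
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_uprobe_opts(prog, -1,
 *					       "/usr/lib/libc.so.6", 0x12345,
 *					       &opts);
 */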
10267
10268static int determine_tracepoint_id(const char *tp_category,
10269 const char *tp_name)
10270{
10271 char file[PATH_MAX];
10272 int ret;
10273
10274 ret = snprintf(file, sizeof(file),
10275 "/sys/kernel/debug/tracing/events/%s/%s/id",
10276 tp_category, tp_name);
10277 if (ret < 0)
10278 return -errno;
10279 if (ret >= sizeof(file)) {
10280 pr_debug("tracepoint %s/%s path is too long\n",
10281 tp_category, tp_name);
10282 return -E2BIG;
10283 }
10284 return parse_uint_from_file(file, "%d\n");
10285}
10286
10287static int perf_event_open_tracepoint(const char *tp_category,
10288 const char *tp_name)
10289{
10290 struct perf_event_attr attr = {};
10291 char errmsg[STRERR_BUFSIZE];
10292 int tp_id, pfd, err;
10293
10294 tp_id = determine_tracepoint_id(tp_category, tp_name);
10295 if (tp_id < 0) {
10296 pr_warn("failed to determine tracepoint '%s/%s' perf event ID: %s\n",
10297 tp_category, tp_name,
10298 libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg)));
10299 return tp_id;
10300 }
10301
10302 attr.type = PERF_TYPE_TRACEPOINT;
10303 attr.size = sizeof(attr);
10304 attr.config = tp_id;
10305
	pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
		      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
10308 if (pfd < 0) {
10309 err = -errno;
10310 pr_warn("tracepoint '%s/%s' perf_event_open() failed: %s\n",
10311 tp_category, tp_name,
10312 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10313 return err;
10314 }
10315 return pfd;
10316}
10317
10318struct bpf_link *bpf_program__attach_tracepoint_opts(const struct bpf_program *prog,
10319 const char *tp_category,
10320 const char *tp_name,
10321 const struct bpf_tracepoint_opts *opts)
10322{
10323 DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
10324 char errmsg[STRERR_BUFSIZE];
10325 struct bpf_link *link;
10326 int pfd, err;
10327
10328 if (!OPTS_VALID(opts, bpf_tracepoint_opts))
10329 return libbpf_err_ptr(-EINVAL);
10330
10331 pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
10332
10333 pfd = perf_event_open_tracepoint(tp_category, tp_name);
10334 if (pfd < 0) {
10335 pr_warn("prog '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
10336 prog->name, tp_category, tp_name,
10337 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
10338 return libbpf_err_ptr(pfd);
10339 }
10340 link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
10341 err = libbpf_get_error(link);
10342 if (err) {
10343 close(pfd);
10344 pr_warn("prog '%s': failed to attach to tracepoint '%s/%s': %s\n",
10345 prog->name, tp_category, tp_name,
10346 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10347 return libbpf_err_ptr(err);
10348 }
10349 return link;
10350}
10351
10352struct bpf_link *bpf_program__attach_tracepoint(const struct bpf_program *prog,
10353 const char *tp_category,
10354 const char *tp_name)
10355{
10356 return bpf_program__attach_tracepoint_opts(prog, tp_category, tp_name, NULL);
10357}
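
/* Tracepoint attach sketch; the category/name pair corresponds to
 * /sys/kernel/debug/tracing/events/<category>/<name>:
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_tracepoint(prog, "sched", "sched_switch");
 */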
10358
10359static struct bpf_link *attach_tp(const struct bpf_program *prog, long cookie)
10360{
10361 char *sec_name, *tp_cat, *tp_name;
10362 struct bpf_link *link;
10363
10364 sec_name = strdup(prog->sec_name);
10365 if (!sec_name)
10366 return libbpf_err_ptr(-ENOMEM);
10367
	/* extract "tp/<category>/<name>" or "tracepoint/<category>/<name>" */
10369 if (str_has_pfx(prog->sec_name, "tp/"))
10370 tp_cat = sec_name + sizeof("tp/") - 1;
10371 else
10372 tp_cat = sec_name + sizeof("tracepoint/") - 1;
10373 tp_name = strchr(tp_cat, '/');
10374 if (!tp_name) {
10375 free(sec_name);
10376 return libbpf_err_ptr(-EINVAL);
10377 }
10378 *tp_name = '\0';
10379 tp_name++;
10380
10381 link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name);
10382 free(sec_name);
10383 return link;
10384}
10385
10386struct bpf_link *bpf_program__attach_raw_tracepoint(const struct bpf_program *prog,
10387 const char *tp_name)
10388{
10389 char errmsg[STRERR_BUFSIZE];
10390 struct bpf_link *link;
10391 int prog_fd, pfd;
10392
10393 prog_fd = bpf_program__fd(prog);
10394 if (prog_fd < 0) {
10395 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
10396 return libbpf_err_ptr(-EINVAL);
10397 }
10398
10399 link = calloc(1, sizeof(*link));
10400 if (!link)
10401 return libbpf_err_ptr(-ENOMEM);
10402 link->detach = &bpf_link__detach_fd;
10403
10404 pfd = bpf_raw_tracepoint_open(tp_name, prog_fd);
10405 if (pfd < 0) {
10406 pfd = -errno;
10407 free(link);
10408 pr_warn("prog '%s': failed to attach to raw tracepoint '%s': %s\n",
10409 prog->name, tp_name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
10410 return libbpf_err_ptr(pfd);
10411 }
10412 link->fd = pfd;
10413 return link;
10414}
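
/* Raw tracepoint attach sketch; unlike regular tracepoints, the name comes
 * without a category:
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_raw_tracepoint(prog, "sched_switch");
 */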
10415
10416static struct bpf_link *attach_raw_tp(const struct bpf_program *prog, long cookie)
10417{
10418 static const char *const prefixes[] = {
10419 "raw_tp/",
10420 "raw_tracepoint/",
10421 "raw_tp.w/",
10422 "raw_tracepoint.w/",
10423 };
10424 size_t i;
10425 const char *tp_name = NULL;
10426
10427 for (i = 0; i < ARRAY_SIZE(prefixes); i++) {
10428 if (str_has_pfx(prog->sec_name, prefixes[i])) {
10429 tp_name = prog->sec_name + strlen(prefixes[i]);
10430 break;
10431 }
10432 }
10433 if (!tp_name) {
10434 pr_warn("prog '%s': invalid section name '%s'\n",
10435 prog->name, prog->sec_name);
10436 return libbpf_err_ptr(-EINVAL);
10437 }
10438
10439 return bpf_program__attach_raw_tracepoint(prog, tp_name);
10440}
10441
/* Common logic for all BPF program types that attach to a btf_id */
10443static struct bpf_link *bpf_program__attach_btf_id(const struct bpf_program *prog)
10444{
10445 char errmsg[STRERR_BUFSIZE];
10446 struct bpf_link *link;
10447 int prog_fd, pfd;
10448
10449 prog_fd = bpf_program__fd(prog);
10450 if (prog_fd < 0) {
10451 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
10452 return libbpf_err_ptr(-EINVAL);
10453 }
10454
10455 link = calloc(1, sizeof(*link));
10456 if (!link)
10457 return libbpf_err_ptr(-ENOMEM);
10458 link->detach = &bpf_link__detach_fd;
10459
10460 pfd = bpf_raw_tracepoint_open(NULL, prog_fd);
10461 if (pfd < 0) {
10462 pfd = -errno;
10463 free(link);
10464 pr_warn("prog '%s': failed to attach: %s\n",
10465 prog->name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
10466 return libbpf_err_ptr(pfd);
10467 }
10468 link->fd = pfd;
10469 return (struct bpf_link *)link;
10470}
10471
10472struct bpf_link *bpf_program__attach_trace(const struct bpf_program *prog)
10473{
10474 return bpf_program__attach_btf_id(prog);
10475}
10476
10477struct bpf_link *bpf_program__attach_lsm(const struct bpf_program *prog)
10478{
10479 return bpf_program__attach_btf_id(prog);
10480}
10481
10482static struct bpf_link *attach_trace(const struct bpf_program *prog, long cookie)
10483{
10484 return bpf_program__attach_trace(prog);
10485}
10486
10487static struct bpf_link *attach_lsm(const struct bpf_program *prog, long cookie)
10488{
10489 return bpf_program__attach_lsm(prog);
10490}
10491
10492static struct bpf_link *
10493bpf_program__attach_fd(const struct bpf_program *prog, int target_fd, int btf_id,
10494 const char *target_name)
10495{
10496 DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts,
10497 .target_btf_id = btf_id);
10498 enum bpf_attach_type attach_type;
10499 char errmsg[STRERR_BUFSIZE];
10500 struct bpf_link *link;
10501 int prog_fd, link_fd;
10502
10503 prog_fd = bpf_program__fd(prog);
10504 if (prog_fd < 0) {
10505 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
10506 return libbpf_err_ptr(-EINVAL);
10507 }
10508
10509 link = calloc(1, sizeof(*link));
10510 if (!link)
10511 return libbpf_err_ptr(-ENOMEM);
10512 link->detach = &bpf_link__detach_fd;
10513
10514 attach_type = bpf_program__get_expected_attach_type(prog);
10515 link_fd = bpf_link_create(prog_fd, target_fd, attach_type, &opts);
10516 if (link_fd < 0) {
10517 link_fd = -errno;
10518 free(link);
10519 pr_warn("prog '%s': failed to attach to %s: %s\n",
10520 prog->name, target_name,
10521 libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
10522 return libbpf_err_ptr(link_fd);
10523 }
10524 link->fd = link_fd;
10525 return link;
10526}
10527
10528struct bpf_link *
10529bpf_program__attach_cgroup(const struct bpf_program *prog, int cgroup_fd)
10530{
10531 return bpf_program__attach_fd(prog, cgroup_fd, 0, "cgroup");
10532}
10533
10534struct bpf_link *
10535bpf_program__attach_netns(const struct bpf_program *prog, int netns_fd)
10536{
10537 return bpf_program__attach_fd(prog, netns_fd, 0, "netns");
10538}
10539
10540struct bpf_link *bpf_program__attach_xdp(const struct bpf_program *prog, int ifindex)
10541{
	/* target_fd/target_ifindex use the same field in LINK_CREATE */
10543 return bpf_program__attach_fd(prog, ifindex, 0, "xdp");
10544}
10545
10546struct bpf_link *bpf_program__attach_freplace(const struct bpf_program *prog,
10547 int target_fd,
10548 const char *attach_func_name)
10549{
10550 int btf_id;
10551
10552 if (!!target_fd != !!attach_func_name) {
10553 pr_warn("prog '%s': supply none or both of target_fd and attach_func_name\n",
10554 prog->name);
10555 return libbpf_err_ptr(-EINVAL);
10556 }
10557
10558 if (prog->type != BPF_PROG_TYPE_EXT) {
10559 pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace",
10560 prog->name);
10561 return libbpf_err_ptr(-EINVAL);
10562 }
10563
10564 if (target_fd) {
10565 btf_id = libbpf_find_prog_btf_id(attach_func_name, target_fd);
10566 if (btf_id < 0)
10567 return libbpf_err_ptr(btf_id);
10568
10569 return bpf_program__attach_fd(prog, target_fd, btf_id, "freplace");
10570 } else {
		/* no target, so use raw_tracepoint_open for compatibility
		 * with old kernels
		 */
10574 return bpf_program__attach_trace(prog);
10575 }
10576}
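
/* freplace attach sketch: replace function "target_func" inside the BPF
 * program behind target_prog_fd (both names are hypothetical); prog must be
 * of type BPF_PROG_TYPE_EXT:
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_freplace(prog, target_prog_fd, "target_func");
 */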
10577
10578struct bpf_link *
10579bpf_program__attach_iter(const struct bpf_program *prog,
10580 const struct bpf_iter_attach_opts *opts)
10581{
10582 DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
10583 char errmsg[STRERR_BUFSIZE];
10584 struct bpf_link *link;
10585 int prog_fd, link_fd;
10586 __u32 target_fd = 0;
10587
10588 if (!OPTS_VALID(opts, bpf_iter_attach_opts))
10589 return libbpf_err_ptr(-EINVAL);
10590
10591 link_create_opts.iter_info = OPTS_GET(opts, link_info, (void *)0);
10592 link_create_opts.iter_info_len = OPTS_GET(opts, link_info_len, 0);
10593
10594 prog_fd = bpf_program__fd(prog);
10595 if (prog_fd < 0) {
10596 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
10597 return libbpf_err_ptr(-EINVAL);
10598 }
10599
10600 link = calloc(1, sizeof(*link));
10601 if (!link)
10602 return libbpf_err_ptr(-ENOMEM);
10603 link->detach = &bpf_link__detach_fd;
10604
10605 link_fd = bpf_link_create(prog_fd, target_fd, BPF_TRACE_ITER,
10606 &link_create_opts);
10607 if (link_fd < 0) {
10608 link_fd = -errno;
10609 free(link);
10610 pr_warn("prog '%s': failed to attach to iterator: %s\n",
10611 prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
10612 return libbpf_err_ptr(link_fd);
10613 }
10614 link->fd = link_fd;
10615 return link;
10616}
10617
10618static struct bpf_link *attach_iter(const struct bpf_program *prog, long cookie)
10619{
10620 return bpf_program__attach_iter(prog, NULL);
10621}
10622
10623struct bpf_link *bpf_program__attach(const struct bpf_program *prog)
10624{
10625 if (!prog->sec_def || !prog->sec_def->attach_fn)
10626 return libbpf_err_ptr(-ESRCH);
10627
10628 return prog->sec_def->attach_fn(prog, prog->sec_def->cookie);
10629}
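
/* Generic auto-attach sketch: for programs whose SEC() definition carries an
 * attach_fn (see section_defs above), no extra parameters are needed:
 *
 *	struct bpf_link *link = bpf_program__attach(prog);
 *
 *	if (libbpf_get_error(link))
 *		// e.g., -ESRCH when the section type isn't auto-attachable
 */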
10630
10631static int bpf_link__detach_struct_ops(struct bpf_link *link)
10632{
10633 __u32 zero = 0;
10634
10635 if (bpf_map_delete_elem(link->fd, &zero))
10636 return -errno;
10637
10638 return 0;
10639}
10640
10641struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map)
10642{
10643 struct bpf_struct_ops *st_ops;
10644 struct bpf_link *link;
10645 __u32 i, zero = 0;
10646 int err;
10647
10648 if (!bpf_map__is_struct_ops(map) || map->fd == -1)
10649 return libbpf_err_ptr(-EINVAL);
10650
10651 link = calloc(1, sizeof(*link));
10652 if (!link)
10653 return libbpf_err_ptr(-EINVAL);
10654
10655 st_ops = map->st_ops;
10656 for (i = 0; i < btf_vlen(st_ops->type); i++) {
10657 struct bpf_program *prog = st_ops->progs[i];
10658 void *kern_data;
10659 int prog_fd;
10660
10661 if (!prog)
10662 continue;
10663
10664 prog_fd = bpf_program__fd(prog);
10665 kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i];
10666 *(unsigned long *)kern_data = prog_fd;
10667 }
10668
10669 err = bpf_map_update_elem(map->fd, &zero, st_ops->kern_vdata, 0);
10670 if (err) {
10671 err = -errno;
10672 free(link);
10673 return libbpf_err_ptr(err);
10674 }
10675
10676 link->detach = bpf_link__detach_struct_ops;
10677 link->fd = map->fd;
10678
10679 return link;
10680}
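
/* struct_ops attach sketch; the map must come from a SEC("struct_ops")
 * variable in a loaded object ("obj" and the map name are hypothetical):
 *
 *	struct bpf_map *map = bpf_object__find_map_by_name(obj, "my_tcp_ca");
 *	struct bpf_link *link = bpf_map__attach_struct_ops(map);
 */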
10681
10682static enum bpf_perf_event_ret
10683perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
10684 void **copy_mem, size_t *copy_size,
10685 bpf_perf_event_print_t fn, void *private_data)
10686{
10687 struct perf_event_mmap_page *header = mmap_mem;
10688 __u64 data_head = ring_buffer_read_head(header);
10689 __u64 data_tail = header->data_tail;
10690 void *base = ((__u8 *)header) + page_size;
10691 int ret = LIBBPF_PERF_EVENT_CONT;
10692 struct perf_event_header *ehdr;
10693 size_t ehdr_size;
10694
10695 while (data_head != data_tail) {
10696 ehdr = base + (data_tail & (mmap_size - 1));
10697 ehdr_size = ehdr->size;
10698
10699 if (((void *)ehdr) + ehdr_size > base + mmap_size) {
10700 void *copy_start = ehdr;
10701 size_t len_first = base + mmap_size - copy_start;
10702 size_t len_secnd = ehdr_size - len_first;
10703
10704 if (*copy_size < ehdr_size) {
10705 free(*copy_mem);
10706 *copy_mem = malloc(ehdr_size);
10707 if (!*copy_mem) {
10708 *copy_size = 0;
10709 ret = LIBBPF_PERF_EVENT_ERROR;
10710 break;
10711 }
10712 *copy_size = ehdr_size;
10713 }
10714
10715 memcpy(*copy_mem, copy_start, len_first);
10716 memcpy(*copy_mem + len_first, base, len_secnd);
10717 ehdr = *copy_mem;
10718 }
10719
10720 ret = fn(ehdr, private_data);
10721 data_tail += ehdr_size;
10722 if (ret != LIBBPF_PERF_EVENT_CONT)
10723 break;
10724 }
10725
10726 ring_buffer_write_tail(header, data_tail);
10727 return libbpf_err(ret);
10728}
10729
10730__attribute__((alias("perf_event_read_simple")))
10731enum bpf_perf_event_ret
10732bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
10733 void **copy_mem, size_t *copy_size,
10734 bpf_perf_event_print_t fn, void *private_data);
10735
10736struct perf_buffer;
10737
10738struct perf_buffer_params {
10739 struct perf_event_attr *attr;
10740
10741 perf_buffer_event_fn event_cb;
10742
10743 perf_buffer_sample_fn sample_cb;
10744 perf_buffer_lost_fn lost_cb;
10745 void *ctx;
10746 int cpu_cnt;
10747 int *cpus;
10748 int *map_keys;
10749};
10750
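/* Per-CPU ring buffer state: base is the mmap'ed control page plus data
 * pages, fd is the backing perf event, buf/buf_size is the scratch area
 * used to reassemble wrapped records, and map_key is the PERF_EVENT_ARRAY
 * slot this buffer's FD was stored under.
 */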
10751struct perf_cpu_buf {
10752 struct perf_buffer *pb;
10753 void *base;
10754 void *buf;
10755 size_t buf_size;
10756 int fd;
10757 int cpu;
10758 int map_key;
10759};
10760
10761struct perf_buffer {
10762 perf_buffer_event_fn event_cb;
10763 perf_buffer_sample_fn sample_cb;
10764 perf_buffer_lost_fn lost_cb;
10765 void *ctx;
10766
10767 size_t page_size;
10768 size_t mmap_size;
10769 struct perf_cpu_buf **cpu_bufs;
10770 struct epoll_event *events;
10771 int cpu_cnt;
10772 int epoll_fd;
10773 int map_fd;
10774};
10775
10776static void perf_buffer__free_cpu_buf(struct perf_buffer *pb,
10777 struct perf_cpu_buf *cpu_buf)
10778{
10779 if (!cpu_buf)
10780 return;
10781 if (cpu_buf->base &&
10782 munmap(cpu_buf->base, pb->mmap_size + pb->page_size))
10783 pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu);
10784 if (cpu_buf->fd >= 0) {
10785 ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0);
10786 close(cpu_buf->fd);
10787 }
10788 free(cpu_buf->buf);
10789 free(cpu_buf);
10790}
10791
10792void perf_buffer__free(struct perf_buffer *pb)
10793{
10794 int i;
10795
10796 if (IS_ERR_OR_NULL(pb))
10797 return;
10798 if (pb->cpu_bufs) {
10799 for (i = 0; i < pb->cpu_cnt; i++) {
10800 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
10801
10802 if (!cpu_buf)
10803 continue;
10804
10805 bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key);
10806 perf_buffer__free_cpu_buf(pb, cpu_buf);
10807 }
10808 free(pb->cpu_bufs);
10809 }
10810 if (pb->epoll_fd >= 0)
10811 close(pb->epoll_fd);
10812 free(pb->events);
10813 free(pb);
10814}
10815
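/* Set up a single per-CPU buffer: open the perf event on the given CPU,
 * mmap one control page plus pb->mmap_size bytes of ring data, and enable
 * the event. On any failure the partially constructed buffer is freed.
 */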
10816static struct perf_cpu_buf *
10817perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
10818 int cpu, int map_key)
10819{
10820 struct perf_cpu_buf *cpu_buf;
10821 char msg[STRERR_BUFSIZE];
10822 int err;
10823
10824 cpu_buf = calloc(1, sizeof(*cpu_buf));
10825 if (!cpu_buf)
10826 return ERR_PTR(-ENOMEM);
10827
10828 cpu_buf->pb = pb;
10829 cpu_buf->cpu = cpu;
10830 cpu_buf->map_key = map_key;
10831
	cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu,
			      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
10834 if (cpu_buf->fd < 0) {
10835 err = -errno;
10836 pr_warn("failed to open perf buffer event on cpu #%d: %s\n",
10837 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
10838 goto error;
10839 }
10840
10841 cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size,
10842 PROT_READ | PROT_WRITE, MAP_SHARED,
10843 cpu_buf->fd, 0);
10844 if (cpu_buf->base == MAP_FAILED) {
10845 cpu_buf->base = NULL;
10846 err = -errno;
10847 pr_warn("failed to mmap perf buffer on cpu #%d: %s\n",
10848 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
10849 goto error;
10850 }
10851
10852 if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
10853 err = -errno;
10854 pr_warn("failed to enable perf buffer event on cpu #%d: %s\n",
10855 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
10856 goto error;
10857 }
10858
10859 return cpu_buf;
10860
10861error:
10862 perf_buffer__free_cpu_buf(pb, cpu_buf);
10863 return (struct perf_cpu_buf *)ERR_PTR(err);
10864}
10865
10866static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
10867 struct perf_buffer_params *p);
10868
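/* A minimal usage sketch for the sample-mode API below; on_sample, on_lost,
 * ctx, and the stop flag are hypothetical caller-provided names:
 *
 *	pb = perf_buffer__new(map_fd, 64, on_sample, on_lost, ctx, NULL);
 *	if (libbpf_get_error(pb))
 *		goto cleanup;
 *	while (!stop)
 *		perf_buffer__poll(pb, 100);
 *	perf_buffer__free(pb);
 */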
10869DEFAULT_VERSION(perf_buffer__new_v0_6_0, perf_buffer__new, LIBBPF_0.6.0)
10870struct perf_buffer *perf_buffer__new_v0_6_0(int map_fd, size_t page_cnt,
10871 perf_buffer_sample_fn sample_cb,
10872 perf_buffer_lost_fn lost_cb,
10873 void *ctx,
10874 const struct perf_buffer_opts *opts)
10875{
10876 struct perf_buffer_params p = {};
10877 struct perf_event_attr attr = {};
10878
10879 if (!OPTS_VALID(opts, perf_buffer_opts))
10880 return libbpf_err_ptr(-EINVAL);
10881
10882 attr.config = PERF_COUNT_SW_BPF_OUTPUT;
10883 attr.type = PERF_TYPE_SOFTWARE;
10884 attr.sample_type = PERF_SAMPLE_RAW;
10885 attr.sample_period = 1;
10886 attr.wakeup_events = 1;
10887
10888 p.attr = &attr;
10889 p.sample_cb = sample_cb;
10890 p.lost_cb = lost_cb;
10891 p.ctx = ctx;
10892
10893 return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p));
10894}
10895
10896COMPAT_VERSION(perf_buffer__new_deprecated, perf_buffer__new, LIBBPF_0.0.4)
10897struct perf_buffer *perf_buffer__new_deprecated(int map_fd, size_t page_cnt,
10898 const struct perf_buffer_opts *opts)
10899{
10900 return perf_buffer__new_v0_6_0(map_fd, page_cnt,
10901 opts ? opts->sample_cb : NULL,
10902 opts ? opts->lost_cb : NULL,
10903 opts ? opts->ctx : NULL,
10904 NULL);
10905}
10906
10907DEFAULT_VERSION(perf_buffer__new_raw_v0_6_0, perf_buffer__new_raw, LIBBPF_0.6.0)
10908struct perf_buffer *perf_buffer__new_raw_v0_6_0(int map_fd, size_t page_cnt,
10909 struct perf_event_attr *attr,
10910 perf_buffer_event_fn event_cb, void *ctx,
10911 const struct perf_buffer_raw_opts *opts)
10912{
10913 struct perf_buffer_params p = {};
10914
10915 if (page_cnt == 0 || !attr)
10916 return libbpf_err_ptr(-EINVAL);
10917
10918 if (!OPTS_VALID(opts, perf_buffer_raw_opts))
10919 return libbpf_err_ptr(-EINVAL);
10920
10921 p.attr = attr;
10922 p.event_cb = event_cb;
10923 p.ctx = ctx;
10924 p.cpu_cnt = OPTS_GET(opts, cpu_cnt, 0);
10925 p.cpus = OPTS_GET(opts, cpus, NULL);
10926 p.map_keys = OPTS_GET(opts, map_keys, NULL);
10927
10928 return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p));
10929}
10930
10931COMPAT_VERSION(perf_buffer__new_raw_deprecated, perf_buffer__new_raw, LIBBPF_0.0.4)
10932struct perf_buffer *perf_buffer__new_raw_deprecated(int map_fd, size_t page_cnt,
10933 const struct perf_buffer_raw_opts *opts)
10934{
10935 LIBBPF_OPTS(perf_buffer_raw_opts, inner_opts,
10936 .cpu_cnt = opts->cpu_cnt,
10937 .cpus = opts->cpus,
10938 .map_keys = opts->map_keys,
10939 );
10940
10941 return perf_buffer__new_raw_v0_6_0(map_fd, page_cnt, opts->attr,
10942 opts->event_cb, opts->ctx, &inner_opts);
10943}
10944
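/* Common constructor: validates that page_cnt is a power of two, sanity
 * checks the map's type when the kernel can report map info, creates an
 * epoll instance, then opens one ring buffer per requested (or per online)
 * CPU and wires each buffer's FD into both the PERF_EVENT_ARRAY map and
 * the epoll set.
 */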
10945static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
10946 struct perf_buffer_params *p)
10947{
10948 const char *online_cpus_file = "/sys/devices/system/cpu/online";
10949 struct bpf_map_info map;
10950 char msg[STRERR_BUFSIZE];
10951 struct perf_buffer *pb;
10952 bool *online = NULL;
10953 __u32 map_info_len;
10954 int err, i, j, n;
10955
10956 if (page_cnt & (page_cnt - 1)) {
		pr_warn("page count should be a power of two, but is %zu\n",
			page_cnt);
10959 return ERR_PTR(-EINVAL);
10960 }
10961
	/* best-effort sanity checks */
10963 memset(&map, 0, sizeof(map));
10964 map_info_len = sizeof(map);
10965 err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len);
10966 if (err) {
10967 err = -errno;
		/* if BPF_OBJ_GET_INFO_BY_FD is supported, will return
		 * -EBADFD, -EFAULT, or -E2BIG on real error; -EINVAL means
		 * the kernel doesn't support the command at all
		 */
10971 if (err != -EINVAL) {
10972 pr_warn("failed to get map info for map FD %d: %s\n",
10973 map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
10974 return ERR_PTR(err);
10975 }
10976 pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n",
10977 map_fd);
10978 } else {
10979 if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
10980 pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
10981 map.name);
10982 return ERR_PTR(-EINVAL);
10983 }
10984 }
10985
10986 pb = calloc(1, sizeof(*pb));
10987 if (!pb)
10988 return ERR_PTR(-ENOMEM);
10989
10990 pb->event_cb = p->event_cb;
10991 pb->sample_cb = p->sample_cb;
10992 pb->lost_cb = p->lost_cb;
10993 pb->ctx = p->ctx;
10994
10995 pb->page_size = getpagesize();
10996 pb->mmap_size = pb->page_size * page_cnt;
10997 pb->map_fd = map_fd;
10998
10999 pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
11000 if (pb->epoll_fd < 0) {
11001 err = -errno;
11002 pr_warn("failed to create epoll instance: %s\n",
11003 libbpf_strerror_r(err, msg, sizeof(msg)));
11004 goto error;
11005 }
11006
11007 if (p->cpu_cnt > 0) {
11008 pb->cpu_cnt = p->cpu_cnt;
11009 } else {
11010 pb->cpu_cnt = libbpf_num_possible_cpus();
11011 if (pb->cpu_cnt < 0) {
11012 err = pb->cpu_cnt;
11013 goto error;
11014 }
11015 if (map.max_entries && map.max_entries < pb->cpu_cnt)
11016 pb->cpu_cnt = map.max_entries;
11017 }
11018
11019 pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events));
11020 if (!pb->events) {
11021 err = -ENOMEM;
11022 pr_warn("failed to allocate events: out of memory\n");
11023 goto error;
11024 }
11025 pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs));
11026 if (!pb->cpu_bufs) {
11027 err = -ENOMEM;
11028 pr_warn("failed to allocate buffers: out of memory\n");
11029 goto error;
11030 }
11031
11032 err = parse_cpu_mask_file(online_cpus_file, &online, &n);
11033 if (err) {
11034 pr_warn("failed to get online CPU mask: %d\n", err);
11035 goto error;
11036 }
11037
11038 for (i = 0, j = 0; i < pb->cpu_cnt; i++) {
11039 struct perf_cpu_buf *cpu_buf;
11040 int cpu, map_key;
11041
11042 cpu = p->cpu_cnt > 0 ? p->cpus[i] : i;
11043 map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i;
11044
		/* in case user didn't explicitly request particular CPUs to
		 * be attached to, skip offline/not present CPUs
		 */
11048 if (p->cpu_cnt <= 0 && (cpu >= n || !online[cpu]))
11049 continue;
11050
11051 cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key);
11052 if (IS_ERR(cpu_buf)) {
11053 err = PTR_ERR(cpu_buf);
11054 goto error;
11055 }
11056
11057 pb->cpu_bufs[j] = cpu_buf;
11058
11059 err = bpf_map_update_elem(pb->map_fd, &map_key,
11060 &cpu_buf->fd, 0);
11061 if (err) {
11062 err = -errno;
11063 pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n",
11064 cpu, map_key, cpu_buf->fd,
11065 libbpf_strerror_r(err, msg, sizeof(msg)));
11066 goto error;
11067 }
11068
11069 pb->events[j].events = EPOLLIN;
11070 pb->events[j].data.ptr = cpu_buf;
11071 if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd,
11072 &pb->events[j]) < 0) {
11073 err = -errno;
11074 pr_warn("failed to epoll_ctl cpu #%d perf FD %d: %s\n",
11075 cpu, cpu_buf->fd,
11076 libbpf_strerror_r(err, msg, sizeof(msg)));
11077 goto error;
11078 }
11079 j++;
11080 }
11081 pb->cpu_cnt = j;
11082 free(online);
11083
11084 return pb;
11085
11086error:
11087 free(online);
11088 if (pb)
11089 perf_buffer__free(pb);
11090 return ERR_PTR(err);
11091}
11092
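/* In-ring layouts of the two perf record types handled below:
 * PERF_RECORD_SAMPLE carrying size-prefixed raw data, and PERF_RECORD_LOST
 * reporting the number of dropped samples.
 */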
11093struct perf_sample_raw {
11094 struct perf_event_header header;
11095 uint32_t size;
11096 char data[];
11097};
11098
11099struct perf_sample_lost {
11100 struct perf_event_header header;
11101 uint64_t id;
11102 uint64_t lost;
11103 uint64_t sample_id;
11104};
11105
11106static enum bpf_perf_event_ret
11107perf_buffer__process_record(struct perf_event_header *e, void *ctx)
11108{
11109 struct perf_cpu_buf *cpu_buf = ctx;
11110 struct perf_buffer *pb = cpu_buf->pb;
11111 void *data = e;
11112
	/* user wants full control over parsing perf event */
11114 if (pb->event_cb)
11115 return pb->event_cb(pb->ctx, cpu_buf->cpu, e);
11116
11117 switch (e->type) {
11118 case PERF_RECORD_SAMPLE: {
11119 struct perf_sample_raw *s = data;
11120
11121 if (pb->sample_cb)
11122 pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size);
11123 break;
11124 }
11125 case PERF_RECORD_LOST: {
11126 struct perf_sample_lost *s = data;
11127
11128 if (pb->lost_cb)
11129 pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost);
11130 break;
11131 }
11132 default:
		pr_warn("unknown perf sample type %u\n", e->type);
11134 return LIBBPF_PERF_EVENT_ERROR;
11135 }
11136 return LIBBPF_PERF_EVENT_CONT;
11137}
11138
11139static int perf_buffer__process_records(struct perf_buffer *pb,
11140 struct perf_cpu_buf *cpu_buf)
11141{
11142 enum bpf_perf_event_ret ret;
11143
11144 ret = perf_event_read_simple(cpu_buf->base, pb->mmap_size,
11145 pb->page_size, &cpu_buf->buf,
11146 &cpu_buf->buf_size,
11147 perf_buffer__process_record, cpu_buf);
11148 if (ret != LIBBPF_PERF_EVENT_CONT)
11149 return ret;
11150 return 0;
11151}
11152
11153int perf_buffer__epoll_fd(const struct perf_buffer *pb)
11154{
11155 return pb->epoll_fd;
11156}
11157
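/* Wait up to timeout_ms for data to arrive on any of the per-CPU buffers,
 * then drain every buffer that became readable. Returns the number of
 * ready buffers on success, or a negative error code.
 */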
11158int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
11159{
11160 int i, cnt, err;
11161
11162 cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
11163 if (cnt < 0)
11164 return -errno;
11165
11166 for (i = 0; i < cnt; i++) {
11167 struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;
11168
11169 err = perf_buffer__process_records(pb, cpu_buf);
11170 if (err) {
11171 pr_warn("error while processing records: %d\n", err);
11172 return libbpf_err(err);
11173 }
11174 }
11175 return cnt;
11176}
11177
/* Return number of PERF_EVENT_ARRAY map slots set up by this perf_buffer
 * manager.
 */
11181size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb)
11182{
11183 return pb->cpu_cnt;
11184}
11185
/*
 * Return perf_event FD of a ring buffer in *raw* mode, set up by this
 * perf_buffer manager. The FD can be polled or consumed directly by the
 * caller; -EINVAL is returned for an out-of-range index and -ENOENT for a
 * slot that wasn't set up.
 */
11191int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx)
11192{
11193 struct perf_cpu_buf *cpu_buf;
11194
11195 if (buf_idx >= pb->cpu_cnt)
11196 return libbpf_err(-EINVAL);
11197
11198 cpu_buf = pb->cpu_bufs[buf_idx];
11199 if (!cpu_buf)
11200 return libbpf_err(-ENOENT);
11201
11202 return cpu_buf->fd;
11203}
11204
/*
 * Consume data from perf ring buffer corresponding to slot *buf_idx* in
 * PERF_EVENT_ARRAY BPF map without waiting/polling. If there is no data to
 * consume, do nothing and return success.
 * Returns:
 *   - 0 on success;
 *   - <0 on failure.
 */
11213int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx)
11214{
11215 struct perf_cpu_buf *cpu_buf;
11216
11217 if (buf_idx >= pb->cpu_cnt)
11218 return libbpf_err(-EINVAL);
11219
11220 cpu_buf = pb->cpu_bufs[buf_idx];
11221 if (!cpu_buf)
11222 return libbpf_err(-ENOENT);
11223
11224 return perf_buffer__process_records(pb, cpu_buf);
11225}
11226
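/* Unconditionally drain all per-CPU buffers without polling first. */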
11227int perf_buffer__consume(struct perf_buffer *pb)
11228{
11229 int i, err;
11230
11231 for (i = 0; i < pb->cpu_cnt; i++) {
11232 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
11233
11234 if (!cpu_buf)
11235 continue;
11236
11237 err = perf_buffer__process_records(pb, cpu_buf);
11238 if (err) {
11239 pr_warn("perf_buffer: failed to process records in buffer #%d: %d\n", i, err);
11240 return libbpf_err(err);
11241 }
11242 }
11243 return 0;
11244}
11245
struct bpf_prog_info_array_desc {
	int	array_offset;	/* e.g. offset of jited_prog_insns */
	int	count_offset;	/* e.g. offset of jited_prog_len */
	int	size_offset;	/* > 0: offset of rec size,
				 * < 0: fixed size of -size_offset
				 */
};
11253
11254static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = {
11255 [BPF_PROG_INFO_JITED_INSNS] = {
11256 offsetof(struct bpf_prog_info, jited_prog_insns),
11257 offsetof(struct bpf_prog_info, jited_prog_len),
11258 -1,
11259 },
11260 [BPF_PROG_INFO_XLATED_INSNS] = {
11261 offsetof(struct bpf_prog_info, xlated_prog_insns),
11262 offsetof(struct bpf_prog_info, xlated_prog_len),
11263 -1,
11264 },
11265 [BPF_PROG_INFO_MAP_IDS] = {
11266 offsetof(struct bpf_prog_info, map_ids),
11267 offsetof(struct bpf_prog_info, nr_map_ids),
11268 -(int)sizeof(__u32),
11269 },
11270 [BPF_PROG_INFO_JITED_KSYMS] = {
11271 offsetof(struct bpf_prog_info, jited_ksyms),
11272 offsetof(struct bpf_prog_info, nr_jited_ksyms),
11273 -(int)sizeof(__u64),
11274 },
11275 [BPF_PROG_INFO_JITED_FUNC_LENS] = {
11276 offsetof(struct bpf_prog_info, jited_func_lens),
11277 offsetof(struct bpf_prog_info, nr_jited_func_lens),
11278 -(int)sizeof(__u32),
11279 },
11280 [BPF_PROG_INFO_FUNC_INFO] = {
11281 offsetof(struct bpf_prog_info, func_info),
11282 offsetof(struct bpf_prog_info, nr_func_info),
11283 offsetof(struct bpf_prog_info, func_info_rec_size),
11284 },
11285 [BPF_PROG_INFO_LINE_INFO] = {
11286 offsetof(struct bpf_prog_info, line_info),
11287 offsetof(struct bpf_prog_info, nr_line_info),
11288 offsetof(struct bpf_prog_info, line_info_rec_size),
11289 },
11290 [BPF_PROG_INFO_JITED_LINE_INFO] = {
11291 offsetof(struct bpf_prog_info, jited_line_info),
11292 offsetof(struct bpf_prog_info, nr_jited_line_info),
11293 offsetof(struct bpf_prog_info, jited_line_info_rec_size),
11294 },
11295 [BPF_PROG_INFO_PROG_TAGS] = {
11296 offsetof(struct bpf_prog_info, prog_tags),
11297 offsetof(struct bpf_prog_info, nr_prog_tags),
11298 -(int)sizeof(__u8) * BPF_TAG_SIZE,
11299 },
11300
11301};
11302
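/* These accessors treat bpf_prog_info as a flat array of __u32/__u64 words
 * so the descriptor table above can address its fields by byte offset; a
 * negative "offset" encodes a fixed record size, which the read helpers
 * hand back as-is (negated).
 */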
11303static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info,
11304 int offset)
11305{
11306 __u32 *array = (__u32 *)info;
11307
11308 if (offset >= 0)
11309 return array[offset / sizeof(__u32)];
11310 return -(int)offset;
11311}
11312
11313static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info,
11314 int offset)
11315{
11316 __u64 *array = (__u64 *)info;
11317
11318 if (offset >= 0)
11319 return array[offset / sizeof(__u64)];
11320 return -(int)offset;
11321}
11322
11323static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset,
11324 __u32 val)
11325{
11326 __u32 *array = (__u32 *)info;
11327
11328 if (offset >= 0)
11329 array[offset / sizeof(__u32)] = val;
11330}
11331
11332static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset,
11333 __u64 val)
11334{
11335 __u64 *array = (__u64 *)info;
11336
11337 if (offset >= 0)
11338 array[offset / sizeof(__u64)] = val;
11339}
11340
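/* Fetch bpf_prog_info together with the variable-length arrays selected in
 * *arrays* as one contiguous allocation. The kernel is asked twice: first
 * to learn each array's dimensions, then again with pointers into the
 * trailing data area filled in so it can deposit the arrays themselves.
 */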
11341struct bpf_prog_info_linear *
11342bpf_program__get_prog_info_linear(int fd, __u64 arrays)
11343{
11344 struct bpf_prog_info_linear *info_linear;
11345 struct bpf_prog_info info = {};
11346 __u32 info_len = sizeof(info);
11347 __u32 data_len = 0;
11348 int i, err;
11349 void *ptr;
11350
11351 if (arrays >> BPF_PROG_INFO_LAST_ARRAY)
11352 return libbpf_err_ptr(-EINVAL);
11353
	/* step 1: get array dimensions */
11355 err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
11356 if (err) {
		pr_debug("can't get prog info: %s\n", strerror(errno));
11358 return libbpf_err_ptr(-EFAULT);
11359 }
11360
	/* step 2: calculate total size of all arrays */
11362 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
11363 bool include_array = (arrays & (1UL << i)) > 0;
11364 struct bpf_prog_info_array_desc *desc;
11365 __u32 count, size;
11366
11367 desc = bpf_prog_info_array_desc + i;
11368
		/* kernel is too old to support this field */
11370 if (info_len < desc->array_offset + sizeof(__u32) ||
11371 info_len < desc->count_offset + sizeof(__u32) ||
11372 (desc->size_offset > 0 && info_len < desc->size_offset))
11373 include_array = false;
11374
11375 if (!include_array) {
11376 arrays &= ~(1UL << i);
11377 continue;
11378 }
11379
11380 count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
11381 size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
11382
11383 data_len += count * size;
11384 }
11385
	/* step 3: allocate continuous memory */
11387 data_len = roundup(data_len, sizeof(__u64));
11388 info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len);
11389 if (!info_linear)
11390 return libbpf_err_ptr(-ENOMEM);
11391
	/* step 4: fill data to info_linear->info */
11393 info_linear->arrays = arrays;
11394 memset(&info_linear->info, 0, sizeof(info));
11395 ptr = info_linear->data;
11396
11397 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
11398 struct bpf_prog_info_array_desc *desc;
11399 __u32 count, size;
11400
11401 if ((arrays & (1UL << i)) == 0)
11402 continue;
11403
11404 desc = bpf_prog_info_array_desc + i;
11405 count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
11406 size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
11407 bpf_prog_info_set_offset_u32(&info_linear->info,
11408 desc->count_offset, count);
11409 bpf_prog_info_set_offset_u32(&info_linear->info,
11410 desc->size_offset, size);
11411 bpf_prog_info_set_offset_u64(&info_linear->info,
11412 desc->array_offset,
11413 ptr_to_u64(ptr));
11414 ptr += count * size;
11415 }
11416
	/* step 5: call syscall again to get required arrays */
11418 err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len);
11419 if (err) {
		pr_debug("can't get prog info: %s\n", strerror(errno));
11421 free(info_linear);
11422 return libbpf_err_ptr(-EFAULT);
11423 }
11424
	/* step 6: verify the data */
11426 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
11427 struct bpf_prog_info_array_desc *desc;
11428 __u32 v1, v2;
11429
11430 if ((arrays & (1UL << i)) == 0)
11431 continue;
11432
11433 desc = bpf_prog_info_array_desc + i;
11434 v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
11435 v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
11436 desc->count_offset);
11437 if (v1 != v2)
11438 pr_warn("%s: mismatch in element count\n", __func__);
11439
11440 v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
11441 v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
11442 desc->size_offset);
11443 if (v1 != v2)
11444 pr_warn("%s: mismatch in rec size\n", __func__);
11445 }
11446
	/* step 7: update info_len and data_len */
11448 info_linear->info_len = sizeof(struct bpf_prog_info);
11449 info_linear->data_len = data_len;
11450
11451 return info_linear;
11452}
11453
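/* Make an info_linear blob position-independent (and reverse that, below)
 * by rewriting the embedded array pointers as offsets relative to
 * info_linear->data, so the blob survives being copied or serialized.
 */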
11454void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear)
11455{
11456 int i;
11457
11458 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
11459 struct bpf_prog_info_array_desc *desc;
11460 __u64 addr, offs;
11461
11462 if ((info_linear->arrays & (1UL << i)) == 0)
11463 continue;
11464
11465 desc = bpf_prog_info_array_desc + i;
11466 addr = bpf_prog_info_read_offset_u64(&info_linear->info,
11467 desc->array_offset);
11468 offs = addr - ptr_to_u64(info_linear->data);
11469 bpf_prog_info_set_offset_u64(&info_linear->info,
11470 desc->array_offset, offs);
11471 }
11472}
11473
11474void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
11475{
11476 int i;
11477
11478 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
11479 struct bpf_prog_info_array_desc *desc;
11480 __u64 addr, offs;
11481
11482 if ((info_linear->arrays & (1UL << i)) == 0)
11483 continue;
11484
11485 desc = bpf_prog_info_array_desc + i;
11486 offs = bpf_prog_info_read_offset_u64(&info_linear->info,
11487 desc->array_offset);
11488 addr = offs + ptr_to_u64(info_linear->data);
11489 bpf_prog_info_set_offset_u64(&info_linear->info,
11490 desc->array_offset, addr);
11491 }
11492}
11493
11494int bpf_program__set_attach_target(struct bpf_program *prog,
11495 int attach_prog_fd,
11496 const char *attach_func_name)
11497{
11498 int btf_obj_fd = 0, btf_id = 0, err;
11499
11500 if (!prog || attach_prog_fd < 0)
11501 return libbpf_err(-EINVAL);
11502
11503 if (prog->obj->loaded)
11504 return libbpf_err(-EINVAL);
11505
11506 if (attach_prog_fd && !attach_func_name) {
		/* remember attach_prog_fd and let bpf_program__load() find
		 * BTF ID during the program load
		 */
11510 prog->attach_prog_fd = attach_prog_fd;
11511 return 0;
11512 }
11513
11514 if (attach_prog_fd) {
11515 btf_id = libbpf_find_prog_btf_id(attach_func_name,
11516 attach_prog_fd);
11517 if (btf_id < 0)
11518 return libbpf_err(btf_id);
11519 } else {
11520 if (!attach_func_name)
11521 return libbpf_err(-EINVAL);
11522
		/* load btf_vmlinux, if not yet */
11524 err = bpf_object__load_vmlinux_btf(prog->obj, true);
11525 if (err)
11526 return libbpf_err(err);
11527 err = find_kernel_btf_id(prog->obj, attach_func_name,
11528 prog->expected_attach_type,
11529 &btf_obj_fd, &btf_id);
11530 if (err)
11531 return libbpf_err(err);
11532 }
11533
11534 prog->attach_btf_id = btf_id;
11535 prog->attach_btf_obj_fd = btf_obj_fd;
11536 prog->attach_prog_fd = attach_prog_fd;
11537 return 0;
11538}
11539
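/* Parse a kernel-style CPU list into a bool-per-CPU mask owned by the
 * caller. For example, "0-3,5" yields mask = {1, 1, 1, 1, 0, 1} and
 * mask_sz = 6.
 */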
11540int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz)
11541{
11542 int err = 0, n, len, start, end = -1;
11543 bool *tmp;
11544
11545 *mask = NULL;
11546 *mask_sz = 0;
11547
	/* Each sub string separated by ',' has format \d+-\d+ or \d+ */
11549 while (*s) {
11550 if (*s == ',' || *s == '\n') {
11551 s++;
11552 continue;
11553 }
11554 n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len);
11555 if (n <= 0 || n > 2) {
11556 pr_warn("Failed to get CPU range %s: %d\n", s, n);
11557 err = -EINVAL;
11558 goto cleanup;
11559 } else if (n == 1) {
11560 end = start;
11561 }
11562 if (start < 0 || start > end) {
11563 pr_warn("Invalid CPU range [%d,%d] in %s\n",
11564 start, end, s);
11565 err = -EINVAL;
11566 goto cleanup;
11567 }
11568 tmp = realloc(*mask, end + 1);
11569 if (!tmp) {
11570 err = -ENOMEM;
11571 goto cleanup;
11572 }
11573 *mask = tmp;
11574 memset(tmp + *mask_sz, 0, start - *mask_sz);
11575 memset(tmp + start, 1, end - start + 1);
11576 *mask_sz = end + 1;
11577 s += len;
11578 }
11579 if (!*mask_sz) {
11580 pr_warn("Empty CPU range\n");
11581 return -EINVAL;
11582 }
11583 return 0;
11584cleanup:
11585 free(*mask);
11586 *mask = NULL;
11587 return err;
11588}
11589
11590int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz)
11591{
11592 int fd, err = 0, len;
11593 char buf[128];
11594
11595 fd = open(fcpu, O_RDONLY | O_CLOEXEC);
11596 if (fd < 0) {
11597 err = -errno;
11598 pr_warn("Failed to open cpu mask file %s: %d\n", fcpu, err);
11599 return err;
11600 }
11601 len = read(fd, buf, sizeof(buf));
11602 close(fd);
11603 if (len <= 0) {
11604 err = len ? -errno : -EINVAL;
11605 pr_warn("Failed to read cpu mask from %s: %d\n", fcpu, err);
11606 return err;
11607 }
11608 if (len >= sizeof(buf)) {
11609 pr_warn("CPU mask is too big in file %s\n", fcpu);
11610 return -E2BIG;
11611 }
11612 buf[len] = '\0';
11613
11614 return parse_cpu_mask_str(buf, mask, mask_sz);
11615}
11616
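/* Number of possible CPUs, as reported by /sys/devices/system/cpu/possible;
 * computed once and cached, so repeated calls are cheap.
 */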
11617int libbpf_num_possible_cpus(void)
11618{
11619 static const char *fcpu = "/sys/devices/system/cpu/possible";
11620 static int cpus;
11621 int err, n, i, tmp_cpus;
11622 bool *mask;
11623
11624 tmp_cpus = READ_ONCE(cpus);
11625 if (tmp_cpus > 0)
11626 return tmp_cpus;
11627
11628 err = parse_cpu_mask_file(fcpu, &mask, &n);
11629 if (err)
11630 return libbpf_err(err);
11631
11632 tmp_cpus = 0;
11633 for (i = 0; i < n; i++) {
11634 if (mask[i])
11635 tmp_cpus++;
11636 }
11637 free(mask);
11638
11639 WRITE_ONCE(cpus, tmp_cpus);
11640 return tmp_cpus;
11641}
11642
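/* Open phase of a generated skeleton: open the object image embedded in
 * the skeleton and resolve every declared map and program pointer by name,
 * so user code can refer to them through typed skeleton fields.
 */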
11643int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
11644 const struct bpf_object_open_opts *opts)
11645{
11646 DECLARE_LIBBPF_OPTS(bpf_object_open_opts, skel_opts,
11647 .object_name = s->name,
11648 );
11649 struct bpf_object *obj;
11650 int i, err;
11651
	/* Attempt to preserve opts->object_name, unless it is overridden by
	 * the user explicitly. Overwriting object name for skeletons is
	 * discouraged, as it breaks global data maps, because they contain
	 * object name prefix as their own map name prefix. When the skeleton
	 * is generated, bpftool assumes that this name will stay the same.
	 */
11658 if (opts) {
11659 memcpy(&skel_opts, opts, sizeof(*opts));
11660 if (!opts->object_name)
11661 skel_opts.object_name = s->name;
11662 }
11663
11664 obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts);
11665 err = libbpf_get_error(obj);
11666 if (err) {
11667 pr_warn("failed to initialize skeleton BPF object '%s': %d\n",
11668 s->name, err);
11669 return libbpf_err(err);
11670 }
11671
11672 *s->obj = obj;
11673
11674 for (i = 0; i < s->map_cnt; i++) {
11675 struct bpf_map **map = s->maps[i].map;
11676 const char *name = s->maps[i].name;
11677 void **mmaped = s->maps[i].mmaped;
11678
11679 *map = bpf_object__find_map_by_name(obj, name);
11680 if (!*map) {
11681 pr_warn("failed to find skeleton map '%s'\n", name);
11682 return libbpf_err(-ESRCH);
11683 }
11684
		/* externs shouldn't be pre-setup from user code */
11686 if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG)
11687 *mmaped = (*map)->mmaped;
11688 }
11689
11690 for (i = 0; i < s->prog_cnt; i++) {
11691 struct bpf_program **prog = s->progs[i].prog;
11692 const char *name = s->progs[i].name;
11693
11694 *prog = bpf_object__find_program_by_name(obj, name);
11695 if (!*prog) {
11696 pr_warn("failed to find skeleton program '%s'\n", name);
11697 return libbpf_err(-ESRCH);
11698 }
11699 }
11700
11701 return 0;
11702}
11703
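/* Load phase of a skeleton: load the object into the kernel, then
 * re-mmap() mmapable internal maps in place so that the skeleton's data
 * section pointers observe the kernel-backed memory from now on.
 */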
11704int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
11705{
11706 int i, err;
11707
11708 err = bpf_object__load(*s->obj);
11709 if (err) {
11710 pr_warn("failed to load BPF skeleton '%s': %d\n", s->name, err);
11711 return libbpf_err(err);
11712 }
11713
11714 for (i = 0; i < s->map_cnt; i++) {
11715 struct bpf_map *map = *s->maps[i].map;
11716 size_t mmap_sz = bpf_map_mmap_sz(map);
11717 int prot, map_fd = bpf_map__fd(map);
11718 void **mmaped = s->maps[i].mmaped;
11719
11720 if (!mmaped)
11721 continue;
11722
11723 if (!(map->def.map_flags & BPF_F_MMAPABLE)) {
11724 *mmaped = NULL;
11725 continue;
11726 }
11727
11728 if (map->def.map_flags & BPF_F_RDONLY_PROG)
11729 prot = PROT_READ;
11730 else
11731 prot = PROT_READ | PROT_WRITE;
11732
		/* Remap anonymous mmap()-ed "map initialization image" as
		 * a BPF map-backed mmap()-ed memory, but preserving the same
		 * memory address. This will cause kernel to change process'
		 * page table to point to a different piece of kernel memory,
		 * but from userspace point of view memory address (and its
		 * contents, being identical at this point) will stay the
		 * same. This mapping will be released by bpf_object__close()
		 * as per normal clean up procedure, so we don't need to worry
		 * about it from skeleton's clean up perspective.
		 */
11743 *mmaped = mmap(map->mmaped, mmap_sz, prot,
11744 MAP_SHARED | MAP_FIXED, map_fd, 0);
11745 if (*mmaped == MAP_FAILED) {
11746 err = -errno;
11747 *mmaped = NULL;
11748 pr_warn("failed to re-mmap() map '%s': %d\n",
11749 bpf_map__name(map), err);
11750 return libbpf_err(err);
11751 }
11752 }
11753
11754 return 0;
11755}
11756
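/* Attach every loaded program that has an auto-attach handler; programs
 * without one are skipped here and can still be attached manually.
 */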
11757int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
11758{
11759 int i, err;
11760
11761 for (i = 0; i < s->prog_cnt; i++) {
11762 struct bpf_program *prog = *s->progs[i].prog;
11763 struct bpf_link **link = s->progs[i].link;
11764
11765 if (!prog->load)
11766 continue;
11767
		/* auto-attaching not supported for this program */
11769 if (!prog->sec_def || !prog->sec_def->attach_fn)
11770 continue;
11771
11772 *link = bpf_program__attach(prog);
11773 err = libbpf_get_error(*link);
11774 if (err) {
11775 pr_warn("failed to auto-attach program '%s': %d\n",
11776 bpf_program__name(prog), err);
11777 return libbpf_err(err);
11778 }
11779 }
11780
11781 return 0;
11782}
11783
11784void bpf_object__detach_skeleton(struct bpf_object_skeleton *s)
11785{
11786 int i;
11787
11788 for (i = 0; i < s->prog_cnt; i++) {
11789 struct bpf_link **link = s->progs[i].link;
11790
11791 bpf_link__destroy(*link);
11792 *link = NULL;
11793 }
11794}
11795
11796void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s)
11797{
11798 if (s->progs)
11799 bpf_object__detach_skeleton(s);
11800 if (s->obj)
11801 bpf_object__close(*s->obj);
11802 free(s->maps);
11803 free(s->progs);
11804 free(s);
11805}
11806