1
2
3
4
5
6
7
8
9
10
11
12
13#ifndef _GNU_SOURCE
14#define _GNU_SOURCE
15#endif
16#include <stdlib.h>
17#include <stdio.h>
18#include <stdarg.h>
19#include <libgen.h>
20#include <inttypes.h>
21#include <limits.h>
22#include <string.h>
23#include <unistd.h>
24#include <endian.h>
25#include <fcntl.h>
26#include <errno.h>
27#include <ctype.h>
28#include <asm/unistd.h>
29#include <linux/err.h>
30#include <linux/kernel.h>
31#include <linux/bpf.h>
32#include <linux/btf.h>
33#include <linux/filter.h>
34#include <linux/list.h>
35#include <linux/limits.h>
36#include <linux/perf_event.h>
37#include <linux/ring_buffer.h>
38#include <linux/version.h>
39#include <sys/epoll.h>
40#include <sys/ioctl.h>
41#include <sys/mman.h>
42#include <sys/stat.h>
43#include <sys/types.h>
44#include <sys/vfs.h>
45#include <sys/utsname.h>
46#include <sys/resource.h>
47#include <libelf.h>
48#include <gelf.h>
49#include <zlib.h>
50
51#include "libbpf.h"
52#include "bpf.h"
53#include "btf.h"
54#include "str_error.h"
55#include "libbpf_internal.h"
56#include "hashmap.h"
57
/* Fallback definitions for systems with older UAPI headers. */
#ifndef EM_BPF
#define EM_BPF 247
#endif

#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC 0xcafe4a11
#endif

/* size of one BPF instruction */
#define BPF_INSN_SZ (sizeof(struct bpf_insn))
67
68
69
70
71#pragma GCC diagnostic ignored "-Wformat-nonliteral"
72
73#define __printf(a, b) __attribute__((format(printf, a, b)))
74
75static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
76static const struct btf_type *
77skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id);
78
79static int __base_pr(enum libbpf_print_level level, const char *format,
80 va_list args)
81{
82 if (level == LIBBPF_DEBUG)
83 return 0;
84
85 return vfprintf(stderr, format, args);
86}
87
88static libbpf_print_fn_t __libbpf_pr = __base_pr;
89
90libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn)
91{
92 libbpf_print_fn_t old_print_fn = __libbpf_pr;
93
94 __libbpf_pr = fn;
95 return old_print_fn;
96}
97
98__printf(2, 3)
99void libbpf_print(enum libbpf_print_level level, const char *format, ...)
100{
101 va_list args;
102
103 if (!__libbpf_pr)
104 return;
105
106 va_start(args, format);
107 __libbpf_pr(level, format, args);
108 va_end(args);
109}
110
/*
 * If a syscall failed with -EPERM while running as root, the likely
 * culprit is a too-low RLIMIT_MEMLOCK; print a hint with the current
 * limit rendered in human-readable units.
 */
static void pr_perm_msg(int err)
{
	struct rlimit limit;
	char buf[100];

	if (err != -EPERM || geteuid() != 0)
		return;

	if (getrlimit(RLIMIT_MEMLOCK, &limit))
		return;

	/* an unlimited memlock rlimit can't be the problem */
	if (limit.rlim_cur == RLIM_INFINITY)
		return;

	if (limit.rlim_cur < 1024)
		snprintf(buf, sizeof(buf), "%zu bytes", (size_t)limit.rlim_cur);
	else if (limit.rlim_cur < 1024 * 1024)
		snprintf(buf, sizeof(buf), "%.1f KiB", (double)limit.rlim_cur / 1024);
	else
		snprintf(buf, sizeof(buf), "%.1f MiB", (double)limit.rlim_cur / (1024 * 1024));

	pr_warn("permission error while running as root; try raising 'ulimit -l'? current value: %s\n",
		buf);
}
136
/* buffer size used for libbpf_strerror_r() message formatting */
#define STRERR_BUFSIZE 128

/* free the pointed-to buffer and NULL the pointer to defend against reuse */
#ifndef zfree
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

/* close fd if valid, reset it to -1; evaluates to close()'s result (or 0) */
#ifndef zclose
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif
152
/* Reinterpret a user-space pointer as the __u64 the BPF syscall ABI expects. */
static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64)(uintptr_t)ptr;
}
157
/* Optional kernel features, probed lazily via kernel_supports(). */
enum kern_feature_id {
	/* v4.14: kernel support for program & map names */
	FEAT_PROG_NAME,
	/* v5.2: kernel support for global data sections */
	FEAT_GLOBAL_DATA,
	/* BTF support */
	FEAT_BTF,
	/* BTF_KIND_FUNC and BTF_KIND_FUNC_PROTO support */
	FEAT_BTF_FUNC,
	/* BTF_KIND_VAR and BTF_KIND_DATASEC support */
	FEAT_BTF_DATASEC,
	/* BTF_FUNC_GLOBAL is supported */
	FEAT_BTF_GLOBAL_FUNC,
	/* BPF_F_MMAPABLE is supported for arrays */
	FEAT_ARRAY_MMAP,
	/* kernel support for expected_attach_type in BPF_PROG_LOAD */
	FEAT_EXP_ATTACH_TYPE,
	/* bpf_probe_read_{kernel,user}[_str] helpers */
	FEAT_PROBE_READ_KERN,
	/* BPF_PROG_BIND_MAP is supported */
	FEAT_PROG_BIND_MAP,
	/* Kernel module BTF support */
	FEAT_MODULE_BTF,
	/* BTF_KIND_FLOAT support */
	FEAT_BTF_FLOAT,
	__FEAT_CNT,
};
185
186static bool kernel_supports(enum kern_feature_id feat_id);
187
/* Kinds of instruction fix-ups applied during relocation. */
enum reloc_type {
	RELO_LD64,	/* ld_imm64 instruction referencing a map */
	RELO_CALL,	/* call to another BPF subprogram */
	RELO_DATA,	/* reference into a global data section */
	RELO_EXTERN,	/* reference to an extern symbol */
};
194
/* A single relocation to apply to a program's instructions. */
struct reloc_desc {
	enum reloc_type type;
	int insn_idx;	/* index of the instruction to patch */
	int map_idx;	/* map index, for map-referencing relocations */
	int sym_off;	/* symbol offset within its section */
	bool processed;	/* set once the relocation has been applied */
};
202
struct bpf_sec_def;

/* auto-attach handler invoked for programs in a recognized section */
typedef struct bpf_link *(*attach_fn_t)(const struct bpf_sec_def *sec,
					struct bpf_program *prog);

/*
 * Describes how programs placed in an ELF section with a given name
 * prefix should be typed, loaded, and (optionally) attached.
 */
struct bpf_sec_def {
	const char *sec;	/* section name prefix to match */
	size_t len;		/* length of that prefix */
	enum bpf_prog_type prog_type;
	enum bpf_attach_type expected_attach_type;
	bool is_exp_attach_type_optional;
	bool is_attachable;
	bool is_attach_btf;
	bool is_sleepable;
	attach_fn_t attach_fn;	/* NULL if not auto-attachable */
};
219
220
221
222
223
/*
 * One BPF program (entry-point function) parsed from the object file,
 * together with everything needed to load and attach it.
 */
struct bpf_program {
	const struct bpf_sec_def *sec_def; /* matched section definition, if any */
	char *sec_name;		/* name of the ELF section it came from */
	size_t sec_idx;		/* index of that ELF section */
	/* offset, in instructions, of this program within its section */
	size_t sec_insn_off;
	/* number of instructions originally in the section for this
	 * program (i.e., not counting any later-appended subprograms)
	 */
	size_t sec_insn_cnt;
	/* offset, in instructions, of this subprogram inside the main
	 * program's combined instruction array; 0 for main programs
	 */
	size_t sub_insn_off;

	char *name;		/* function (symbol) name */
	/* sec_name with '/'s replaced by '_'s, used for auto-pinning */
	char *pin_name;

	/* instructions belonging to this program; for a main program
	 * this may also include appended subprogram instructions
	 */
	struct bpf_insn *insns;
	/* current number of instructions in insns */
	size_t insns_cnt;

	struct reloc_desc *reloc_desc;	/* pending relocations */
	int nr_reloc;
	int log_level;		/* verifier log verbosity for this program */

	struct {
		int nr;		/* number of loaded instances; -1 = never loaded */
		int *fds;	/* kernel program FDs, one per instance */
	} instances;
	bpf_program_prep_t preprocessor;	/* per-instance insn rewriter */

	struct bpf_object *obj;	/* owning object */
	void *priv;		/* user's private data */
	bpf_program_clear_priv_t clear_priv;

	bool load;		/* whether to load this program at all */
	enum bpf_prog_type type;
	enum bpf_attach_type expected_attach_type;
	int prog_ifindex;	/* ifindex for offloaded programs */
	__u32 attach_btf_obj_fd;
	__u32 attach_btf_id;
	__u32 attach_prog_fd;
	void *func_info;	/* .BTF.ext func info, if present */
	__u32 func_info_rec_size;
	__u32 func_info_cnt;

	void *line_info;	/* .BTF.ext line info, if present */
	__u32 line_info_rec_size;
	__u32 line_info_cnt;
	__u32 prog_flags;	/* extra BPF_PROG_LOAD flags */
};
295
/*
 * Parsed state of a single struct_ops variable: local BTF type info,
 * the user's initial data, and the kernel-side map value image that
 * gets built from it in bpf_map__init_kern_struct_ops().
 */
struct bpf_struct_ops {
	const char *tname;		/* name of the struct type */
	const struct btf_type *type;	/* local BTF type of the struct */
	struct bpf_program **progs;	/* one slot per member, for func ptrs */
	__u32 *kern_func_off;		/* per-member offset within kern_vdata */
	/* raw bytes of the variable, copied from the .struct_ops section */
	void *data;
	/* image laid out according to the kernel's corresponding
	 * "bpf_struct_ops_<tname>" value type; non-func-ptr members are
	 * copied here from data during init
	 */
	void *kern_vdata;
	__u32 type_id;			/* local BTF type ID */
};
316
/* Names of ELF sections that get special handling from libbpf. */
#define DATA_SEC ".data"
#define BSS_SEC ".bss"
#define RODATA_SEC ".rodata"
#define KCONFIG_SEC ".kconfig"
#define KSYMS_SEC ".ksyms"
#define STRUCT_OPS_SEC ".struct_ops"
323
/* Kinds of internal maps libbpf creates to back special sections. */
enum libbpf_map_type {
	LIBBPF_MAP_UNSPEC,	/* not an internal map */
	LIBBPF_MAP_DATA,	/* backs .data */
	LIBBPF_MAP_BSS,		/* backs .bss */
	LIBBPF_MAP_RODATA,	/* backs .rodata */
	LIBBPF_MAP_KCONFIG,	/* backs .kconfig externs */
};
331
/* Maps internal map kind to the name of the section/BTF datasec backing it. */
static const char * const libbpf_type_to_btf_name[] = {
	[LIBBPF_MAP_DATA] = DATA_SEC,
	[LIBBPF_MAP_BSS] = BSS_SEC,
	[LIBBPF_MAP_RODATA] = RODATA_SEC,
	[LIBBPF_MAP_KCONFIG] = KCONFIG_SEC,
};
338
/* State of a single BPF map, parsed from the object and/or created in kernel. */
struct bpf_map {
	char *name;
	int fd;			/* kernel map FD; -1 if not created yet */
	int sec_idx;		/* ELF section the definition came from */
	size_t sec_offset;	/* offset of the definition in that section */
	int map_ifindex;	/* ifindex for offloaded maps */
	int inner_map_fd;	/* map-in-map: FD of inner map prototype */
	struct bpf_map_def def;
	__u32 numa_node;
	__u32 btf_var_idx;	/* index of VAR in the maps DATASEC */
	__u32 btf_key_type_id;
	__u32 btf_value_type_id;
	__u32 btf_vmlinux_value_type_id;	/* struct_ops kernel value type */
	void *priv;		/* user's private data */
	bpf_map_clear_priv_t clear_priv;
	enum libbpf_map_type libbpf_type;	/* internal map kind, if any */
	void *mmaped;		/* mmap()'ed region of internal maps */
	struct bpf_struct_ops *st_ops;	/* struct_ops state, if applicable */
	struct bpf_map *inner_map;	/* declared inner map definition */
	void **init_slots;	/* initial values for map-in-map slots */
	int init_slots_sz;
	char *pin_path;		/* BPF FS path to (auto-)pin the map at */
	bool pinned;
	bool reused;		/* map FD was taken over from a pinned map */
};
364
/* Kinds of extern variables libbpf can resolve. */
enum extern_type {
	EXT_UNKNOWN,
	EXT_KCFG,	/* Kconfig value, materialized in the .kconfig map */
	EXT_KSYM,	/* kernel symbol, from the .ksyms section */
};
370
/* Value types a Kconfig extern can have. */
enum kcfg_type {
	KCFG_UNKNOWN,
	KCFG_CHAR,
	KCFG_BOOL,
	KCFG_INT,
	KCFG_TRISTATE,
	KCFG_CHAR_ARR,
};
379
/* Description of one extern variable referenced by the BPF code. */
struct extern_desc {
	enum extern_type type;
	int sym_idx;		/* ELF symbol index */
	int btf_id;		/* BTF ID of the extern's VAR */
	int sec_btf_id;		/* BTF ID of the enclosing DATASEC */
	const char *name;
	bool is_set;		/* value has been resolved */
	bool is_weak;		/* weak extern: may stay unresolved */
	union {
		struct {
			enum kcfg_type type;
			int sz;
			int align;
			int data_off;	/* offset inside the .kconfig map */
			bool is_signed;
		} kcfg;
		struct {
			unsigned long long addr;	/* resolved address */

			/* target kernel BTF object FD and type ID */
			int kernel_btf_obj_fd;
			int kernel_btf_id;

			/* local BTF type ID of the ksym */
			__u32 type_id;
		} ksym;
	};
};
408
409static LIST_HEAD(bpf_objects_list);
410
/* Cached BTF information for one kernel module. */
struct module_btf {
	struct btf *btf;
	char *name;	/* module name */
	__u32 id;	/* kernel BTF object ID */
	int fd;		/* kernel BTF object FD */
};
417
/* In-memory representation of an opened BPF ELF object. */
struct bpf_object {
	char name[BPF_OBJ_NAME_LEN];
	char license[64];
	__u32 kern_version;	/* kernel version the object targets */

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;
	size_t maps_cap;	/* allocated capacity of the maps array */

	char *kconfig;		/* user-provided Kconfig override string */
	struct extern_desc *externs;
	int nr_extern;
	int kconfig_map_idx;	/* index of the .kconfig map; -1 if none */
	int rodata_map_idx;	/* index of the .rodata map; -1 if none */

	bool loaded;
	bool has_subcalls;

	/*
	 * ELF parsing state; only meaningful between open and
	 * bpf_object__elf_finish().
	 */
	struct {
		int fd;			/* object file FD, or -1 */
		const void *obj_buf;	/* in-memory ELF image, if any */
		size_t obj_buf_sz;
		Elf *elf;
		GElf_Ehdr ehdr;
		Elf_Data *symbols;	/* symbol table contents */
		Elf_Data *data;		/* .data contents */
		Elf_Data *rodata;	/* .rodata contents */
		Elf_Data *bss;		/* .bss contents */
		Elf_Data *st_ops_data;	/* .struct_ops contents */
		size_t shstrndx;	/* section-name strings section index */
		size_t strtabidx;	/* string table section index */
		struct {
			GElf_Shdr shdr;
			Elf_Data *data;
		} *reloc_sects;		/* relocation sections found */
		int nr_reloc_sects;
		/* section indices below are -1 when absent */
		int maps_shndx;
		int btf_maps_shndx;
		__u32 btf_maps_sec_btf_id;
		int text_shndx;
		int symbols_shndx;
		int data_shndx;
		int rodata_shndx;
		int bss_shndx;
		int st_ops_shndx;
	} efile;

	/* node in the global bpf_objects_list */
	struct list_head list;

	struct btf *btf;	/* this object's own BTF */
	struct btf_ext *btf_ext;

	/* vmlinux BTF (btf_modules_loaded suggests lazy loading —
	 * confirmed elsewhere in the file, not in this chunk)
	 */
	struct btf *btf_vmlinux;
	/* user-supplied vmlinux BTF override */
	struct btf *btf_vmlinux_override;
	/* per-module BTFs */
	struct module_btf *btf_modules;
	bool btf_modules_loaded;
	size_t btf_module_cnt;
	size_t btf_module_cap;

	void *priv;		/* user's private data */
	bpf_object_clear_priv_t clear_priv;

	char path[];		/* object file path (flexible array member) */
};
497#define obj_elf_valid(o) ((o)->efile.elf)
498
499static const char *elf_sym_str(const struct bpf_object *obj, size_t off);
500static const char *elf_sec_str(const struct bpf_object *obj, size_t off);
501static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx);
502static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name);
503static int elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn, GElf_Shdr *hdr);
504static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn);
505static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn);
506static int elf_sym_by_sec_off(const struct bpf_object *obj, size_t sec_idx,
507 size_t off, __u32 sym_type, GElf_Sym *sym);
508
/*
 * Close all loaded kernel instances of @prog and free per-load
 * func/line info. The program structure itself remains valid and can
 * be loaded again.
 */
void bpf_program__unload(struct bpf_program *prog)
{
	int i;

	if (!prog)
		return;

	/*
	 * instances.nr == -1 means the program was never loaded; any
	 * other non-positive value indicates corrupted internal state.
	 */
	if (prog->instances.nr > 0) {
		for (i = 0; i < prog->instances.nr; i++)
			zclose(prog->instances.fds[i]);
	} else if (prog->instances.nr != -1) {
		pr_warn("Internal error: instances.nr is %d\n",
			prog->instances.nr);
	}

	prog->instances.nr = -1;
	zfree(&prog->instances.fds);

	zfree(&prog->func_info);
	zfree(&prog->line_info);
}
534
535static void bpf_program__exit(struct bpf_program *prog)
536{
537 if (!prog)
538 return;
539
540 if (prog->clear_priv)
541 prog->clear_priv(prog, prog->priv);
542
543 prog->priv = NULL;
544 prog->clear_priv = NULL;
545
546 bpf_program__unload(prog);
547 zfree(&prog->name);
548 zfree(&prog->sec_name);
549 zfree(&prog->pin_name);
550 zfree(&prog->insns);
551 zfree(&prog->reloc_desc);
552
553 prog->nr_reloc = 0;
554 prog->insns_cnt = 0;
555 prog->sec_idx = -1;
556}
557
558static char *__bpf_program__pin_name(struct bpf_program *prog)
559{
560 char *name, *p;
561
562 name = p = strdup(prog->sec_name);
563 while ((p = strchr(p, '/')))
564 *p = '_';
565
566 return name;
567}
568
/*
 * A call to another BPF subprogram is encoded as BPF_JMP|BPF_CALL with
 * src_reg == BPF_PSEUDO_CALL; dst_reg and off must both be zero.
 */
static bool insn_is_subprog_call(const struct bpf_insn *insn)
{
	if (BPF_CLASS(insn->code) != BPF_JMP)
		return false;
	if (BPF_OP(insn->code) != BPF_CALL || BPF_SRC(insn->code) != BPF_K)
		return false;
	return insn->src_reg == BPF_PSEUDO_CALL &&
	       insn->dst_reg == 0 &&
	       insn->off == 0;
}
578
/*
 * Initialize one bpf_program from @insn_data, a slice of an ELF code
 * section. Validates instruction alignment, duplicates all name
 * strings, and copies the instructions. Returns 0 on success or a
 * negative error; on failure the partially-built program is torn down.
 */
static int
bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog,
		      const char *name, size_t sec_idx, const char *sec_name,
		      size_t sec_off, void *insn_data, size_t insn_data_sz)
{
	/* program must be non-empty and insn-aligned in offset and size */
	if (insn_data_sz == 0 || insn_data_sz % BPF_INSN_SZ || sec_off % BPF_INSN_SZ) {
		pr_warn("sec '%s': corrupted program '%s', offset %zu, size %zu\n",
			sec_name, name, sec_off, insn_data_sz);
		return -EINVAL;
	}

	memset(prog, 0, sizeof(*prog));
	prog->obj = obj;

	prog->sec_idx = sec_idx;
	prog->sec_insn_off = sec_off / BPF_INSN_SZ;
	prog->sec_insn_cnt = insn_data_sz / BPF_INSN_SZ;
	/* starts equal to sec_insn_cnt; may be adjusted later */
	prog->insns_cnt = prog->sec_insn_cnt;

	prog->type = BPF_PROG_TYPE_UNSPEC;
	prog->load = true;

	prog->instances.fds = NULL;
	prog->instances.nr = -1;	/* -1 == not loaded yet */

	prog->sec_name = strdup(sec_name);
	if (!prog->sec_name)
		goto errout;

	prog->name = strdup(name);
	if (!prog->name)
		goto errout;

	prog->pin_name = __bpf_program__pin_name(prog);
	if (!prog->pin_name)
		goto errout;

	prog->insns = malloc(insn_data_sz);
	if (!prog->insns)
		goto errout;
	memcpy(prog->insns, insn_data, insn_data_sz);

	return 0;
errout:
	pr_warn("sec '%s': failed to allocate memory for prog '%s'\n", sec_name, name);
	bpf_program__exit(prog);
	return -ENOMEM;
}
628
/*
 * Walk a code section and create one bpf_program for every STT_FUNC
 * symbol found in it, growing obj->programs one entry at a time.
 * Programs are expected to tile the section back to back; any gap or
 * overrun is reported as a format error.
 */
static int
bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
			 const char *sec_name, int sec_idx)
{
	struct bpf_program *prog, *progs;
	void *data = sec_data->d_buf;
	size_t sec_sz = sec_data->d_size, sec_off, prog_sz;
	int nr_progs, err;
	const char *name;
	GElf_Sym sym;

	progs = obj->programs;
	nr_progs = obj->nr_programs;
	sec_off = 0;

	while (sec_off < sec_sz) {
		/* each program must start at a function symbol */
		if (elf_sym_by_sec_off(obj, sec_idx, sec_off, STT_FUNC, &sym)) {
			pr_warn("sec '%s': failed to find program symbol at offset %zu\n",
				sec_name, sec_off);
			return -LIBBPF_ERRNO__FORMAT;
		}

		prog_sz = sym.st_size;

		name = elf_sym_str(obj, sym.st_name);
		if (!name) {
			pr_warn("sec '%s': failed to get symbol name for offset %zu\n",
				sec_name, sec_off);
			return -LIBBPF_ERRNO__FORMAT;
		}

		if (sec_off + prog_sz > sec_sz) {
			pr_warn("sec '%s': program at offset %zu crosses section boundary\n",
				sec_name, sec_off);
			return -LIBBPF_ERRNO__FORMAT;
		}

		pr_debug("sec '%s': found program '%s' at insn offset %zu (%zu bytes), code size %zu insns (%zu bytes)\n",
			 sec_name, name, sec_off / BPF_INSN_SZ, sec_off, prog_sz / BPF_INSN_SZ, prog_sz);

		progs = libbpf_reallocarray(progs, nr_progs + 1, sizeof(*progs));
		if (!progs) {
			/*
			 * On realloc failure the original obj->programs
			 * array is still intact and owned by obj, so no
			 * special cleanup is needed here.
			 */
			pr_warn("sec '%s': failed to alloc memory for new program '%s'\n",
				sec_name, name);
			return -ENOMEM;
		}
		obj->programs = progs;

		prog = &progs[nr_progs];

		err = bpf_object__init_prog(obj, prog, name, sec_idx, sec_name,
					    sec_off, data + sec_off, prog_sz);
		if (err)
			return err;

		/* commit the count only after successful init */
		nr_progs++;
		obj->nr_programs = nr_progs;

		sec_off += prog_sz;
	}

	return 0;
}
697
/*
 * Detect the running kernel's version via uname(2) and pack it in
 * KERNEL_VERSION() format. Returns 0 when the version cannot be
 * determined.
 *
 * Fix: the original ignored uname()'s return value; on (unlikely)
 * failure info.release would be read uninitialized.
 */
static __u32 get_kernel_version(void)
{
	__u32 major, minor, patch;
	struct utsname info;

	if (uname(&info))
		return 0;
	if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3)
		return 0;
	return KERNEL_VERSION(major, minor, patch);
}
708
709static const struct btf_member *
710find_member_by_offset(const struct btf_type *t, __u32 bit_offset)
711{
712 struct btf_member *m;
713 int i;
714
715 for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
716 if (btf_member_bit_offset(t, i) == bit_offset)
717 return m;
718 }
719
720 return NULL;
721}
722
/* Return the struct member named @name, or NULL if no member matches. */
static const struct btf_member *
find_member_by_name(const struct btf *btf, const struct btf_type *t,
		    const char *name)
{
	const struct btf_member *m = btf_members(t);
	int i, n = btf_vlen(t);

	for (i = 0; i < n; i++, m++) {
		if (!strcmp(btf__name_by_offset(btf, m->name_off), name))
			return m;
	}

	return NULL;
}
737
738#define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_"
739static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
740 const char *name, __u32 kind);
741
/*
 * For a struct_ops type named @tname, look up in kernel BTF:
 *  - the kernel's own "struct <tname>" (*type / *type_id),
 *  - its map-value wrapper "struct bpf_struct_ops_<tname>"
 *    (*vtype / *vtype_id),
 *  - the member of the wrapper that embeds the actual struct
 *    (*data_member).
 * Returns 0 on success or a negative error.
 */
static int
find_struct_ops_kern_types(const struct btf *btf, const char *tname,
			   const struct btf_type **type, __u32 *type_id,
			   const struct btf_type **vtype, __u32 *vtype_id,
			   const struct btf_member **data_member)
{
	const struct btf_type *kern_type, *kern_vtype;
	const struct btf_member *kern_data_member;
	__s32 kern_vtype_id, kern_type_id;
	__u32 i;

	kern_type_id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT);
	if (kern_type_id < 0) {
		pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n",
			tname);
		return kern_type_id;
	}
	kern_type = btf__type_by_id(btf, kern_type_id);

	/*
	 * Find the corresponding "map_value" type that will be used in
	 * map_update(BPF_MAP_TYPE_STRUCT_OPS), e.g. find
	 * "struct bpf_struct_ops_tcp_congestion_ops" in kernel BTF.
	 */
	kern_vtype_id = find_btf_by_prefix_kind(btf, STRUCT_OPS_VALUE_PREFIX,
						tname, BTF_KIND_STRUCT);
	if (kern_vtype_id < 0) {
		pr_warn("struct_ops init_kern: struct %s%s is not found in kernel BTF\n",
			STRUCT_OPS_VALUE_PREFIX, tname);
		return kern_vtype_id;
	}
	kern_vtype = btf__type_by_id(btf, kern_vtype_id);

	/*
	 * Locate the member of the wrapper struct whose type is the
	 * actual struct_ops struct (i.e. the embedded "data" member).
	 */
	kern_data_member = btf_members(kern_vtype);
	for (i = 0; i < btf_vlen(kern_vtype); i++, kern_data_member++) {
		if (kern_data_member->type == kern_type_id)
			break;
	}
	if (i == btf_vlen(kern_vtype)) {
		pr_warn("struct_ops init_kern: struct %s data is not found in struct %s%s\n",
			tname, STRUCT_OPS_VALUE_PREFIX, tname);
		return -EINVAL;
	}

	*type = kern_type;
	*type_id = kern_type_id;
	*vtype = kern_vtype;
	*vtype_id = kern_vtype_id;
	*data_member = kern_data_member;

	return 0;
}
800
/* true if @map is a BPF_MAP_TYPE_STRUCT_OPS map */
static bool bpf_map__is_struct_ops(const struct bpf_map *map)
{
	return map->def.type == BPF_MAP_TYPE_STRUCT_OPS;
}
805
806
/*
 * Build the kernel-side value image (st_ops->kern_vdata) of a
 * struct_ops map by matching every member of the local struct type
 * against the kernel's version of the struct from @kern_btf:
 *  - func ptr members: record attach info on the associated program
 *    and remember its byte offset inside the kernel image;
 *  - all other members: sizes must match exactly, bytes are copied
 *    verbatim from st_ops->data.
 * Returns 0 on success, -ENOTSUP on any member mismatch, or another
 * negative error.
 */
static int bpf_map__init_kern_struct_ops(struct bpf_map *map,
					 const struct btf *btf,
					 const struct btf *kern_btf)
{
	const struct btf_member *member, *kern_member, *kern_data_member;
	const struct btf_type *type, *kern_type, *kern_vtype;
	__u32 i, kern_type_id, kern_vtype_id, kern_data_off;
	struct bpf_struct_ops *st_ops;
	void *data, *kern_data;
	const char *tname;
	int err;

	st_ops = map->st_ops;
	type = st_ops->type;
	tname = st_ops->tname;
	err = find_struct_ops_kern_types(kern_btf, tname,
					 &kern_type, &kern_type_id,
					 &kern_vtype, &kern_vtype_id,
					 &kern_data_member);
	if (err)
		return err;

	pr_debug("struct_ops init_kern %s: type_id:%u kern_type_id:%u kern_vtype_id:%u\n",
		 map->name, st_ops->type_id, kern_type_id, kern_vtype_id);

	/* the map value is the kernel's wrapper struct, not the raw type */
	map->def.value_size = kern_vtype->size;
	map->btf_vmlinux_value_type_id = kern_vtype_id;

	st_ops->kern_vdata = calloc(1, kern_vtype->size);
	if (!st_ops->kern_vdata)
		return -ENOMEM;

	data = st_ops->data;
	/* BTF member offsets are in bits */
	kern_data_off = kern_data_member->offset / 8;
	kern_data = st_ops->kern_vdata + kern_data_off;

	member = btf_members(type);
	for (i = 0; i < btf_vlen(type); i++, member++) {
		const struct btf_type *mtype, *kern_mtype;
		__u32 mtype_id, kern_mtype_id;
		void *mdata, *kern_mdata;
		__s64 msize, kern_msize;
		__u32 moff, kern_moff;
		__u32 kern_member_idx;
		const char *mname;

		mname = btf__name_by_offset(btf, member->name_off);
		kern_member = find_member_by_name(kern_btf, kern_type, mname);
		if (!kern_member) {
			pr_warn("struct_ops init_kern %s: Cannot find member %s in kernel BTF\n",
				map->name, mname);
			return -ENOTSUP;
		}

		kern_member_idx = kern_member - btf_members(kern_type);
		if (btf_member_bitfield_size(type, i) ||
		    btf_member_bitfield_size(kern_type, kern_member_idx)) {
			pr_warn("struct_ops init_kern %s: bitfield %s is not supported\n",
				map->name, mname);
			return -ENOTSUP;
		}

		moff = member->offset / 8;
		kern_moff = kern_member->offset / 8;

		mdata = data + moff;
		kern_mdata = kern_data + kern_moff;

		mtype = skip_mods_and_typedefs(btf, member->type, &mtype_id);
		kern_mtype = skip_mods_and_typedefs(kern_btf, kern_member->type,
						    &kern_mtype_id);
		/* both sides must resolve to the same BTF kind */
		if (BTF_INFO_KIND(mtype->info) !=
		    BTF_INFO_KIND(kern_mtype->info)) {
			pr_warn("struct_ops init_kern %s: Unmatched member type %s %u != %u(kernel)\n",
				map->name, mname, BTF_INFO_KIND(mtype->info),
				BTF_INFO_KIND(kern_mtype->info));
			return -ENOTSUP;
		}

		if (btf_is_ptr(mtype)) {
			struct bpf_program *prog;

			/* only pointers to function prototypes are allowed */
			mtype = skip_mods_and_typedefs(btf, mtype->type, &mtype_id);
			kern_mtype = skip_mods_and_typedefs(kern_btf,
							    kern_mtype->type,
							    &kern_mtype_id);
			if (!btf_is_func_proto(mtype) ||
			    !btf_is_func_proto(kern_mtype)) {
				pr_warn("struct_ops init_kern %s: non func ptr %s is not supported\n",
					map->name, mname);
				return -ENOTSUP;
			}

			prog = st_ops->progs[i];
			if (!prog) {
				/* unset func ptrs simply stay NULL */
				pr_debug("struct_ops init_kern %s: func ptr %s is not set\n",
					 map->name, mname);
				continue;
			}

			/* attach target: kernel struct type + member index */
			prog->attach_btf_id = kern_type_id;
			prog->expected_attach_type = kern_member_idx;

			st_ops->kern_func_off[i] = kern_data_off + kern_moff;

			pr_debug("struct_ops init_kern %s: func ptr %s is set to prog %s from data(+%u) to kern_data(+%u)\n",
				 map->name, mname, prog->name, moff,
				 kern_moff);

			continue;
		}

		msize = btf__resolve_size(btf, mtype_id);
		kern_msize = btf__resolve_size(kern_btf, kern_mtype_id);
		if (msize < 0 || kern_msize < 0 || msize != kern_msize) {
			pr_warn("struct_ops init_kern %s: Error in size of member %s: %zd != %zd(kernel)\n",
				map->name, mname, (ssize_t)msize,
				(ssize_t)kern_msize);
			return -ENOTSUP;
		}

		pr_debug("struct_ops init_kern %s: copy %s %u bytes from data(+%u) to kern_data(+%u)\n",
			 map->name, mname, (unsigned int)msize,
			 moff, kern_moff);
		memcpy(kern_mdata, mdata, msize);
	}

	return 0;
}
936
937static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
938{
939 struct bpf_map *map;
940 size_t i;
941 int err;
942
943 for (i = 0; i < obj->nr_maps; i++) {
944 map = &obj->maps[i];
945
946 if (!bpf_map__is_struct_ops(map))
947 continue;
948
949 err = bpf_map__init_kern_struct_ops(map, obj->btf,
950 obj->btf_vmlinux);
951 if (err)
952 return err;
953 }
954
955 return 0;
956}
957
/*
 * Parse the .struct_ops ELF section: for each variable described by
 * the corresponding BTF DATASEC, create a BPF_MAP_TYPE_STRUCT_OPS map
 * and snapshot the variable's initial bytes into st_ops->data.
 * Returns 0 when the section is absent or fully parsed; on error the
 * partially-built maps are left for the object teardown path to free.
 */
static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
{
	const struct btf_type *type, *datasec;
	const struct btf_var_secinfo *vsi;
	struct bpf_struct_ops *st_ops;
	const char *tname, *var_name;
	__s32 type_id, datasec_id;
	const struct btf *btf;
	struct bpf_map *map;
	__u32 i;

	if (obj->efile.st_ops_shndx == -1)
		return 0;

	btf = obj->btf;
	datasec_id = btf__find_by_name_kind(btf, STRUCT_OPS_SEC,
					    BTF_KIND_DATASEC);
	if (datasec_id < 0) {
		pr_warn("struct_ops init: DATASEC %s not found\n",
			STRUCT_OPS_SEC);
		return -EINVAL;
	}

	datasec = btf__type_by_id(btf, datasec_id);
	vsi = btf_var_secinfos(datasec);
	for (i = 0; i < btf_vlen(datasec); i++, vsi++) {
		type = btf__type_by_id(obj->btf, vsi->type);
		var_name = btf__name_by_offset(obj->btf, type->name_off);

		/* resolve the VAR down to its underlying struct type */
		type_id = btf__resolve_type(obj->btf, vsi->type);
		if (type_id < 0) {
			pr_warn("struct_ops init: Cannot resolve var type_id %u in DATASEC %s\n",
				vsi->type, STRUCT_OPS_SEC);
			return -EINVAL;
		}

		type = btf__type_by_id(obj->btf, type_id);
		tname = btf__name_by_offset(obj->btf, type->name_off);
		if (!tname[0]) {
			pr_warn("struct_ops init: anonymous type is not supported\n");
			return -ENOTSUP;
		}
		if (!btf_is_struct(type)) {
			pr_warn("struct_ops init: %s is not a struct\n", tname);
			return -EINVAL;
		}

		map = bpf_object__add_map(obj);
		if (IS_ERR(map))
			return PTR_ERR(map);

		map->sec_idx = obj->efile.st_ops_shndx;
		map->sec_offset = vsi->offset;
		map->name = strdup(var_name);
		if (!map->name)
			return -ENOMEM;

		map->def.type = BPF_MAP_TYPE_STRUCT_OPS;
		map->def.key_size = sizeof(int);
		map->def.value_size = type->size;
		map->def.max_entries = 1;

		map->st_ops = calloc(1, sizeof(*map->st_ops));
		if (!map->st_ops)
			return -ENOMEM;
		st_ops = map->st_ops;
		st_ops->data = malloc(type->size);
		/* one program slot and one kernel offset per struct member */
		st_ops->progs = calloc(btf_vlen(type), sizeof(*st_ops->progs));
		st_ops->kern_func_off = malloc(btf_vlen(type) *
					       sizeof(*st_ops->kern_func_off));
		if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off)
			return -ENOMEM;

		/* the variable must fit within the section data */
		if (vsi->offset + type->size > obj->efile.st_ops_data->d_size) {
			pr_warn("struct_ops init: var %s is beyond the end of DATASEC %s\n",
				var_name, STRUCT_OPS_SEC);
			return -EINVAL;
		}

		memcpy(st_ops->data,
		       obj->efile.st_ops_data->d_buf + vsi->offset,
		       type->size);
		st_ops->tname = tname;
		st_ops->type = type;
		st_ops->type_id = type_id;

		pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n",
			 tname, type_id, var_name, vsi->offset);
	}

	return 0;
}
1050
/*
 * Allocate and minimally initialize a bpf_object for @path (or for the
 * in-memory ELF image @obj_buf of @obj_buf_sz bytes). The object name
 * is @obj_name if given, otherwise the file's basename with its first
 * extension stripped. The new object is linked into the global
 * bpf_objects_list. Returns the object or ERR_PTR(-ENOMEM).
 */
static struct bpf_object *bpf_object__new(const char *path,
					  const void *obj_buf,
					  size_t obj_buf_sz,
					  const char *obj_name)
{
	struct bpf_object *obj;
	char *end;

	/* path is stored in the trailing flexible array member */
	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
	if (!obj) {
		pr_warn("alloc memory failed for %s\n", path);
		return ERR_PTR(-ENOMEM);
	}

	strcpy(obj->path, path);
	if (obj_name) {
		strncpy(obj->name, obj_name, sizeof(obj->name) - 1);
		obj->name[sizeof(obj->name) - 1] = 0;
	} else {
		/* use base name of the file, without extension */
		strncpy(obj->name, basename((void *)path),
			sizeof(obj->name) - 1);
		end = strchr(obj->name, '.');
		if (end)
			*end = 0;
	}

	obj->efile.fd = -1;
	/*
	 * obj_buf is not duplicated: the caller keeps ownership and must
	 * keep the buffer alive until ELF parsing is finished.
	 */
	obj->efile.obj_buf = obj_buf;
	obj->efile.obj_buf_sz = obj_buf_sz;
	/* -1 marks "section not found yet" for all section indices */
	obj->efile.maps_shndx = -1;
	obj->efile.btf_maps_shndx = -1;
	obj->efile.data_shndx = -1;
	obj->efile.rodata_shndx = -1;
	obj->efile.bss_shndx = -1;
	obj->efile.st_ops_shndx = -1;
	obj->kconfig_map_idx = -1;
	obj->rodata_map_idx = -1;

	obj->kern_version = get_kernel_version();
	obj->loaded = false;

	INIT_LIST_HEAD(&obj->list);
	list_add(&obj->list, &bpf_objects_list);
	return obj;
}
1103
/* Tear down all libelf state for @obj; safe to call multiple times. */
static void bpf_object__elf_finish(struct bpf_object *obj)
{
	if (!obj_elf_valid(obj))
		return;

	/* obj_elf_valid() guarantees efile.elf is non-NULL here */
	elf_end(obj->efile.elf);
	obj->efile.elf = NULL;

	/* data pointers below were owned by the Elf handle just freed */
	obj->efile.symbols = NULL;
	obj->efile.data = NULL;
	obj->efile.rodata = NULL;
	obj->efile.bss = NULL;
	obj->efile.st_ops_data = NULL;

	zfree(&obj->efile.reloc_sects);
	obj->efile.nr_reloc_sects = 0;
	zclose(obj->efile.fd);
	obj->efile.obj_buf = NULL;
	obj->efile.obj_buf_sz = 0;
}
1125
1126
1127#ifndef ELF_C_READ_MMAP
1128#define ELF_C_READ_MMAP ELF_C_READ
1129#endif
1130
/*
 * Open the object's ELF (either from the file at obj->path or from the
 * user-provided memory buffer) and validate that it is a relocatable
 * eBPF object with readable section-name strings. On any failure all
 * ELF state is torn down before returning a negative error.
 */
static int bpf_object__elf_init(struct bpf_object *obj)
{
	int err = 0;
	GElf_Ehdr *ep;

	if (obj_elf_valid(obj)) {
		pr_warn("elf: init internal error\n");
		return -LIBBPF_ERRNO__LIBELF;
	}

	if (obj->efile.obj_buf_sz > 0) {
		/* parse directly from the caller's in-memory buffer */
		obj->efile.elf = elf_memory((char *)obj->efile.obj_buf,
					    obj->efile.obj_buf_sz);
	} else {
		obj->efile.fd = open(obj->path, O_RDONLY);
		if (obj->efile.fd < 0) {
			char errmsg[STRERR_BUFSIZE], *cp;

			err = -errno;
			cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
			pr_warn("elf: failed to open %s: %s\n", obj->path, cp);
			return err;
		}

		obj->efile.elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL);
	}

	if (!obj->efile.elf) {
		pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__LIBELF;
		goto errout;
	}

	if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
		pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}
	ep = &obj->efile.ehdr;

	if (elf_getshdrstrndx(obj->efile.elf, &obj->efile.shstrndx)) {
		pr_warn("elf: failed to get section names section index for %s: %s\n",
			obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	/* ELF is corrupted/truncated; avoid calling elf_strptr later */
	if (!elf_rawdata(elf_getscn(obj->efile.elf, obj->efile.shstrndx), NULL)) {
		pr_warn("elf: failed to get section names strings from %s: %s\n",
			obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	/* e_machine == 0 (EM_NONE) is tolerated for older compilers */
	if (ep->e_type != ET_REL ||
	    (ep->e_machine && ep->e_machine != EM_BPF)) {
		pr_warn("elf: %s is not a valid eBPF object file\n", obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	return 0;
errout:
	bpf_object__elf_finish(obj);
	return err;
}
1203
/*
 * libbpf performs no byte swapping, so the object's ELF byte order
 * must match the host's; reject foreign-endian objects outright.
 */
static int bpf_object__check_endianness(struct bpf_object *obj)
{
#if __BYTE_ORDER == __LITTLE_ENDIAN
	if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
		return 0;
#elif __BYTE_ORDER == __BIG_ENDIAN
	if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2MSB)
		return 0;
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
	pr_warn("elf: endianness mismatch in %s.\n", obj->path);
	return -LIBBPF_ERRNO__ENDIAN;
}
1218
1219static int
1220bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
1221{
1222 memcpy(obj->license, data, min(size, sizeof(obj->license) - 1));
1223 pr_debug("license of %s is %s\n", obj->path, obj->license);
1224 return 0;
1225}
1226
1227static int
1228bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
1229{
1230 __u32 kver;
1231
1232 if (size != sizeof(kver)) {
1233 pr_warn("invalid kver section in %s\n", obj->path);
1234 return -LIBBPF_ERRNO__FORMAT;
1235 }
1236 memcpy(&kver, data, sizeof(kver));
1237 obj->kern_version = kver;
1238 pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
1239 return 0;
1240}
1241
/* map-in-map types are the ones whose values are other maps */
static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
{
	return type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
	       type == BPF_MAP_TYPE_HASH_OF_MAPS;
}
1249
/*
 * Look up the size of an ELF section by name, with fast paths for the
 * well-known sections cached on the object. Returns 0 and sets *size
 * on success, -EINVAL for a NULL name, -ENOENT if not found. Note the
 * final check is on *size, so a cached section that exists but is
 * empty still yields -ENOENT.
 */
int bpf_object__section_size(const struct bpf_object *obj, const char *name,
			     __u32 *size)
{
	int ret = -ENOENT;

	*size = 0;
	if (!name) {
		return -EINVAL;
	} else if (!strcmp(name, DATA_SEC)) {
		if (obj->efile.data)
			*size = obj->efile.data->d_size;
	} else if (!strcmp(name, BSS_SEC)) {
		if (obj->efile.bss)
			*size = obj->efile.bss->d_size;
	} else if (!strcmp(name, RODATA_SEC)) {
		if (obj->efile.rodata)
			*size = obj->efile.rodata->d_size;
	} else if (!strcmp(name, STRUCT_OPS_SEC)) {
		if (obj->efile.st_ops_data)
			*size = obj->efile.st_ops_data->d_size;
	} else {
		/* fall back to a full ELF section lookup by name */
		Elf_Scn *scn = elf_sec_by_name(obj, name);
		Elf_Data *data = elf_sec_data(obj, scn);

		if (data) {
			ret = 0;
			*size = data->d_size;
		}
	}

	return *size ? 0 : ret;
}
1282
/*
 * Find the section offset of a global data variable named @name by
 * scanning the ELF symbol table for a global STT_OBJECT symbol with a
 * matching name. Returns 0 and sets *off, or a negative error.
 */
int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
				__u32 *off)
{
	Elf_Data *symbols = obj->efile.symbols;
	size_t idx, nr_syms;

	if (!name || !off)
		return -EINVAL;

	nr_syms = symbols->d_size / sizeof(GElf_Sym);
	for (idx = 0; idx < nr_syms; idx++) {
		const char *sname;
		GElf_Sym sym;

		if (!gelf_getsym(symbols, idx, &sym))
			continue;
		/* only global data objects can back a named variable */
		if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
		    GELF_ST_TYPE(sym.st_info) != STT_OBJECT)
			continue;

		sname = elf_sym_str(obj, sym.st_name);
		if (!sname) {
			pr_warn("failed to get sym name string for var %s\n",
				name);
			return -EIO;
		}
		if (strcmp(name, sname) == 0) {
			*off = sym.st_value;
			return 0;
		}
	}

	return -ENOENT;
}
1316
/*
 * Reserve and return the next free slot in obj->maps, growing the
 * array geometrically (x1.5, minimum capacity 4) when full. Fresh
 * slots are zeroed and have their FD fields preset to -1. Returns
 * ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
{
	struct bpf_map *new_maps;
	size_t new_cap;
	int i;

	if (obj->nr_maps < obj->maps_cap)
		return &obj->maps[obj->nr_maps++];

	new_cap = max((size_t)4, obj->maps_cap * 3 / 2);
	new_maps = libbpf_reallocarray(obj->maps, new_cap, sizeof(*obj->maps));
	if (!new_maps) {
		pr_warn("alloc maps for object failed\n");
		return ERR_PTR(-ENOMEM);
	}

	obj->maps_cap = new_cap;
	obj->maps = new_maps;

	/* zero out the newly reserved tail of the array */
	memset(obj->maps + obj->nr_maps, 0,
	       (obj->maps_cap - obj->nr_maps) * sizeof(*obj->maps));

	/*
	 * preset FDs to -1: zero is a valid FD and would make untouched
	 * slots look like they reference open maps
	 */
	for (i = obj->nr_maps; i < obj->maps_cap; i++) {
		obj->maps[i].fd = -1;
		obj->maps[i].inner_map_fd = -1;
	}

	return &obj->maps[obj->nr_maps++];
}
1350
1351static size_t bpf_map_mmap_sz(const struct bpf_map *map)
1352{
1353 long page_sz = sysconf(_SC_PAGE_SIZE);
1354 size_t map_sz;
1355
1356 map_sz = (size_t)roundup(map->def.value_size, 8) * map->def.max_entries;
1357 map_sz = roundup(map_sz, page_sz);
1358 return map_sz;
1359}
1360
1361static char *internal_map_name(struct bpf_object *obj,
1362 enum libbpf_map_type type)
1363{
1364 char map_name[BPF_OBJ_NAME_LEN], *p;
1365 const char *sfx = libbpf_type_to_btf_name[type];
1366 int sfx_len = max((size_t)7, strlen(sfx));
1367 int pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1,
1368 strlen(obj->name));
1369
1370 snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name,
1371 sfx_len, libbpf_type_to_btf_name[type]);
1372
1373
1374 for (p = map_name; *p && p < map_name + sizeof(map_name); p++)
1375 if (!isalnum(*p) && *p != '_' && *p != '.')
1376 *p = '_';
1377
1378 return strdup(map_name);
1379}
1380
/* Create and register an internal map (global data, kconfig, etc.) of given
 * type, backed by an anonymous mmap()'ed buffer of at least data_sz bytes.
 * If data is non-NULL, its contents seed the buffer (e.g. .data section
 * contents); NULL leaves it zeroed (e.g. .bss).
 *
 * Returns 0 on success, negative errno on failure.
 */
static int
bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
			      int sec_idx, void *data, size_t data_sz)
{
	struct bpf_map_def *def;
	struct bpf_map *map;
	int err;

	map = bpf_object__add_map(obj);
	if (IS_ERR(map))
		return PTR_ERR(map);

	map->libbpf_type = type;
	map->sec_idx = sec_idx;
	map->sec_offset = 0;
	map->name = internal_map_name(obj, type);
	if (!map->name) {
		pr_warn("failed to alloc map name\n");
		return -ENOMEM;
	}

	/* internal maps are single-entry arrays holding the whole section */
	def = &map->def;
	def->type = BPF_MAP_TYPE_ARRAY;
	def->key_size = sizeof(int);
	def->value_size = data_sz;
	def->max_entries = 1;
	/* read-only sections are frozen against program-side writes */
	def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG
			 ? BPF_F_RDONLY_PROG : 0;
	def->map_flags |= BPF_F_MMAPABLE;

	pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
		 map->name, map->sec_idx, map->sec_offset, def->map_flags);

	/* anonymous mapping serves as the user-space staging buffer for the
	 * map's contents until the map is actually created in the kernel
	 */
	map->mmaped = mmap(NULL, bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (map->mmaped == MAP_FAILED) {
		err = -errno;
		map->mmaped = NULL;
		pr_warn("failed to alloc map '%s' content buffer: %d\n",
			map->name, err);
		/* undo the name allocation; the map slot itself stays */
		zfree(&map->name);
		return err;
	}

	if (data)
		memcpy(map->mmaped, data, data_sz);

	pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
	return 0;
}
1431
/* Create internal maps for the object's global data sections:
 * .data (initialized read-write), .rodata (read-only), and .bss
 * (zero-initialized, hence NULL data). Returns 0 or negative errno.
 */
static int bpf_object__init_global_data_maps(struct bpf_object *obj)
{
	int err;

	if (obj->efile.data_shndx >= 0) {
		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
						    obj->efile.data_shndx,
						    obj->efile.data->d_buf,
						    obj->efile.data->d_size);
		if (err)
			return err;
	}
	if (obj->efile.rodata_shndx >= 0) {
		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
						    obj->efile.rodata_shndx,
						    obj->efile.rodata->d_buf,
						    obj->efile.rodata->d_size);
		if (err)
			return err;

		/* remembered so relocations can reference the rodata map */
		obj->rodata_map_idx = obj->nr_maps - 1;
	}
	if (obj->efile.bss_shndx >= 0) {
		/* NULL data: .bss contents are implicitly all-zero */
		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
						    obj->efile.bss_shndx,
						    NULL,
						    obj->efile.bss->d_size);
		if (err)
			return err;
	}
	return 0;
}
1467
1468
1469static struct extern_desc *find_extern_by_name(const struct bpf_object *obj,
1470 const void *name)
1471{
1472 int i;
1473
1474 for (i = 0; i < obj->nr_extern; i++) {
1475 if (strcmp(obj->externs[i].name, name) == 0)
1476 return &obj->externs[i];
1477 }
1478 return NULL;
1479}
1480
1481static int set_kcfg_value_tri(struct extern_desc *ext, void *ext_val,
1482 char value)
1483{
1484 switch (ext->kcfg.type) {
1485 case KCFG_BOOL:
1486 if (value == 'm') {
1487 pr_warn("extern (kcfg) %s=%c should be tristate or char\n",
1488 ext->name, value);
1489 return -EINVAL;
1490 }
1491 *(bool *)ext_val = value == 'y' ? true : false;
1492 break;
1493 case KCFG_TRISTATE:
1494 if (value == 'y')
1495 *(enum libbpf_tristate *)ext_val = TRI_YES;
1496 else if (value == 'm')
1497 *(enum libbpf_tristate *)ext_val = TRI_MODULE;
1498 else
1499 *(enum libbpf_tristate *)ext_val = TRI_NO;
1500 break;
1501 case KCFG_CHAR:
1502 *(char *)ext_val = value;
1503 break;
1504 case KCFG_UNKNOWN:
1505 case KCFG_INT:
1506 case KCFG_CHAR_ARR:
1507 default:
1508 pr_warn("extern (kcfg) %s=%c should be bool, tristate, or char\n",
1509 ext->name, value);
1510 return -EINVAL;
1511 }
1512 ext->is_set = true;
1513 return 0;
1514}
1515
1516static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val,
1517 const char *value)
1518{
1519 size_t len;
1520
1521 if (ext->kcfg.type != KCFG_CHAR_ARR) {
1522 pr_warn("extern (kcfg) %s=%s should be char array\n", ext->name, value);
1523 return -EINVAL;
1524 }
1525
1526 len = strlen(value);
1527 if (value[len - 1] != '"') {
1528 pr_warn("extern (kcfg) '%s': invalid string config '%s'\n",
1529 ext->name, value);
1530 return -EINVAL;
1531 }
1532
1533
1534 len -= 2;
1535 if (len >= ext->kcfg.sz) {
1536 pr_warn("extern (kcfg) '%s': long string config %s of (%zu bytes) truncated to %d bytes\n",
1537 ext->name, value, len, ext->kcfg.sz - 1);
1538 len = ext->kcfg.sz - 1;
1539 }
1540 memcpy(ext_val, value + 1, len);
1541 ext_val[len] = '\0';
1542 ext->is_set = true;
1543 return 0;
1544}
1545
1546static int parse_u64(const char *value, __u64 *res)
1547{
1548 char *value_end;
1549 int err;
1550
1551 errno = 0;
1552 *res = strtoull(value, &value_end, 0);
1553 if (errno) {
1554 err = -errno;
1555 pr_warn("failed to parse '%s' as integer: %d\n", value, err);
1556 return err;
1557 }
1558 if (*value_end) {
1559 pr_warn("failed to parse '%s' as integer completely\n", value);
1560 return -EINVAL;
1561 }
1562 return 0;
1563}
1564
/* Check whether the parsed value v fits into the kcfg extern's storage of
 * ext->kcfg.sz bytes, respecting signedness.
 */
static bool is_kcfg_value_in_range(const struct extern_desc *ext, __u64 v)
{
	int bit_sz = ext->kcfg.sz * 8;

	/* 8-byte storage fits any __u64 value; also avoids the undefined
	 * 1ULL << 64 in the checks below
	 */
	if (ext->kcfg.sz == 8)
		return true;

	/* For signed storage of bit_sz bits the valid range is
	 * [-2^(bit_sz-1), 2^(bit_sz-1) - 1]. Adding the 2^(bit_sz-1) bias
	 * shifts that range to [0, 2^bit_sz - 1], so a single unsigned
	 * comparison covers both negative values (which appear as huge
	 * unsigned numbers and wrap around on the add) and positive ones.
	 * For unsigned storage, the value fits iff no bits above bit_sz
	 * are set.
	 */
	if (ext->kcfg.is_signed)
		return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz);
	else
		return (v >> bit_sz) == 0;
}
1589
1590static int set_kcfg_value_num(struct extern_desc *ext, void *ext_val,
1591 __u64 value)
1592{
1593 if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) {
1594 pr_warn("extern (kcfg) %s=%llu should be integer\n",
1595 ext->name, (unsigned long long)value);
1596 return -EINVAL;
1597 }
1598 if (!is_kcfg_value_in_range(ext, value)) {
1599 pr_warn("extern (kcfg) %s=%llu value doesn't fit in %d bytes\n",
1600 ext->name, (unsigned long long)value, ext->kcfg.sz);
1601 return -ERANGE;
1602 }
1603 switch (ext->kcfg.sz) {
1604 case 1: *(__u8 *)ext_val = value; break;
1605 case 2: *(__u16 *)ext_val = value; break;
1606 case 4: *(__u32 *)ext_val = value; break;
1607 case 8: *(__u64 *)ext_val = value; break;
1608 default:
1609 return -EINVAL;
1610 }
1611 ext->is_set = true;
1612 return 0;
1613}
1614
/* Parse one Kconfig-style line ("CONFIG_FOO=value") and, if it names a known
 * and not-yet-set kcfg extern, write the parsed value into the kconfig map
 * blob 'data' at the extern's offset. Non-CONFIG_ lines (comments, blanks)
 * and unknown/already-set externs are silently skipped.
 *
 * NOTE: buf is mutated in place (newline trimmed, '=' temporarily NUL'ed).
 * Returns 0 on success/skip, negative errno on malformed input or value
 * type/range mismatch.
 */
static int bpf_object__process_kconfig_line(struct bpf_object *obj,
					    char *buf, void *data)
{
	struct extern_desc *ext;
	char *sep, *value;
	int len, err = 0;
	void *ext_val;
	__u64 num;

	if (strncmp(buf, "CONFIG_", 7))
		return 0;

	sep = strchr(buf, '=');
	if (!sep) {
		pr_warn("failed to parse '%s': no separator\n", buf);
		return -EINVAL;
	}

	/* trim the trailing newline left by fgets()/gzgets() */
	len = strlen(buf);
	if (buf[len - 1] == '\n')
		buf[len - 1] = '\0';
	/* split into name/value at '='; restore it before warning so the
	 * message shows the original line
	 */
	*sep = '\0';
	if (!sep[1]) {
		*sep = '=';
		pr_warn("failed to parse '%s': no value\n", buf);
		return -EINVAL;
	}

	ext = find_extern_by_name(obj, buf);
	if (!ext || ext->is_set)
		return 0;

	ext_val = data + ext->kcfg.data_off;
	value = sep + 1;

	/* dispatch on the first character of the value */
	switch (*value) {
	case 'y': case 'n': case 'm':
		err = set_kcfg_value_tri(ext, ext_val, *value);
		break;
	case '"':
		err = set_kcfg_value_str(ext, ext_val, value);
		break;
	default:
		/* assume integer */
		err = parse_u64(value, &num);
		if (err) {
			pr_warn("extern (kcfg) %s=%s should be integer\n",
				ext->name, value);
			return err;
		}
		err = set_kcfg_value_num(ext, ext_val, num);
		break;
	}
	if (err)
		return err;
	pr_debug("extern (kcfg) %s=%s\n", ext->name, value);
	return 0;
}
1675
/* Read the running kernel's config — /boot/config-$(uname -r) or, failing
 * that, /proc/config.gz — and feed each line to the kconfig line parser.
 * Returns 0 on success, negative errno on open/parse failure.
 */
static int bpf_object__read_kconfig_file(struct bpf_object *obj, void *data)
{
	char buf[PATH_MAX];
	struct utsname uts;
	gzFile file;
	int len, err = 0;

	uname(&uts);
	len = snprintf(buf, PATH_MAX, "/boot/config-%s", uts.release);
	if (len < 0)
		return -EINVAL;
	if (len >= PATH_MAX)
		return -ENAMETOOLONG;

	/* gzopen reads both gzipped and plain files transparently */
	file = gzopen(buf, "r");
	if (!file)
		file = gzopen("/proc/config.gz", "r");
	if (!file) {
		pr_warn("failed to open system Kconfig\n");
		return -ENOENT;
	}

	while (gzgets(file, buf, sizeof(buf))) {
		err = bpf_object__process_kconfig_line(obj, buf, data);
		if (err) {
			pr_warn("error parsing system Kconfig line '%s': %d\n",
				buf, err);
			break;
		}
	}

	gzclose(file);
	return err;
}
1713
1714static int bpf_object__read_kconfig_mem(struct bpf_object *obj,
1715 const char *config, void *data)
1716{
1717 char buf[PATH_MAX];
1718 int err = 0;
1719 FILE *file;
1720
1721 file = fmemopen((void *)config, strlen(config), "r");
1722 if (!file) {
1723 err = -errno;
1724 pr_warn("failed to open in-memory Kconfig: %d\n", err);
1725 return err;
1726 }
1727
1728 while (fgets(buf, sizeof(buf), file)) {
1729 err = bpf_object__process_kconfig_line(obj, buf, data);
1730 if (err) {
1731 pr_warn("error parsing in-memory Kconfig line '%s': %d\n",
1732 buf, err);
1733 break;
1734 }
1735 }
1736
1737 fclose(file);
1738 return err;
1739}
1740
1741static int bpf_object__init_kconfig_map(struct bpf_object *obj)
1742{
1743 struct extern_desc *last_ext = NULL, *ext;
1744 size_t map_sz;
1745 int i, err;
1746
1747 for (i = 0; i < obj->nr_extern; i++) {
1748 ext = &obj->externs[i];
1749 if (ext->type == EXT_KCFG)
1750 last_ext = ext;
1751 }
1752
1753 if (!last_ext)
1754 return 0;
1755
1756 map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz;
1757 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG,
1758 obj->efile.symbols_shndx,
1759 NULL, map_sz);
1760 if (err)
1761 return err;
1762
1763 obj->kconfig_map_idx = obj->nr_maps - 1;
1764
1765 return 0;
1766}
1767
/* Parse legacy (pre-BTF) map definitions from the "maps" ELF section. Each
 * symbol in that section names one struct bpf_map_def. The per-map
 * definition size is inferred as section_size / symbol_count, which allows
 * compilers/older headers that append extra (zero) fields. In strict mode,
 * non-zero unrecognized trailing bytes are an error; otherwise they only
 * produce a warning.
 */
static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
{
	Elf_Data *symbols = obj->efile.symbols;
	int i, map_def_sz = 0, nr_maps = 0, nr_syms;
	Elf_Data *data = NULL;
	Elf_Scn *scn;

	if (obj->efile.maps_shndx < 0)
		return 0;

	if (!symbols)
		return -EINVAL;

	scn = elf_sec_by_idx(obj, obj->efile.maps_shndx);
	data = elf_sec_data(obj, scn);
	if (!scn || !data) {
		pr_warn("elf: failed to get legacy map definitions for %s\n",
			obj->path);
		return -EINVAL;
	}

	/*
	 * Count number of maps. Each map has a name.
	 * Array of maps is not supported: only the first element is
	 * considered.
	 */
	nr_syms = symbols->d_size / sizeof(GElf_Sym);
	for (i = 0; i < nr_syms; i++) {
		GElf_Sym sym;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;
		nr_maps++;
	}

	pr_debug("elf: found %d legacy map definitions (%zd bytes) in %s\n",
		 nr_maps, data->d_size, obj->path);

	/* the section must divide evenly into nr_maps definitions */
	if (!data->d_size || nr_maps == 0 || (data->d_size % nr_maps) != 0) {
		pr_warn("elf: unable to determine legacy map definition size in %s\n",
			obj->path);
		return -EINVAL;
	}
	map_def_sz = data->d_size / nr_maps;

	/* second pass: fill obj->maps from the section data */
	for (i = 0; i < nr_syms; i++) {
		GElf_Sym sym;
		const char *map_name;
		struct bpf_map_def *def;
		struct bpf_map *map;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;

		map = bpf_object__add_map(obj);
		if (IS_ERR(map))
			return PTR_ERR(map);

		map_name = elf_sym_str(obj, sym.st_name);
		if (!map_name) {
			pr_warn("failed to get map #%d name sym string for obj %s\n",
				i, obj->path);
			return -LIBBPF_ERRNO__FORMAT;
		}

		map->libbpf_type = LIBBPF_MAP_UNSPEC;
		map->sec_idx = sym.st_shndx;
		map->sec_offset = sym.st_value;
		pr_debug("map '%s' (legacy): at sec_idx %d, offset %zu.\n",
			 map_name, map->sec_idx, map->sec_offset);
		/* symbol's definition must lie fully within the section */
		if (sym.st_value + map_def_sz > data->d_size) {
			pr_warn("corrupted maps section in %s: last map \"%s\" too small\n",
				obj->path, map_name);
			return -EINVAL;
		}

		map->name = strdup(map_name);
		if (!map->name) {
			pr_warn("failed to alloc map name\n");
			return -ENOMEM;
		}
		pr_debug("map %d is \"%s\"\n", i, map->name);
		def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
		/*
		 * If the definition is smaller than or equal to our known
		 * struct bpf_map_def, copy only what is there (map slot was
		 * already zero-initialized).
		 */
		if (map_def_sz <= sizeof(struct bpf_map_def)) {
			memcpy(&map->def, def, map_def_sz);
		} else {
			/*
			 * The definition carries extra, unknown trailing
			 * fields. Accept only if they are all zero (i.e.
			 * features we don't understand aren't actually
			 * used); in strict mode non-zero extras are fatal.
			 */
			char *b;

			for (b = ((char *)def) + sizeof(struct bpf_map_def);
			     b < ((char *)def) + map_def_sz; b++) {
				if (*b != 0) {
					pr_warn("maps section in %s: \"%s\" has unrecognized, non-zero options\n",
						obj->path, map_name);
					if (strict)
						return -EINVAL;
				}
			}
			memcpy(&map->def, def, sizeof(struct bpf_map_def));
		}
	}
	return 0;
}
1890
1891static const struct btf_type *
1892skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id)
1893{
1894 const struct btf_type *t = btf__type_by_id(btf, id);
1895
1896 if (res_id)
1897 *res_id = id;
1898
1899 while (btf_is_mod(t) || btf_is_typedef(t)) {
1900 if (res_id)
1901 *res_id = t->type;
1902 t = btf__type_by_id(btf, t->type);
1903 }
1904
1905 return t;
1906}
1907
1908static const struct btf_type *
1909resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id)
1910{
1911 const struct btf_type *t;
1912
1913 t = skip_mods_and_typedefs(btf, id, NULL);
1914 if (!btf_is_ptr(t))
1915 return NULL;
1916
1917 t = skip_mods_and_typedefs(btf, t->type, res_id);
1918
1919 return btf_is_func_proto(t) ? t : NULL;
1920}
1921
1922static const char *btf_kind_str(const struct btf_type *t)
1923{
1924 switch (btf_kind(t)) {
1925 case BTF_KIND_UNKN: return "void";
1926 case BTF_KIND_INT: return "int";
1927 case BTF_KIND_PTR: return "ptr";
1928 case BTF_KIND_ARRAY: return "array";
1929 case BTF_KIND_STRUCT: return "struct";
1930 case BTF_KIND_UNION: return "union";
1931 case BTF_KIND_ENUM: return "enum";
1932 case BTF_KIND_FWD: return "fwd";
1933 case BTF_KIND_TYPEDEF: return "typedef";
1934 case BTF_KIND_VOLATILE: return "volatile";
1935 case BTF_KIND_CONST: return "const";
1936 case BTF_KIND_RESTRICT: return "restrict";
1937 case BTF_KIND_FUNC: return "func";
1938 case BTF_KIND_FUNC_PROTO: return "func_proto";
1939 case BTF_KIND_VAR: return "var";
1940 case BTF_KIND_DATASEC: return "datasec";
1941 case BTF_KIND_FLOAT: return "float";
1942 default: return "unknown";
1943 }
1944}
1945
1946
1947
1948
1949
1950
1951
1952
1953static bool get_map_field_int(const char *map_name, const struct btf *btf,
1954 const struct btf_member *m, __u32 *res)
1955{
1956 const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
1957 const char *name = btf__name_by_offset(btf, m->name_off);
1958 const struct btf_array *arr_info;
1959 const struct btf_type *arr_t;
1960
1961 if (!btf_is_ptr(t)) {
1962 pr_warn("map '%s': attr '%s': expected PTR, got %s.\n",
1963 map_name, name, btf_kind_str(t));
1964 return false;
1965 }
1966
1967 arr_t = btf__type_by_id(btf, t->type);
1968 if (!arr_t) {
1969 pr_warn("map '%s': attr '%s': type [%u] not found.\n",
1970 map_name, name, t->type);
1971 return false;
1972 }
1973 if (!btf_is_array(arr_t)) {
1974 pr_warn("map '%s': attr '%s': expected ARRAY, got %s.\n",
1975 map_name, name, btf_kind_str(arr_t));
1976 return false;
1977 }
1978 arr_info = btf_array(arr_t);
1979 *res = arr_info->nelems;
1980 return true;
1981}
1982
1983static int build_map_pin_path(struct bpf_map *map, const char *path)
1984{
1985 char buf[PATH_MAX];
1986 int len;
1987
1988 if (!path)
1989 path = "/sys/fs/bpf";
1990
1991 len = snprintf(buf, PATH_MAX, "%s/%s", path, bpf_map__name(map));
1992 if (len < 0)
1993 return -EINVAL;
1994 else if (len >= PATH_MAX)
1995 return -ENAMETOOLONG;
1996
1997 return bpf_map__set_pin_path(map, buf);
1998}
1999
2000
/* Parse one BTF-defined map definition (the anonymous struct type behind a
 * .maps-section variable) and fill in 'map'. Recognized members: type,
 * max_entries, map_flags, numa_node, key_size/key, value_size/value,
 * values (inner map-in-map definition, parsed recursively), and pinning.
 * In strict mode unknown members are an error; otherwise they are ignored
 * with a debug message. Returns 0 or negative errno.
 */
static int parse_btf_map_def(struct bpf_object *obj,
			     struct bpf_map *map,
			     const struct btf_type *def,
			     bool strict, bool is_inner,
			     const char *pin_root_path)
{
	const struct btf_type *t;
	const struct btf_member *m;
	int vlen, i;

	vlen = btf_vlen(def);
	m = btf_members(def);
	for (i = 0; i < vlen; i++, m++) {
		const char *name = btf__name_by_offset(obj->btf, m->name_off);

		if (!name) {
			pr_warn("map '%s': invalid field #%d.\n", map->name, i);
			return -EINVAL;
		}
		if (strcmp(name, "type") == 0) {
			if (!get_map_field_int(map->name, obj->btf, m,
					       &map->def.type))
				return -EINVAL;
			pr_debug("map '%s': found type = %u.\n",
				 map->name, map->def.type);
		} else if (strcmp(name, "max_entries") == 0) {
			if (!get_map_field_int(map->name, obj->btf, m,
					       &map->def.max_entries))
				return -EINVAL;
			pr_debug("map '%s': found max_entries = %u.\n",
				 map->name, map->def.max_entries);
		} else if (strcmp(name, "map_flags") == 0) {
			if (!get_map_field_int(map->name, obj->btf, m,
					       &map->def.map_flags))
				return -EINVAL;
			pr_debug("map '%s': found map_flags = %u.\n",
				 map->name, map->def.map_flags);
		} else if (strcmp(name, "numa_node") == 0) {
			if (!get_map_field_int(map->name, obj->btf, m, &map->numa_node))
				return -EINVAL;
			pr_debug("map '%s': found numa_node = %u.\n", map->name, map->numa_node);
		} else if (strcmp(name, "key_size") == 0) {
			__u32 sz;

			if (!get_map_field_int(map->name, obj->btf, m, &sz))
				return -EINVAL;
			pr_debug("map '%s': found key_size = %u.\n",
				 map->name, sz);
			/* explicit key_size must agree with a 'key' member
			 * seen earlier (and vice versa)
			 */
			if (map->def.key_size && map->def.key_size != sz) {
				pr_warn("map '%s': conflicting key size %u != %u.\n",
					map->name, map->def.key_size, sz);
				return -EINVAL;
			}
			map->def.key_size = sz;
		} else if (strcmp(name, "key") == 0) {
			__s64 sz;

			t = btf__type_by_id(obj->btf, m->type);
			if (!t) {
				pr_warn("map '%s': key type [%d] not found.\n",
					map->name, m->type);
				return -EINVAL;
			}
			/* 'key' is declared as a pointer to the key type */
			if (!btf_is_ptr(t)) {
				pr_warn("map '%s': key spec is not PTR: %s.\n",
					map->name, btf_kind_str(t));
				return -EINVAL;
			}
			sz = btf__resolve_size(obj->btf, t->type);
			if (sz < 0) {
				pr_warn("map '%s': can't determine key size for type [%u]: %zd.\n",
					map->name, t->type, (ssize_t)sz);
				return sz;
			}
			pr_debug("map '%s': found key [%u], sz = %zd.\n",
				 map->name, t->type, (ssize_t)sz);
			if (map->def.key_size && map->def.key_size != sz) {
				pr_warn("map '%s': conflicting key size %u != %zd.\n",
					map->name, map->def.key_size, (ssize_t)sz);
				return -EINVAL;
			}
			map->def.key_size = sz;
			map->btf_key_type_id = t->type;
		} else if (strcmp(name, "value_size") == 0) {
			__u32 sz;

			if (!get_map_field_int(map->name, obj->btf, m, &sz))
				return -EINVAL;
			pr_debug("map '%s': found value_size = %u.\n",
				 map->name, sz);
			if (map->def.value_size && map->def.value_size != sz) {
				pr_warn("map '%s': conflicting value size %u != %u.\n",
					map->name, map->def.value_size, sz);
				return -EINVAL;
			}
			map->def.value_size = sz;
		} else if (strcmp(name, "value") == 0) {
			__s64 sz;

			t = btf__type_by_id(obj->btf, m->type);
			if (!t) {
				pr_warn("map '%s': value type [%d] not found.\n",
					map->name, m->type);
				return -EINVAL;
			}
			/* 'value' is declared as a pointer to the value type */
			if (!btf_is_ptr(t)) {
				pr_warn("map '%s': value spec is not PTR: %s.\n",
					map->name, btf_kind_str(t));
				return -EINVAL;
			}
			sz = btf__resolve_size(obj->btf, t->type);
			if (sz < 0) {
				pr_warn("map '%s': can't determine value size for type [%u]: %zd.\n",
					map->name, t->type, (ssize_t)sz);
				return sz;
			}
			pr_debug("map '%s': found value [%u], sz = %zd.\n",
				 map->name, t->type, (ssize_t)sz);
			if (map->def.value_size && map->def.value_size != sz) {
				pr_warn("map '%s': conflicting value size %u != %zd.\n",
					map->name, map->def.value_size, (ssize_t)sz);
				return -EINVAL;
			}
			map->def.value_size = sz;
			map->btf_value_type_id = t->type;
		}
		else if (strcmp(name, "values") == 0) {
			/* map-in-map: 'values' is a zero-sized array of
			 * pointers to the inner map definition struct
			 */
			int err;

			if (is_inner) {
				pr_warn("map '%s': multi-level inner maps not supported.\n",
					map->name);
				return -ENOTSUP;
			}
			if (i != vlen - 1) {
				pr_warn("map '%s': '%s' member should be last.\n",
					map->name, name);
				return -EINVAL;
			}
			if (!bpf_map_type__is_map_in_map(map->def.type)) {
				pr_warn("map '%s': should be map-in-map.\n",
					map->name);
				return -ENOTSUP;
			}
			/* outer map values are inner map FDs (4 bytes) */
			if (map->def.value_size && map->def.value_size != 4) {
				pr_warn("map '%s': conflicting value size %u != 4.\n",
					map->name, map->def.value_size);
				return -EINVAL;
			}
			map->def.value_size = 4;
			t = btf__type_by_id(obj->btf, m->type);
			if (!t) {
				pr_warn("map '%s': map-in-map inner type [%d] not found.\n",
					map->name, m->type);
				return -EINVAL;
			}
			if (!btf_is_array(t) || btf_array(t)->nelems) {
				pr_warn("map '%s': map-in-map inner spec is not a zero-sized array.\n",
					map->name);
				return -EINVAL;
			}
			t = skip_mods_and_typedefs(obj->btf, btf_array(t)->type,
						   NULL);
			if (!btf_is_ptr(t)) {
				pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
					map->name, btf_kind_str(t));
				return -EINVAL;
			}
			t = skip_mods_and_typedefs(obj->btf, t->type, NULL);
			if (!btf_is_struct(t)) {
				pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
					map->name, btf_kind_str(t));
				return -EINVAL;
			}

			/* recursively parse the inner map's definition */
			map->inner_map = calloc(1, sizeof(*map->inner_map));
			if (!map->inner_map)
				return -ENOMEM;
			map->inner_map->sec_idx = obj->efile.btf_maps_shndx;
			map->inner_map->name = malloc(strlen(map->name) +
						      sizeof(".inner") + 1);
			if (!map->inner_map->name)
				return -ENOMEM;
			sprintf(map->inner_map->name, "%s.inner", map->name);

			err = parse_btf_map_def(obj, map->inner_map, t, strict,
						true /* is_inner */, NULL);
			if (err)
				return err;
		} else if (strcmp(name, "pinning") == 0) {
			__u32 val;
			int err;

			if (is_inner) {
				pr_debug("map '%s': inner def can't be pinned.\n",
					 map->name);
				return -EINVAL;
			}
			if (!get_map_field_int(map->name, obj->btf, m, &val))
				return -EINVAL;
			pr_debug("map '%s': found pinning = %u.\n",
				 map->name, val);

			if (val != LIBBPF_PIN_NONE &&
			    val != LIBBPF_PIN_BY_NAME) {
				pr_warn("map '%s': invalid pinning value %u.\n",
					map->name, val);
				return -EINVAL;
			}
			if (val == LIBBPF_PIN_BY_NAME) {
				err = build_map_pin_path(map, pin_root_path);
				if (err) {
					pr_warn("map '%s': couldn't build pin path.\n",
						map->name);
					return err;
				}
			}
		} else {
			if (strict) {
				pr_warn("map '%s': unknown field '%s'.\n",
					map->name, name);
				return -ENOTSUP;
			}
			pr_debug("map '%s': ignoring unknown field '%s'.\n",
				 map->name, name);
		}
	}

	if (map->def.type == BPF_MAP_TYPE_UNSPEC) {
		pr_warn("map '%s': map type isn't specified.\n", map->name);
		return -EINVAL;
	}

	return 0;
}
2236
/* Process variable #var_idx of the ".maps" DATASEC: validate the VAR and
 * its definition struct, register a new bpf_map for it, and delegate
 * member parsing to parse_btf_map_def(). Returns 0 or negative errno.
 */
static int bpf_object__init_user_btf_map(struct bpf_object *obj,
					 const struct btf_type *sec,
					 int var_idx, int sec_idx,
					 const Elf_Data *data, bool strict,
					 const char *pin_root_path)
{
	const struct btf_type *var, *def;
	const struct btf_var_secinfo *vi;
	const struct btf_var *var_extra;
	const char *map_name;
	struct bpf_map *map;

	vi = btf_var_secinfos(sec) + var_idx;
	var = btf__type_by_id(obj->btf, vi->type);
	var_extra = btf_var(var);
	/* the variable's name becomes the map's name */
	map_name = btf__name_by_offset(obj->btf, var->name_off);

	if (map_name == NULL || map_name[0] == '\0') {
		pr_warn("map #%d: empty name.\n", var_idx);
		return -EINVAL;
	}
	/* var must lie entirely within the .maps section data */
	if ((__u64)vi->offset + vi->size > data->d_size) {
		pr_warn("map '%s' BTF data is corrupted.\n", map_name);
		return -EINVAL;
	}
	if (!btf_is_var(var)) {
		pr_warn("map '%s': unexpected var kind %s.\n",
			map_name, btf_kind_str(var));
		return -EINVAL;
	}
	if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED &&
	    var_extra->linkage != BTF_VAR_STATIC) {
		pr_warn("map '%s': unsupported var linkage %u.\n",
			map_name, var_extra->linkage);
		return -EOPNOTSUPP;
	}

	/* the actual map definition is the struct type behind the VAR */
	def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
	if (!btf_is_struct(def)) {
		pr_warn("map '%s': unexpected def kind %s.\n",
			map_name, btf_kind_str(var));
		return -EINVAL;
	}
	if (def->size > vi->size) {
		pr_warn("map '%s': invalid def size.\n", map_name);
		return -EINVAL;
	}

	map = bpf_object__add_map(obj);
	if (IS_ERR(map))
		return PTR_ERR(map);
	map->name = strdup(map_name);
	if (!map->name) {
		pr_warn("map '%s': failed to alloc map name.\n", map_name);
		return -ENOMEM;
	}
	map->libbpf_type = LIBBPF_MAP_UNSPEC;
	map->def.type = BPF_MAP_TYPE_UNSPEC;
	map->sec_idx = sec_idx;
	map->sec_offset = vi->offset;
	map->btf_var_idx = var_idx;
	pr_debug("map '%s': at sec_idx %d, offset %zu.\n",
		 map_name, map->sec_idx, map->sec_offset);

	return parse_btf_map_def(obj, map, def, strict, false, pin_root_path);
}
2303
2304static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
2305 const char *pin_root_path)
2306{
2307 const struct btf_type *sec = NULL;
2308 int nr_types, i, vlen, err;
2309 const struct btf_type *t;
2310 const char *name;
2311 Elf_Data *data;
2312 Elf_Scn *scn;
2313
2314 if (obj->efile.btf_maps_shndx < 0)
2315 return 0;
2316
2317 scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx);
2318 data = elf_sec_data(obj, scn);
2319 if (!scn || !data) {
2320 pr_warn("elf: failed to get %s map definitions for %s\n",
2321 MAPS_ELF_SEC, obj->path);
2322 return -EINVAL;
2323 }
2324
2325 nr_types = btf__get_nr_types(obj->btf);
2326 for (i = 1; i <= nr_types; i++) {
2327 t = btf__type_by_id(obj->btf, i);
2328 if (!btf_is_datasec(t))
2329 continue;
2330 name = btf__name_by_offset(obj->btf, t->name_off);
2331 if (strcmp(name, MAPS_ELF_SEC) == 0) {
2332 sec = t;
2333 obj->efile.btf_maps_sec_btf_id = i;
2334 break;
2335 }
2336 }
2337
2338 if (!sec) {
2339 pr_warn("DATASEC '%s' not found.\n", MAPS_ELF_SEC);
2340 return -ENOENT;
2341 }
2342
2343 vlen = btf_vlen(sec);
2344 for (i = 0; i < vlen; i++) {
2345 err = bpf_object__init_user_btf_map(obj, sec, i,
2346 obj->efile.btf_maps_shndx,
2347 data, strict,
2348 pin_root_path);
2349 if (err)
2350 return err;
2351 }
2352
2353 return 0;
2354}
2355
2356static int bpf_object__init_maps(struct bpf_object *obj,
2357 const struct bpf_object_open_opts *opts)
2358{
2359 const char *pin_root_path;
2360 bool strict;
2361 int err;
2362
2363 strict = !OPTS_GET(opts, relaxed_maps, false);
2364 pin_root_path = OPTS_GET(opts, pin_root_path, NULL);
2365
2366 err = bpf_object__init_user_maps(obj, strict);
2367 err = err ?: bpf_object__init_user_btf_maps(obj, strict, pin_root_path);
2368 err = err ?: bpf_object__init_global_data_maps(obj);
2369 err = err ?: bpf_object__init_kconfig_map(obj);
2370 err = err ?: bpf_object__init_struct_ops_maps(obj);
2371 if (err)
2372 return err;
2373
2374 return 0;
2375}
2376
2377static bool section_have_execinstr(struct bpf_object *obj, int idx)
2378{
2379 GElf_Shdr sh;
2380
2381 if (elf_sec_hdr(obj, elf_sec_by_idx(obj, idx), &sh))
2382 return false;
2383
2384 return sh.sh_flags & SHF_EXECINSTR;
2385}
2386
2387static bool btf_needs_sanitization(struct bpf_object *obj)
2388{
2389 bool has_func_global = kernel_supports(FEAT_BTF_GLOBAL_FUNC);
2390 bool has_datasec = kernel_supports(FEAT_BTF_DATASEC);
2391 bool has_float = kernel_supports(FEAT_BTF_FLOAT);
2392 bool has_func = kernel_supports(FEAT_BTF_FUNC);
2393
2394 return !has_func || !has_datasec || !has_func_global || !has_float;
2395}
2396
/* Rewrite BTF in place so it only uses kinds the running kernel supports,
 * replacing unsupported kinds with older compatible encodings. Mutates the
 * btf_type records directly; sizes/vlen are preserved so offsets stay valid.
 */
static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
{
	bool has_func_global = kernel_supports(FEAT_BTF_GLOBAL_FUNC);
	bool has_datasec = kernel_supports(FEAT_BTF_DATASEC);
	bool has_float = kernel_supports(FEAT_BTF_FLOAT);
	bool has_func = kernel_supports(FEAT_BTF_FUNC);
	struct btf_type *t;
	int i, j, vlen;

	for (i = 1; i <= btf__get_nr_types(btf); i++) {
		t = (struct btf_type *)btf__type_by_id(btf, i);

		if (!has_datasec && btf_is_var(t)) {
			/* replace VAR with INT */
			t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
			/* size 1 is the safest choice: a bigger size could
			 * exceed the original variable's size and fail
			 * kernel-side BTF validation
			 */
			t->size = 1;
			*(int *)(t + 1) = BTF_INT_ENC(0, 0, 32);
		} else if (!has_datasec && btf_is_datasec(t)) {
			/* replace DATASEC with STRUCT */
			const struct btf_var_secinfo *v = btf_var_secinfos(t);
			struct btf_member *m = btf_members(t);
			struct btf_type *vt;
			char *name;

			/* '.' is not allowed in struct names; rewrite e.g.
			 * ".data" -> "_data" in place
			 */
			name = (char *)btf__name_by_offset(btf, t->name_off);
			while (*name) {
				if (*name == '.')
					*name = '_';
				name++;
			}

			/* turn each var_secinfo into a struct member; the
			 * two layouts have the same record size, so the
			 * conversion can be done in place
			 */
			vlen = btf_vlen(t);
			t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
			for (j = 0; j < vlen; j++, v++, m++) {
				/* struct member offsets are in bits */
				m->offset = v->offset * 8;
				m->type = v->type;
				/* member name borrowed from the variable */
				vt = (void *)btf__type_by_id(btf, v->type);
				m->name_off = vt->name_off;
			}
		} else if (!has_func && btf_is_func_proto(t)) {
			/* replace FUNC_PROTO with ENUM of the same vlen */
			vlen = btf_vlen(t);
			t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
			t->size = sizeof(__u32);
		} else if (!has_func && btf_is_func(t)) {
			/* replace FUNC with TYPEDEF */
			t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
		} else if (!has_func_global && btf_is_func(t)) {
			/* drop global/extern linkage info (stored in vlen) */
			t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0);
		} else if (!has_float && btf_is_float(t)) {
			/* replace FLOAT with an empty STRUCT of the same
			 * size; clear the name so it can't clash with an
			 * existing struct of the same name
			 */
			t->name_off = 0;
			t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0);
		}
	}
}
2464
2465static bool libbpf_needs_btf(const struct bpf_object *obj)
2466{
2467 return obj->efile.btf_maps_shndx >= 0 ||
2468 obj->efile.st_ops_shndx >= 0 ||
2469 obj->nr_extern > 0;
2470}
2471
2472static bool kernel_needs_btf(const struct bpf_object *obj)
2473{
2474 return obj->efile.st_ops_shndx >= 0;
2475}
2476
/* Parse the object's .BTF and .BTF.ext ELF sections, if present. A missing
 * or corrupted .BTF is only fatal when the object actually requires BTF
 * (BTF-defined maps, struct_ops, externs); .BTF.ext failures are always
 * non-fatal (logged and ignored).
 */
static int bpf_object__init_btf(struct bpf_object *obj,
				Elf_Data *btf_data,
				Elf_Data *btf_ext_data)
{
	/* stays -ENOENT unless .BTF is present and parses cleanly */
	int err = -ENOENT;

	if (btf_data) {
		obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
		if (IS_ERR(obj->btf)) {
			err = PTR_ERR(obj->btf);
			obj->btf = NULL;
			pr_warn("Error loading ELF section %s: %d.\n",
				BTF_ELF_SEC, err);
			goto out;
		}
		/* enforce 8-byte pointers for BPF, regardless of host arch */
		btf__set_pointer_size(obj->btf, 8);
		err = 0;
	}
	if (btf_ext_data) {
		/* .BTF.ext is meaningless without .BTF */
		if (!obj->btf) {
			pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n",
				 BTF_EXT_ELF_SEC, BTF_ELF_SEC);
			goto out;
		}
		obj->btf_ext = btf_ext__new(btf_ext_data->d_buf,
					    btf_ext_data->d_size);
		if (IS_ERR(obj->btf_ext)) {
			pr_warn("Error loading ELF section %s: %ld. Ignored and continue.\n",
				BTF_EXT_ELF_SEC, PTR_ERR(obj->btf_ext));
			obj->btf_ext = NULL;
			goto out;
		}
	}
out:
	if (err && libbpf_needs_btf(obj)) {
		pr_warn("BTF is required, but is missing or corrupted.\n");
		return err;
	}
	return 0;
}
2518
2519static int bpf_object__finalize_btf(struct bpf_object *obj)
2520{
2521 int err;
2522
2523 if (!obj->btf)
2524 return 0;
2525
2526 err = btf__finalize_data(obj, obj->btf);
2527 if (err) {
2528 pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err);
2529 return err;
2530 }
2531
2532 return 0;
2533}
2534
2535static bool prog_needs_vmlinux_btf(struct bpf_program *prog)
2536{
2537 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
2538 prog->type == BPF_PROG_TYPE_LSM)
2539 return true;
2540
2541
2542
2543
2544 if (prog->type == BPF_PROG_TYPE_TRACING && !prog->attach_prog_fd)
2545 return true;
2546
2547 return false;
2548}
2549
2550static bool obj_needs_vmlinux_btf(const struct bpf_object *obj)
2551{
2552 struct bpf_program *prog;
2553 int i;
2554
2555
2556 if (obj->btf_ext && obj->btf_ext->core_relo_info.len)
2557 return true;
2558
2559
2560 for (i = 0; i < obj->nr_extern; i++) {
2561 const struct extern_desc *ext;
2562
2563 ext = &obj->externs[i];
2564 if (ext->type == EXT_KSYM && ext->ksym.type_id)
2565 return true;
2566 }
2567
2568 bpf_object__for_each_program(prog, obj) {
2569 if (!prog->load)
2570 continue;
2571 if (prog_needs_vmlinux_btf(prog))
2572 return true;
2573 }
2574
2575 return false;
2576}
2577
2578static int bpf_object__load_vmlinux_btf(struct bpf_object *obj, bool force)
2579{
2580 int err;
2581
2582
2583 if (obj->btf_vmlinux)
2584 return 0;
2585
2586 if (!force && !obj_needs_vmlinux_btf(obj))
2587 return 0;
2588
2589 obj->btf_vmlinux = libbpf_find_kernel_btf();
2590 if (IS_ERR(obj->btf_vmlinux)) {
2591 err = PTR_ERR(obj->btf_vmlinux);
2592 pr_warn("Error loading vmlinux BTF: %d\n", err);
2593 obj->btf_vmlinux = NULL;
2594 return err;
2595 }
2596 return 0;
2597}
2598
/* Upload the object's BTF into the kernel. If the running kernel lacks
 * some BTF features, a sanitized copy is uploaded instead, keeping the
 * original obj->btf intact. A failed upload is fatal only when the kernel
 * actually needs the BTF (struct_ops present); otherwise it is logged and
 * ignored.
 */
static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
{
	struct btf *kern_btf = obj->btf;
	bool btf_mandatory, sanitize;
	int err = 0;

	if (!obj->btf)
		return 0;

	if (!kernel_supports(FEAT_BTF)) {
		if (kernel_needs_btf(obj)) {
			err = -EOPNOTSUPP;
			goto report;
		}
		pr_debug("Kernel doesn't support BTF, skipping uploading it.\n");
		return 0;
	}

	sanitize = btf_needs_sanitization(obj);
	if (sanitize) {
		const void *raw_data;
		__u32 sz;

		/* clone BTF and sanitize the copy, leaving obj->btf as-is */
		raw_data = btf__get_raw_data(obj->btf, &sz);
		kern_btf = btf__new(raw_data, sz);
		if (IS_ERR(kern_btf))
			return PTR_ERR(kern_btf);

		/* enforce 8-byte pointers for BPF-targeted BTFs */
		btf__set_pointer_size(obj->btf, 8);
		bpf_object__sanitize_btf(obj, kern_btf);
	}

	err = btf__load(kern_btf);
	if (sanitize) {
		if (!err) {
			/* hand the loaded FD over to the original BTF before
			 * freeing the sanitized clone
			 */
			btf__set_fd(obj->btf, btf__fd(kern_btf));
			btf__set_fd(kern_btf, -1);
		}
		btf__free(kern_btf);
	}
report:
	if (err) {
		btf_mandatory = kernel_needs_btf(obj);
		pr_warn("Error loading .BTF into kernel: %d. %s\n", err,
			btf_mandatory ? "BTF is mandatory, can't proceed."
				      : "BTF is optional, ignoring.");
		if (!btf_mandatory)
			err = 0;
	}
	return err;
}
2653
2654static const char *elf_sym_str(const struct bpf_object *obj, size_t off)
2655{
2656 const char *name;
2657
2658 name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, off);
2659 if (!name) {
2660 pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
2661 off, obj->path, elf_errmsg(-1));
2662 return NULL;
2663 }
2664
2665 return name;
2666}
2667
2668static const char *elf_sec_str(const struct bpf_object *obj, size_t off)
2669{
2670 const char *name;
2671
2672 name = elf_strptr(obj->efile.elf, obj->efile.shstrndx, off);
2673 if (!name) {
2674 pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
2675 off, obj->path, elf_errmsg(-1));
2676 return NULL;
2677 }
2678
2679 return name;
2680}
2681
2682static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx)
2683{
2684 Elf_Scn *scn;
2685
2686 scn = elf_getscn(obj->efile.elf, idx);
2687 if (!scn) {
2688 pr_warn("elf: failed to get section(%zu) from %s: %s\n",
2689 idx, obj->path, elf_errmsg(-1));
2690 return NULL;
2691 }
2692 return scn;
2693}
2694
2695static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name)
2696{
2697 Elf_Scn *scn = NULL;
2698 Elf *elf = obj->efile.elf;
2699 const char *sec_name;
2700
2701 while ((scn = elf_nextscn(elf, scn)) != NULL) {
2702 sec_name = elf_sec_name(obj, scn);
2703 if (!sec_name)
2704 return NULL;
2705
2706 if (strcmp(sec_name, name) != 0)
2707 continue;
2708
2709 return scn;
2710 }
2711 return NULL;
2712}
2713
2714static int elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn, GElf_Shdr *hdr)
2715{
2716 if (!scn)
2717 return -EINVAL;
2718
2719 if (gelf_getshdr(scn, hdr) != hdr) {
2720 pr_warn("elf: failed to get section(%zu) header from %s: %s\n",
2721 elf_ndxscn(scn), obj->path, elf_errmsg(-1));
2722 return -EINVAL;
2723 }
2724
2725 return 0;
2726}
2727
2728static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn)
2729{
2730 const char *name;
2731 GElf_Shdr sh;
2732
2733 if (!scn)
2734 return NULL;
2735
2736 if (elf_sec_hdr(obj, scn, &sh))
2737 return NULL;
2738
2739 name = elf_sec_str(obj, sh.sh_name);
2740 if (!name) {
2741 pr_warn("elf: failed to get section(%zu) name from %s: %s\n",
2742 elf_ndxscn(scn), obj->path, elf_errmsg(-1));
2743 return NULL;
2744 }
2745
2746 return name;
2747}
2748
2749static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn)
2750{
2751 Elf_Data *data;
2752
2753 if (!scn)
2754 return NULL;
2755
2756 data = elf_getdata(scn, 0);
2757 if (!data) {
2758 pr_warn("elf: failed to get section(%zu) %s data from %s: %s\n",
2759 elf_ndxscn(scn), elf_sec_name(obj, scn) ?: "<?>",
2760 obj->path, elf_errmsg(-1));
2761 return NULL;
2762 }
2763
2764 return data;
2765}
2766
2767static int elf_sym_by_sec_off(const struct bpf_object *obj, size_t sec_idx,
2768 size_t off, __u32 sym_type, GElf_Sym *sym)
2769{
2770 Elf_Data *symbols = obj->efile.symbols;
2771 size_t n = symbols->d_size / sizeof(GElf_Sym);
2772 int i;
2773
2774 for (i = 0; i < n; i++) {
2775 if (!gelf_getsym(symbols, i, sym))
2776 continue;
2777 if (sym->st_shndx != sec_idx || sym->st_value != off)
2778 continue;
2779 if (GELF_ST_TYPE(sym->st_info) != sym_type)
2780 continue;
2781 return 0;
2782 }
2783
2784 return -ENOENT;
2785}
2786
/* DWARF debug sections are all named ".debug_<something>" */
static bool is_sec_name_dwarf(const char *name)
{
	static const char pfx[] = ".debug_";

	return strncmp(name, pfx, sizeof(pfx) - 1) == 0;
}
2792
/* Decide whether an ELF section is irrelevant for BPF object loading and
 * can be skipped entirely during parsing.
 */
static bool ignore_elf_section(GElf_Shdr *hdr, const char *name)
{
	/* string tables are reached via sh_link, never scanned directly */
	if (hdr->sh_type == SHT_STRTAB)
		return true;

	/* 0x6FFF4C03 appears to be SHT_LLVM_ADDRSIG (.llvm_addrsig),
	 * emitted by LLVM — TODO confirm against elf.h/LLVM docs
	 */
	if (hdr->sh_type == 0x6FFF4C03 )
		return true;

	/* an object with an empty .text is tolerated */
	if (hdr->sh_type == SHT_PROGBITS && hdr->sh_size == 0 &&
	    strcmp(name, ".text") == 0)
		return true;

	/* DWARF debug info is never needed at load time */
	if (is_sec_name_dwarf(name))
		return true;

	if (strncmp(name, ".rel", sizeof(".rel") - 1) == 0) {
		name += sizeof(".rel") - 1;
		/* relocations against DWARF sections are equally useless */
		if (is_sec_name_dwarf(name))
			return true;

		/* relocations against .BTF/.BTF.ext are not processed here */
		if (strcmp(name, BTF_ELF_SEC) == 0 ||
		    strcmp(name, BTF_EXT_ELF_SEC) == 0)
			return true;
	}

	return false;
}
2826
2827static int cmp_progs(const void *_a, const void *_b)
2828{
2829 const struct bpf_program *a = _a;
2830 const struct bpf_program *b = _b;
2831
2832 if (a->sec_idx != b->sec_idx)
2833 return a->sec_idx < b->sec_idx ? -1 : 1;
2834
2835
2836 return a->sec_insn_off < b->sec_insn_off ? -1 : 1;
2837}
2838
/* Scan all ELF sections of the object and record the interesting ones
 * (symbol table, license/version, map sections, BTF, program code, data
 * sections, relocations) in obj->efile. Finishes by sorting programs and
 * parsing collected BTF data.
 */
static int bpf_object__elf_collect(struct bpf_object *obj)
{
	Elf *elf = obj->efile.elf;
	Elf_Data *btf_ext_data = NULL;
	Elf_Data *btf_data = NULL;
	int idx = 0, err = 0;
	const char *name;
	Elf_Data *data;
	Elf_Scn *scn;
	GElf_Shdr sh;

	/* first pass: find the symbol table up front, since later section
	 * processing (e.g. program collection) relies on it being known
	 */
	scn = NULL;
	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		if (elf_sec_hdr(obj, scn, &sh))
			return -LIBBPF_ERRNO__FORMAT;

		if (sh.sh_type == SHT_SYMTAB) {
			if (obj->efile.symbols) {
				pr_warn("elf: multiple symbol tables in %s\n", obj->path);
				return -LIBBPF_ERRNO__FORMAT;
			}

			data = elf_sec_data(obj, scn);
			if (!data)
				return -LIBBPF_ERRNO__FORMAT;

			obj->efile.symbols = data;
			obj->efile.symbols_shndx = elf_ndxscn(scn);
			/* sh_link points at the associated string table */
			obj->efile.strtabidx = sh.sh_link;
		}
	}

	/* second pass: classify and record every remaining section */
	scn = NULL;
	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		idx++;

		if (elf_sec_hdr(obj, scn, &sh))
			return -LIBBPF_ERRNO__FORMAT;

		name = elf_sec_str(obj, sh.sh_name);
		if (!name)
			return -LIBBPF_ERRNO__FORMAT;

		if (ignore_elf_section(&sh, name))
			continue;

		data = elf_sec_data(obj, scn);
		if (!data)
			return -LIBBPF_ERRNO__FORMAT;

		pr_debug("elf: section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
			 idx, name, (unsigned long)data->d_size,
			 (int)sh.sh_link, (unsigned long)sh.sh_flags,
			 (int)sh.sh_type);

		if (strcmp(name, "license") == 0) {
			err = bpf_object__init_license(obj, data->d_buf, data->d_size);
			if (err)
				return err;
		} else if (strcmp(name, "version") == 0) {
			err = bpf_object__init_kversion(obj, data->d_buf, data->d_size);
			if (err)
				return err;
		} else if (strcmp(name, "maps") == 0) {
			obj->efile.maps_shndx = idx;
		} else if (strcmp(name, MAPS_ELF_SEC) == 0) {
			obj->efile.btf_maps_shndx = idx;
		} else if (strcmp(name, BTF_ELF_SEC) == 0) {
			btf_data = data;
		} else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
			btf_ext_data = data;
		} else if (sh.sh_type == SHT_SYMTAB) {
			/* already processed in the first pass above */
		} else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) {
			if (sh.sh_flags & SHF_EXECINSTR) {
				/* executable sections hold BPF programs */
				if (strcmp(name, ".text") == 0)
					obj->efile.text_shndx = idx;
				err = bpf_object__add_programs(obj, data, name, idx);
				if (err)
					return err;
			} else if (strcmp(name, DATA_SEC) == 0) {
				obj->efile.data = data;
				obj->efile.data_shndx = idx;
			} else if (strcmp(name, RODATA_SEC) == 0) {
				obj->efile.rodata = data;
				obj->efile.rodata_shndx = idx;
			} else if (strcmp(name, STRUCT_OPS_SEC) == 0) {
				obj->efile.st_ops_data = data;
				obj->efile.st_ops_shndx = idx;
			} else {
				pr_info("elf: skipping unrecognized data section(%d) %s\n",
					idx, name);
			}
		} else if (sh.sh_type == SHT_REL) {
			int nr_sects = obj->efile.nr_reloc_sects;
			void *sects = obj->efile.reloc_sects;
			int sec = sh.sh_info;

			/* only relocations for program code, struct_ops and
			 * BTF-defined maps are of interest
			 */
			if (!section_have_execinstr(obj, sec) &&
			    strcmp(name, ".rel" STRUCT_OPS_SEC) &&
			    strcmp(name, ".rel" MAPS_ELF_SEC)) {
				pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n",
					idx, name, sec,
					elf_sec_name(obj, elf_sec_by_idx(obj, sec)) ?: "<?>");
				continue;
			}

			sects = libbpf_reallocarray(sects, nr_sects + 1,
						    sizeof(*obj->efile.reloc_sects));
			if (!sects)
				return -ENOMEM;

			obj->efile.reloc_sects = sects;
			obj->efile.nr_reloc_sects++;

			obj->efile.reloc_sects[nr_sects].shdr = sh;
			obj->efile.reloc_sects[nr_sects].data = data;
		} else if (sh.sh_type == SHT_NOBITS && strcmp(name, BSS_SEC) == 0) {
			obj->efile.bss = data;
			obj->efile.bss_shndx = idx;
		} else {
			pr_info("elf: skipping section(%d) %s (size %zu)\n", idx, name,
				(size_t)sh.sh_size);
		}
	}

	if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) {
		pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}

	/* sort BPF programs by section index and instruction offset so
	 * that later lookups (find_prog_by_sec_insn) can binary-search
	 */
	qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs);

	return bpf_object__init_btf(obj, btf_data, btf_ext_data);
}
2980
2981static bool sym_is_extern(const GElf_Sym *sym)
2982{
2983 int bind = GELF_ST_BIND(sym->st_info);
2984
2985 return sym->st_shndx == SHN_UNDEF &&
2986 (bind == STB_GLOBAL || bind == STB_WEAK) &&
2987 GELF_ST_TYPE(sym->st_info) == STT_NOTYPE;
2988}
2989
2990static int find_extern_btf_id(const struct btf *btf, const char *ext_name)
2991{
2992 const struct btf_type *t;
2993 const char *var_name;
2994 int i, n;
2995
2996 if (!btf)
2997 return -ESRCH;
2998
2999 n = btf__get_nr_types(btf);
3000 for (i = 1; i <= n; i++) {
3001 t = btf__type_by_id(btf, i);
3002
3003 if (!btf_is_var(t))
3004 continue;
3005
3006 var_name = btf__name_by_offset(btf, t->name_off);
3007 if (strcmp(var_name, ext_name))
3008 continue;
3009
3010 if (btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN)
3011 return -EINVAL;
3012
3013 return i;
3014 }
3015
3016 return -ENOENT;
3017}
3018
3019static int find_extern_sec_btf_id(struct btf *btf, int ext_btf_id) {
3020 const struct btf_var_secinfo *vs;
3021 const struct btf_type *t;
3022 int i, j, n;
3023
3024 if (!btf)
3025 return -ESRCH;
3026
3027 n = btf__get_nr_types(btf);
3028 for (i = 1; i <= n; i++) {
3029 t = btf__type_by_id(btf, i);
3030
3031 if (!btf_is_datasec(t))
3032 continue;
3033
3034 vs = btf_var_secinfos(t);
3035 for (j = 0; j < btf_vlen(t); j++, vs++) {
3036 if (vs->type == ext_btf_id)
3037 return i;
3038 }
3039 }
3040
3041 return -ENOENT;
3042}
3043
/* Classify the BTF type of a Kconfig extern into one of the supported
 * kcfg kinds (bool, tristate, char, int, char array). If @is_signed is
 * non-NULL it is set to the type's signedness. Returns KCFG_UNKNOWN for
 * anything unsupported.
 */
static enum kcfg_type find_kcfg_type(const struct btf *btf, int id,
				     bool *is_signed)
{
	const struct btf_type *t;
	const char *name;

	t = skip_mods_and_typedefs(btf, id, NULL);
	name = btf__name_by_offset(btf, t->name_off);

	if (is_signed)
		*is_signed = false;
	switch (btf_kind(t)) {
	case BTF_KIND_INT: {
		int enc = btf_int_encoding(t);

		/* bool must be exactly 1 byte */
		if (enc & BTF_INT_BOOL)
			return t->size == 1 ? KCFG_BOOL : KCFG_UNKNOWN;
		if (is_signed)
			*is_signed = enc & BTF_INT_SIGNED;
		if (t->size == 1)
			return KCFG_CHAR;
		/* only power-of-2 sizes up to 8 bytes are valid ints */
		if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1)))
			return KCFG_UNKNOWN;
		return KCFG_INT;
	}
	case BTF_KIND_ENUM:
		/* the only recognized enum is the 4-byte libbpf_tristate */
		if (t->size != 4)
			return KCFG_UNKNOWN;
		if (strcmp(name, "libbpf_tristate"))
			return KCFG_UNKNOWN;
		return KCFG_TRISTATE;
	case BTF_KIND_ARRAY:
		if (btf_array(t)->nelems == 0)
			return KCFG_UNKNOWN;
		/* arrays are supported only if the element type is char */
		if (find_kcfg_type(btf, btf_array(t)->type, NULL) != KCFG_CHAR)
			return KCFG_UNKNOWN;
		return KCFG_CHAR_ARR;
	default:
		return KCFG_UNKNOWN;
	}
}
3085
3086static int cmp_externs(const void *_a, const void *_b)
3087{
3088 const struct extern_desc *a = _a;
3089 const struct extern_desc *b = _b;
3090
3091 if (a->type != b->type)
3092 return a->type < b->type ? -1 : 1;
3093
3094 if (a->type == EXT_KCFG) {
3095
3096 if (a->kcfg.align != b->kcfg.align)
3097 return a->kcfg.align > b->kcfg.align ? -1 : 1;
3098
3099 if (a->kcfg.sz != b->kcfg.sz)
3100 return a->kcfg.sz < b->kcfg.sz ? -1 : 1;
3101 }
3102
3103
3104 return strcmp(a->name, b->name);
3105}
3106
/* Find any 32-bit integer type in @btf; returns its ID, or 0 if none. */
static int find_int_btf_id(const struct btf *btf)
{
	int n = btf__get_nr_types(btf);
	int i;

	for (i = 1; i <= n; i++) {
		const struct btf_type *t = btf__type_by_id(btf, i);

		if (btf_is_int(t) && btf_int_bits(t) == 32)
			return i;
	}

	return 0;
}
3122
/* Discover all extern variables in the object: scan the symbol table for
 * extern symbols, match each with its BTF VAR, and classify it as a
 * Kconfig extern (.kconfig) or a ksym extern (.ksyms). Afterwards, fix up
 * the corresponding BTF DATASECs in place (offsets/sizes/linkage), which
 * the compiler leaves incomplete for externs.
 */
static int bpf_object__collect_externs(struct bpf_object *obj)
{
	struct btf_type *sec, *kcfg_sec = NULL, *ksym_sec = NULL;
	const struct btf_type *t;
	struct extern_desc *ext;
	int i, n, off;
	const char *ext_name, *sec_name;
	Elf_Scn *scn;
	GElf_Shdr sh;

	if (!obj->efile.symbols)
		return 0;

	scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx);
	if (elf_sec_hdr(obj, scn, &sh))
		return -LIBBPF_ERRNO__FORMAT;

	n = sh.sh_size / sh.sh_entsize;
	pr_debug("looking for externs among %d symbols...\n", n);

	for (i = 0; i < n; i++) {
		GElf_Sym sym;

		if (!gelf_getsym(obj->efile.symbols, i, &sym))
			return -LIBBPF_ERRNO__FORMAT;
		if (!sym_is_extern(&sym))
			continue;
		ext_name = elf_sym_str(obj, sym.st_name);
		if (!ext_name || !ext_name[0])
			continue;

		/* grow the externs array by one and zero the new slot */
		ext = obj->externs;
		ext = libbpf_reallocarray(ext, obj->nr_extern + 1, sizeof(*ext));
		if (!ext)
			return -ENOMEM;
		obj->externs = ext;
		ext = &ext[obj->nr_extern];
		memset(ext, 0, sizeof(*ext));
		obj->nr_extern++;

		ext->btf_id = find_extern_btf_id(obj->btf, ext_name);
		if (ext->btf_id <= 0) {
			pr_warn("failed to find BTF for extern '%s': %d\n",
				ext_name, ext->btf_id);
			return ext->btf_id;
		}
		t = btf__type_by_id(obj->btf, ext->btf_id);
		ext->name = btf__name_by_offset(obj->btf, t->name_off);
		ext->sym_idx = i;
		ext->is_weak = GELF_ST_BIND(sym.st_info) == STB_WEAK;

		/* the enclosing DATASEC tells us which kind of extern it is */
		ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id);
		if (ext->sec_btf_id <= 0) {
			pr_warn("failed to find BTF for extern '%s' [%d] section: %d\n",
				ext_name, ext->btf_id, ext->sec_btf_id);
			return ext->sec_btf_id;
		}
		sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id);
		sec_name = btf__name_by_offset(obj->btf, sec->name_off);

		if (strcmp(sec_name, KCONFIG_SEC) == 0) {
			kcfg_sec = sec;
			ext->type = EXT_KCFG;
			ext->kcfg.sz = btf__resolve_size(obj->btf, t->type);
			if (ext->kcfg.sz <= 0) {
				pr_warn("failed to resolve size of extern (kcfg) '%s': %d\n",
					ext_name, ext->kcfg.sz);
				return ext->kcfg.sz;
			}
			ext->kcfg.align = btf__align_of(obj->btf, t->type);
			if (ext->kcfg.align <= 0) {
				pr_warn("failed to determine alignment of extern (kcfg) '%s': %d\n",
					ext_name, ext->kcfg.align);
				return -EINVAL;
			}
			ext->kcfg.type = find_kcfg_type(obj->btf, t->type,
						        &ext->kcfg.is_signed);
			if (ext->kcfg.type == KCFG_UNKNOWN) {
				pr_warn("extern (kcfg) '%s' type is unsupported\n", ext_name);
				return -ENOTSUP;
			}
		} else if (strcmp(sec_name, KSYMS_SEC) == 0) {
			ksym_sec = sec;
			ext->type = EXT_KSYM;
			skip_mods_and_typedefs(obj->btf, t->type,
					       &ext->ksym.type_id);
		} else {
			pr_warn("unrecognized extern section '%s'\n", sec_name);
			return -ENOTSUP;
		}
	}
	pr_debug("collected %d externs total\n", obj->nr_extern);

	if (!obj->nr_extern)
		return 0;

	/* sort externs by type, for kcfg ones also by (align, size, name) */
	qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs);

	/* Fix up the .ksyms DATASEC: give each VAR a concrete (int) type,
	 * allocated linkage, and a synthetic offset/size so the section is
	 * well-formed for the kernel.
	 */
	if (ksym_sec) {
		/* find existing 4-byte integer type in BTF to use for fake
		 * extern variables in DATASEC
		 */
		int int_btf_id = find_int_btf_id(obj->btf);

		for (i = 0; i < obj->nr_extern; i++) {
			ext = &obj->externs[i];
			if (ext->type != EXT_KSYM)
				continue;
			pr_debug("extern (ksym) #%d: symbol %d, name %s\n",
				 i, ext->sym_idx, ext->name);
		}

		sec = ksym_sec;
		n = btf_vlen(sec);
		for (i = 0, off = 0; i < n; i++, off += sizeof(int)) {
			struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
			struct btf_type *vt;

			vt = (void *)btf__type_by_id(obj->btf, vs->type);
			ext_name = btf__name_by_offset(obj->btf, vt->name_off);
			ext = find_extern_by_name(obj, ext_name);
			if (!ext) {
				pr_warn("failed to find extern definition for BTF var '%s'\n",
					ext_name);
				return -ESRCH;
			}
			btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
			vt->type = int_btf_id;
			vs->offset = off;
			vs->size = sizeof(int);
		}
		sec->size = off;
	}

	/* Fix up the .kconfig DATASEC: lay out extern data according to the
	 * sorted order computed above, then propagate the offsets back into
	 * the DATASEC's var_secinfos.
	 */
	if (kcfg_sec) {
		sec = kcfg_sec;
		/* for kcfg externs calculate their offsets within a .kconfig map */
		off = 0;
		for (i = 0; i < obj->nr_extern; i++) {
			ext = &obj->externs[i];
			if (ext->type != EXT_KCFG)
				continue;

			ext->kcfg.data_off = roundup(off, ext->kcfg.align);
			off = ext->kcfg.data_off + ext->kcfg.sz;
			pr_debug("extern (kcfg) #%d: symbol %d, off %u, name %s\n",
				 i, ext->sym_idx, ext->kcfg.data_off, ext->name);
		}
		sec->size = off;
		n = btf_vlen(sec);
		for (i = 0; i < n; i++) {
			struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;

			t = btf__type_by_id(obj->btf, vs->type);
			ext_name = btf__name_by_offset(obj->btf, t->name_off);
			ext = find_extern_by_name(obj, ext_name);
			if (!ext) {
				pr_warn("failed to find extern definition for BTF var '%s'\n",
					ext_name);
				return -ESRCH;
			}
			btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
			vs->offset = ext->kcfg.data_off;
		}
	}
	return 0;
}
3295
3296struct bpf_program *
3297bpf_object__find_program_by_title(const struct bpf_object *obj,
3298 const char *title)
3299{
3300 struct bpf_program *pos;
3301
3302 bpf_object__for_each_program(pos, obj) {
3303 if (pos->sec_name && !strcmp(pos->sec_name, title))
3304 return pos;
3305 }
3306 return NULL;
3307}
3308
3309static bool prog_is_subprog(const struct bpf_object *obj,
3310 const struct bpf_program *prog)
3311{
3312
3313
3314
3315
3316
3317
3318
3319
3320
3321
3322
3323
3324 return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1;
3325}
3326
3327struct bpf_program *
3328bpf_object__find_program_by_name(const struct bpf_object *obj,
3329 const char *name)
3330{
3331 struct bpf_program *prog;
3332
3333 bpf_object__for_each_program(prog, obj) {
3334 if (prog_is_subprog(obj, prog))
3335 continue;
3336 if (!strcmp(prog->name, name))
3337 return prog;
3338 }
3339 return NULL;
3340}
3341
3342static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
3343 int shndx)
3344{
3345 return shndx == obj->efile.data_shndx ||
3346 shndx == obj->efile.bss_shndx ||
3347 shndx == obj->efile.rodata_shndx;
3348}
3349
3350static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
3351 int shndx)
3352{
3353 return shndx == obj->efile.maps_shndx ||
3354 shndx == obj->efile.btf_maps_shndx;
3355}
3356
3357static enum libbpf_map_type
3358bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
3359{
3360 if (shndx == obj->efile.data_shndx)
3361 return LIBBPF_MAP_DATA;
3362 else if (shndx == obj->efile.bss_shndx)
3363 return LIBBPF_MAP_BSS;
3364 else if (shndx == obj->efile.rodata_shndx)
3365 return LIBBPF_MAP_RODATA;
3366 else if (shndx == obj->efile.symbols_shndx)
3367 return LIBBPF_MAP_KCONFIG;
3368 else
3369 return LIBBPF_MAP_UNSPEC;
3370}
3371
/* Classify a single relocation against @prog's instruction @insn_idx and
 * fill in *reloc_desc accordingly: a subprog call (RELO_CALL), an extern
 * reference (RELO_EXTERN), a map reference (RELO_LD64), or a global data
 * reference (RELO_DATA). Returns 0 on success, -LIBBPF_ERRNO__RELOC on
 * any malformed/unresolvable relocation.
 */
static int bpf_program__record_reloc(struct bpf_program *prog,
				     struct reloc_desc *reloc_desc,
				     __u32 insn_idx, const char *sym_name,
				     const GElf_Sym *sym, const GElf_Rel *rel)
{
	struct bpf_insn *insn = &prog->insns[insn_idx];
	size_t map_idx, nr_maps = prog->obj->nr_maps;
	struct bpf_object *obj = prog->obj;
	__u32 shdr_idx = sym->st_shndx;
	enum libbpf_map_type type;
	const char *sym_sec_name;
	struct bpf_map *map;

	reloc_desc->processed = false;

	/* sub-program call relocation */
	if (insn->code == (BPF_JMP | BPF_CALL)) {
		if (insn->src_reg != BPF_PSEUDO_CALL) {
			pr_warn("prog '%s': incorrect bpf_call opcode\n", prog->name);
			return -LIBBPF_ERRNO__RELOC;
		}
		/* subprog calls must point into the .text section */
		if (!shdr_idx || shdr_idx != obj->efile.text_shndx) {
			sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
			pr_warn("prog '%s': bad call relo against '%s' in section '%s'\n",
				prog->name, sym_name, sym_sec_name);
			return -LIBBPF_ERRNO__RELOC;
		}
		/* call target must be instruction-aligned */
		if (sym->st_value % BPF_INSN_SZ) {
			pr_warn("prog '%s': bad call relo against '%s' at offset %zu\n",
				prog->name, sym_name, (size_t)sym->st_value);
			return -LIBBPF_ERRNO__RELOC;
		}
		reloc_desc->type = RELO_CALL;
		reloc_desc->insn_idx = insn_idx;
		reloc_desc->sym_off = sym->st_value;
		return 0;
	}

	/* all other relocations must target a ld_imm64 instruction */
	if (insn->code != (BPF_LD | BPF_IMM | BPF_DW)) {
		pr_warn("prog '%s': invalid relo against '%s' for insns[%d].code 0x%x\n",
			prog->name, sym_name, insn_idx, insn->code);
		return -LIBBPF_ERRNO__RELOC;
	}

	if (sym_is_extern(sym)) {
		int sym_idx = GELF_R_SYM(rel->r_info);
		int i, n = obj->nr_extern;
		struct extern_desc *ext;

		/* match the relocation's symbol index against collected externs */
		for (i = 0; i < n; i++) {
			ext = &obj->externs[i];
			if (ext->sym_idx == sym_idx)
				break;
		}
		if (i >= n) {
			pr_warn("prog '%s': extern relo failed to find extern for '%s' (%d)\n",
				prog->name, sym_name, sym_idx);
			return -LIBBPF_ERRNO__RELOC;
		}
		pr_debug("prog '%s': found extern #%d '%s' (sym %d) for insn #%u\n",
			 prog->name, i, ext->name, ext->sym_idx, insn_idx);
		reloc_desc->type = RELO_EXTERN;
		reloc_desc->insn_idx = insn_idx;
		/* sym_off stores the extern's index, not a byte offset */
		reloc_desc->sym_off = i;
		return 0;
	}

	if (!shdr_idx || shdr_idx >= SHN_LORESERVE) {
		pr_warn("prog '%s': invalid relo against '%s' in special section 0x%x; forgot to initialize global var?..\n",
			prog->name, sym_name, shdr_idx);
		return -LIBBPF_ERRNO__RELOC;
	}

	type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
	sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));

	/* generic map reference relocation (maps / .maps sections) */
	if (type == LIBBPF_MAP_UNSPEC) {
		if (!bpf_object__shndx_is_maps(obj, shdr_idx)) {
			pr_warn("prog '%s': bad map relo against '%s' in section '%s'\n",
				prog->name, sym_name, sym_sec_name);
			return -LIBBPF_ERRNO__RELOC;
		}
		/* identify the map by matching its section and offset */
		for (map_idx = 0; map_idx < nr_maps; map_idx++) {
			map = &obj->maps[map_idx];
			if (map->libbpf_type != type ||
			    map->sec_idx != sym->st_shndx ||
			    map->sec_offset != sym->st_value)
				continue;
			pr_debug("prog '%s': found map %zd (%s, sec %d, off %zu) for insn #%u\n",
				 prog->name, map_idx, map->name, map->sec_idx,
				 map->sec_offset, insn_idx);
			break;
		}
		if (map_idx >= nr_maps) {
			pr_warn("prog '%s': map relo failed to find map for section '%s', off %zu\n",
				prog->name, sym_sec_name, (size_t)sym->st_value);
			return -LIBBPF_ERRNO__RELOC;
		}
		reloc_desc->type = RELO_LD64;
		reloc_desc->insn_idx = insn_idx;
		reloc_desc->map_idx = map_idx;
		reloc_desc->sym_off = 0;
		return 0;
	}

	/* global data map relocation (.data/.bss/.rodata/.kconfig) */
	if (!bpf_object__shndx_is_data(obj, shdr_idx)) {
		pr_warn("prog '%s': bad data relo against section '%s'\n",
			prog->name, sym_sec_name);
		return -LIBBPF_ERRNO__RELOC;
	}
	/* there is at most one internal map of each libbpf type */
	for (map_idx = 0; map_idx < nr_maps; map_idx++) {
		map = &obj->maps[map_idx];
		if (map->libbpf_type != type)
			continue;
		pr_debug("prog '%s': found data map %zd (%s, sec %d, off %zu) for insn %u\n",
			 prog->name, map_idx, map->name, map->sec_idx,
			 map->sec_offset, insn_idx);
		break;
	}
	if (map_idx >= nr_maps) {
		pr_warn("prog '%s': data relo failed to find map for section '%s'\n",
			prog->name, sym_sec_name);
		return -LIBBPF_ERRNO__RELOC;
	}

	reloc_desc->type = RELO_DATA;
	reloc_desc->insn_idx = insn_idx;
	reloc_desc->map_idx = map_idx;
	/* sym_off is the byte offset of the variable within the section */
	reloc_desc->sym_off = sym->st_value;
	return 0;
}
3506
3507static bool prog_contains_insn(const struct bpf_program *prog, size_t insn_idx)
3508{
3509 return insn_idx >= prog->sec_insn_off &&
3510 insn_idx < prog->sec_insn_off + prog->sec_insn_cnt;
3511}
3512
3513static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj,
3514 size_t sec_idx, size_t insn_idx)
3515{
3516 int l = 0, r = obj->nr_programs - 1, m;
3517 struct bpf_program *prog;
3518
3519 while (l < r) {
3520 m = l + (r - l + 1) / 2;
3521 prog = &obj->programs[m];
3522
3523 if (prog->sec_idx < sec_idx ||
3524 (prog->sec_idx == sec_idx && prog->sec_insn_off <= insn_idx))
3525 l = m;
3526 else
3527 r = m - 1;
3528 }
3529
3530
3531
3532 prog = &obj->programs[l];
3533 if (prog->sec_idx == sec_idx && prog_contains_insn(prog, insn_idx))
3534 return prog;
3535 return NULL;
3536}
3537
3538static int
3539bpf_object__collect_prog_relos(struct bpf_object *obj, GElf_Shdr *shdr, Elf_Data *data)
3540{
3541 Elf_Data *symbols = obj->efile.symbols;
3542 const char *relo_sec_name, *sec_name;
3543 size_t sec_idx = shdr->sh_info;
3544 struct bpf_program *prog;
3545 struct reloc_desc *relos;
3546 int err, i, nrels;
3547 const char *sym_name;
3548 __u32 insn_idx;
3549 GElf_Sym sym;
3550 GElf_Rel rel;
3551
3552 relo_sec_name = elf_sec_str(obj, shdr->sh_name);
3553 sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
3554 if (!relo_sec_name || !sec_name)
3555 return -EINVAL;
3556
3557 pr_debug("sec '%s': collecting relocation for section(%zu) '%s'\n",
3558 relo_sec_name, sec_idx, sec_name);
3559 nrels = shdr->sh_size / shdr->sh_entsize;
3560
3561 for (i = 0; i < nrels; i++) {
3562 if (!gelf_getrel(data, i, &rel)) {
3563 pr_warn("sec '%s': failed to get relo #%d\n", relo_sec_name, i);
3564 return -LIBBPF_ERRNO__FORMAT;
3565 }
3566 if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
3567 pr_warn("sec '%s': symbol 0x%zx not found for relo #%d\n",
3568 relo_sec_name, (size_t)GELF_R_SYM(rel.r_info), i);
3569 return -LIBBPF_ERRNO__FORMAT;
3570 }
3571 if (rel.r_offset % BPF_INSN_SZ) {
3572 pr_warn("sec '%s': invalid offset 0x%zx for relo #%d\n",
3573 relo_sec_name, (size_t)GELF_R_SYM(rel.r_info), i);
3574 return -LIBBPF_ERRNO__FORMAT;
3575 }
3576
3577 insn_idx = rel.r_offset / BPF_INSN_SZ;
3578
3579
3580
3581
3582
3583
3584 if (GELF_ST_TYPE(sym.st_info) == STT_SECTION && sym.st_name == 0)
3585 sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym.st_shndx));
3586 else
3587 sym_name = elf_sym_str(obj, sym.st_name);
3588 sym_name = sym_name ?: "<?";
3589
3590 pr_debug("sec '%s': relo #%d: insn #%u against '%s'\n",
3591 relo_sec_name, i, insn_idx, sym_name);
3592
3593 prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
3594 if (!prog) {
3595 pr_warn("sec '%s': relo #%d: program not found in section '%s' for insn #%u\n",
3596 relo_sec_name, i, sec_name, insn_idx);
3597 return -LIBBPF_ERRNO__RELOC;
3598 }
3599
3600 relos = libbpf_reallocarray(prog->reloc_desc,
3601 prog->nr_reloc + 1, sizeof(*relos));
3602 if (!relos)
3603 return -ENOMEM;
3604 prog->reloc_desc = relos;
3605
3606
3607 insn_idx -= prog->sec_insn_off;
3608 err = bpf_program__record_reloc(prog, &relos[prog->nr_reloc],
3609 insn_idx, sym_name, &sym, &rel);
3610 if (err)
3611 return err;
3612
3613 prog->nr_reloc++;
3614 }
3615 return 0;
3616}
3617
/* Look up and record BTF key/value type IDs for @map. BTF-defined
 * (.maps section) and struct_ops maps get their BTF info through other
 * paths and are skipped here.
 */
static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
{
	struct bpf_map_def *def = &map->def;
	__u32 key_type_id = 0, value_type_id = 0;
	int ret;

	/* these maps carry their own BTF info and are handled elsewhere */
	if (map->sec_idx == obj->efile.btf_maps_shndx ||
	    bpf_map__is_struct_ops(map))
		return 0;

	if (!bpf_map__is_internal(map)) {
		ret = btf__get_map_kv_tids(obj->btf, map->name, def->key_size,
					   def->value_size, &key_type_id,
					   &value_type_id);
	} else {
		/* internal maps (.data/.rodata/.bss/.kconfig) use the
		 * DATASEC type named after the section as their value type;
		 * ret is that DATASEC's BTF type ID on success
		 */
		ret = btf__find_by_name(obj->btf,
					libbpf_type_to_btf_name[map->libbpf_type]);
	}
	if (ret < 0)
		return ret;

	map->btf_key_type_id = key_type_id;
	map->btf_value_type_id = bpf_map__is_internal(map) ?
				 ret : value_type_id;
	return 0;
}
3652
/* Make @map reuse an already-created kernel map referred to by @fd: the
 * map's FD is replaced with a dup of @fd and its definition is refreshed
 * from the kernel's map info. On success the map is marked 'reused'.
 * Returns 0 on success, a negative error otherwise (original map intact).
 */
int bpf_map__reuse_fd(struct bpf_map *map, int fd)
{
	struct bpf_map_info info = {};
	__u32 len = sizeof(info);
	int new_fd, err;
	char *new_name;

	err = bpf_obj_get_info_by_fd(fd, &info, &len);
	if (err)
		return err;

	new_name = strdup(info.name);
	if (!new_name)
		return -errno;

	/* open a throwaway FD first just to reserve an FD number, so the
	 * dup3() below lands on a known, freshly-allocated slot
	 */
	new_fd = open("/", O_RDONLY | O_CLOEXEC);
	if (new_fd < 0) {
		err = -errno;
		goto err_free_new_name;
	}

	new_fd = dup3(fd, new_fd, O_CLOEXEC);
	if (new_fd < 0) {
		err = -errno;
		goto err_close_new_fd;
	}

	err = zclose(map->fd);
	if (err) {
		err = -errno;
		goto err_close_new_fd;
	}
	free(map->name);

	/* refresh the map definition from what the kernel reports */
	map->fd = new_fd;
	map->name = new_name;
	map->def.type = info.type;
	map->def.key_size = info.key_size;
	map->def.value_size = info.value_size;
	map->def.max_entries = info.max_entries;
	map->def.map_flags = info.map_flags;
	map->btf_key_type_id = info.btf_key_type_id;
	map->btf_value_type_id = info.btf_value_type_id;
	map->reused = true;

	return 0;

err_close_new_fd:
	close(new_fd);
err_free_new_name:
	free(new_name);
	return err;
}
3706
3707__u32 bpf_map__max_entries(const struct bpf_map *map)
3708{
3709 return map->def.max_entries;
3710}
3711
3712int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
3713{
3714 if (map->fd >= 0)
3715 return -EBUSY;
3716 map->def.max_entries = max_entries;
3717 return 0;
3718}
3719
3720int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
3721{
3722 if (!map || !max_entries)
3723 return -EINVAL;
3724
3725 return bpf_map__set_max_entries(map, max_entries);
3726}
3727
3728static int
3729bpf_object__probe_loading(struct bpf_object *obj)
3730{
3731 struct bpf_load_program_attr attr;
3732 char *cp, errmsg[STRERR_BUFSIZE];
3733 struct bpf_insn insns[] = {
3734 BPF_MOV64_IMM(BPF_REG_0, 0),
3735 BPF_EXIT_INSN(),
3736 };
3737 int ret;
3738
3739
3740
3741 memset(&attr, 0, sizeof(attr));
3742 attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
3743 attr.insns = insns;
3744 attr.insns_cnt = ARRAY_SIZE(insns);
3745 attr.license = "GPL";
3746
3747 ret = bpf_load_program_xattr(&attr, NULL, 0);
3748 if (ret < 0) {
3749 ret = errno;
3750 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
3751 pr_warn("Error in %s():%s(%d). Couldn't load trivial BPF "
3752 "program. Make sure your kernel supports BPF "
3753 "(CONFIG_BPF_SYSCALL=y) and/or that RLIMIT_MEMLOCK is "
3754 "set to big enough value.\n", __func__, cp, ret);
3755 return -ret;
3756 }
3757 close(ret);
3758
3759 return 0;
3760}
3761
/* Consume a feature-probe result fd: close it if valid and report
 * whether the probed operation succeeded (1) or not (0).
 */
static int probe_fd(int fd)
{
	if (fd < 0)
		return 0;
	close(fd);
	return 1;
}
3768
3769static int probe_kern_prog_name(void)
3770{
3771 struct bpf_load_program_attr attr;
3772 struct bpf_insn insns[] = {
3773 BPF_MOV64_IMM(BPF_REG_0, 0),
3774 BPF_EXIT_INSN(),
3775 };
3776 int ret;
3777
3778
3779
3780 memset(&attr, 0, sizeof(attr));
3781 attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
3782 attr.insns = insns;
3783 attr.insns_cnt = ARRAY_SIZE(insns);
3784 attr.license = "GPL";
3785 attr.name = "test";
3786 ret = bpf_load_program_xattr(&attr, NULL, 0);
3787 return probe_fd(ret);
3788}
3789
3790static int probe_kern_global_data(void)
3791{
3792 struct bpf_load_program_attr prg_attr;
3793 struct bpf_create_map_attr map_attr;
3794 char *cp, errmsg[STRERR_BUFSIZE];
3795 struct bpf_insn insns[] = {
3796 BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
3797 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
3798 BPF_MOV64_IMM(BPF_REG_0, 0),
3799 BPF_EXIT_INSN(),
3800 };
3801 int ret, map;
3802
3803 memset(&map_attr, 0, sizeof(map_attr));
3804 map_attr.map_type = BPF_MAP_TYPE_ARRAY;
3805 map_attr.key_size = sizeof(int);
3806 map_attr.value_size = 32;
3807 map_attr.max_entries = 1;
3808
3809 map = bpf_create_map_xattr(&map_attr);
3810 if (map < 0) {
3811 ret = -errno;
3812 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
3813 pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
3814 __func__, cp, -ret);
3815 return ret;
3816 }
3817
3818 insns[0].imm = map;
3819
3820 memset(&prg_attr, 0, sizeof(prg_attr));
3821 prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
3822 prg_attr.insns = insns;
3823 prg_attr.insns_cnt = ARRAY_SIZE(insns);
3824 prg_attr.license = "GPL";
3825
3826 ret = bpf_load_program_xattr(&prg_attr, NULL, 0);
3827 close(map);
3828 return probe_fd(ret);
3829}
3830
3831static int probe_kern_btf(void)
3832{
3833 static const char strs[] = "\0int";
3834 __u32 types[] = {
3835
3836 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
3837 };
3838
3839 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
3840 strs, sizeof(strs)));
3841}
3842
3843static int probe_kern_btf_func(void)
3844{
3845 static const char strs[] = "\0int\0x\0a";
3846
3847 __u32 types[] = {
3848
3849 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
3850
3851 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
3852 BTF_PARAM_ENC(7, 1),
3853
3854 BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
3855 };
3856
3857 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
3858 strs, sizeof(strs)));
3859}
3860
3861static int probe_kern_btf_func_global(void)
3862{
3863 static const char strs[] = "\0int\0x\0a";
3864
3865 __u32 types[] = {
3866
3867 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
3868
3869 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
3870 BTF_PARAM_ENC(7, 1),
3871
3872 BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2),
3873 };
3874
3875 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
3876 strs, sizeof(strs)));
3877}
3878
3879static int probe_kern_btf_datasec(void)
3880{
3881 static const char strs[] = "\0x\0.data";
3882
3883 __u32 types[] = {
3884
3885 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
3886
3887 BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
3888 BTF_VAR_STATIC,
3889
3890 BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
3891 BTF_VAR_SECINFO_ENC(2, 0, 4),
3892 };
3893
3894 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
3895 strs, sizeof(strs)));
3896}
3897
3898static int probe_kern_btf_float(void)
3899{
3900 static const char strs[] = "\0float";
3901 __u32 types[] = {
3902
3903 BTF_TYPE_FLOAT_ENC(1, 4),
3904 };
3905
3906 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
3907 strs, sizeof(strs)));
3908}
3909
3910static int probe_kern_array_mmap(void)
3911{
3912 struct bpf_create_map_attr attr = {
3913 .map_type = BPF_MAP_TYPE_ARRAY,
3914 .map_flags = BPF_F_MMAPABLE,
3915 .key_size = sizeof(int),
3916 .value_size = sizeof(int),
3917 .max_entries = 1,
3918 };
3919
3920 return probe_fd(bpf_create_map_xattr(&attr));
3921}
3922
3923static int probe_kern_exp_attach_type(void)
3924{
3925 struct bpf_load_program_attr attr;
3926 struct bpf_insn insns[] = {
3927 BPF_MOV64_IMM(BPF_REG_0, 0),
3928 BPF_EXIT_INSN(),
3929 };
3930
3931 memset(&attr, 0, sizeof(attr));
3932
3933
3934
3935
3936
3937 attr.prog_type = BPF_PROG_TYPE_CGROUP_SOCK;
3938 attr.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE;
3939 attr.insns = insns;
3940 attr.insns_cnt = ARRAY_SIZE(insns);
3941 attr.license = "GPL";
3942
3943 return probe_fd(bpf_load_program_xattr(&attr, NULL, 0));
3944}
3945
3946static int probe_kern_probe_read_kernel(void)
3947{
3948 struct bpf_load_program_attr attr;
3949 struct bpf_insn insns[] = {
3950 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3951 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
3952 BPF_MOV64_IMM(BPF_REG_2, 8),
3953 BPF_MOV64_IMM(BPF_REG_3, 0),
3954 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel),
3955 BPF_EXIT_INSN(),
3956 };
3957
3958 memset(&attr, 0, sizeof(attr));
3959 attr.prog_type = BPF_PROG_TYPE_KPROBE;
3960 attr.insns = insns;
3961 attr.insns_cnt = ARRAY_SIZE(insns);
3962 attr.license = "GPL";
3963
3964 return probe_fd(bpf_load_program_xattr(&attr, NULL, 0));
3965}
3966
3967static int probe_prog_bind_map(void)
3968{
3969 struct bpf_load_program_attr prg_attr;
3970 struct bpf_create_map_attr map_attr;
3971 char *cp, errmsg[STRERR_BUFSIZE];
3972 struct bpf_insn insns[] = {
3973 BPF_MOV64_IMM(BPF_REG_0, 0),
3974 BPF_EXIT_INSN(),
3975 };
3976 int ret, map, prog;
3977
3978 memset(&map_attr, 0, sizeof(map_attr));
3979 map_attr.map_type = BPF_MAP_TYPE_ARRAY;
3980 map_attr.key_size = sizeof(int);
3981 map_attr.value_size = 32;
3982 map_attr.max_entries = 1;
3983
3984 map = bpf_create_map_xattr(&map_attr);
3985 if (map < 0) {
3986 ret = -errno;
3987 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
3988 pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
3989 __func__, cp, -ret);
3990 return ret;
3991 }
3992
3993 memset(&prg_attr, 0, sizeof(prg_attr));
3994 prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
3995 prg_attr.insns = insns;
3996 prg_attr.insns_cnt = ARRAY_SIZE(insns);
3997 prg_attr.license = "GPL";
3998
3999 prog = bpf_load_program_xattr(&prg_attr, NULL, 0);
4000 if (prog < 0) {
4001 close(map);
4002 return 0;
4003 }
4004
4005 ret = bpf_prog_bind_map(prog, map, NULL);
4006
4007 close(map);
4008 close(prog);
4009
4010 return ret >= 0;
4011}
4012
4013static int probe_module_btf(void)
4014{
4015 static const char strs[] = "\0int";
4016 __u32 types[] = {
4017
4018 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
4019 };
4020 struct bpf_btf_info info;
4021 __u32 len = sizeof(info);
4022 char name[16];
4023 int fd, err;
4024
4025 fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs));
4026 if (fd < 0)
4027 return 0;
4028
4029 memset(&info, 0, sizeof(info));
4030 info.name = ptr_to_u64(name);
4031 info.name_len = sizeof(name);
4032
4033
4034
4035
4036
4037 err = bpf_obj_get_info_by_fd(fd, &info, &len);
4038 close(fd);
4039 return !err;
4040}
4041
/* Outcome of a single kernel feature probe. */
enum kern_feature_result {
	FEAT_UNKNOWN = 0,	/* probe hasn't run yet */
	FEAT_SUPPORTED = 1,
	FEAT_MISSING = 2,
};

/* A probe returns >0 if supported, 0 if not, <0 on probe error
 * (see kernel_supports() for how each outcome is handled).
 */
typedef int (*feature_probe_fn)(void);

/* Registry of lazily-run kernel feature probes, indexed by
 * enum kern_feature_id; res caches the probe's outcome.
 */
static struct kern_feature_desc {
	const char *desc;
	feature_probe_fn probe;
	enum kern_feature_result res;
} feature_probes[__FEAT_CNT] = {
	[FEAT_PROG_NAME] = {
		"BPF program name", probe_kern_prog_name,
	},
	[FEAT_GLOBAL_DATA] = {
		"global variables", probe_kern_global_data,
	},
	[FEAT_BTF] = {
		"minimal BTF", probe_kern_btf,
	},
	[FEAT_BTF_FUNC] = {
		"BTF functions", probe_kern_btf_func,
	},
	[FEAT_BTF_GLOBAL_FUNC] = {
		"BTF global function", probe_kern_btf_func_global,
	},
	[FEAT_BTF_DATASEC] = {
		"BTF data section and variable", probe_kern_btf_datasec,
	},
	[FEAT_ARRAY_MMAP] = {
		"ARRAY map mmap()", probe_kern_array_mmap,
	},
	[FEAT_EXP_ATTACH_TYPE] = {
		"BPF_PROG_LOAD expected_attach_type attribute",
		probe_kern_exp_attach_type,
	},
	[FEAT_PROBE_READ_KERN] = {
		"bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel,
	},
	[FEAT_PROG_BIND_MAP] = {
		"BPF_PROG_BIND_MAP support", probe_prog_bind_map,
	},
	[FEAT_MODULE_BTF] = {
		"module BTF support", probe_module_btf,
	},
	[FEAT_BTF_FLOAT] = {
		"BTF_KIND_FLOAT support", probe_kern_btf_float,
	},
};
4093
/* Lazily probe and cache whether the running kernel supports feat_id.
 * The first call runs the probe and caches the result in feature_probes;
 * READ_ONCE/WRITE_ONCE keep concurrent readers consistent (a racing
 * probe may run twice, which is benign since probes are idempotent).
 */
static bool kernel_supports(enum kern_feature_id feat_id)
{
	struct kern_feature_desc *feat = &feature_probes[feat_id];
	int ret;

	if (READ_ONCE(feat->res) == FEAT_UNKNOWN) {
		ret = feat->probe();
		if (ret > 0) {
			WRITE_ONCE(feat->res, FEAT_SUPPORTED);
		} else if (ret == 0) {
			WRITE_ONCE(feat->res, FEAT_MISSING);
		} else {
			/* probe error is conservatively treated as missing */
			pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret);
			WRITE_ONCE(feat->res, FEAT_MISSING);
		}
	}

	return READ_ONCE(feat->res) == FEAT_SUPPORTED;
}
4113
4114static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
4115{
4116 struct bpf_map_info map_info = {};
4117 char msg[STRERR_BUFSIZE];
4118 __u32 map_info_len;
4119
4120 map_info_len = sizeof(map_info);
4121
4122 if (bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len)) {
4123 pr_warn("failed to get map info for map FD %d: %s\n",
4124 map_fd, libbpf_strerror_r(errno, msg, sizeof(msg)));
4125 return false;
4126 }
4127
4128 return (map_info.type == map->def.type &&
4129 map_info.key_size == map->def.key_size &&
4130 map_info.value_size == map->def.value_size &&
4131 map_info.max_entries == map->def.max_entries &&
4132 map_info.map_flags == map->def.map_flags);
4133}
4134
4135static int
4136bpf_object__reuse_map(struct bpf_map *map)
4137{
4138 char *cp, errmsg[STRERR_BUFSIZE];
4139 int err, pin_fd;
4140
4141 pin_fd = bpf_obj_get(map->pin_path);
4142 if (pin_fd < 0) {
4143 err = -errno;
4144 if (err == -ENOENT) {
4145 pr_debug("found no pinned map to reuse at '%s'\n",
4146 map->pin_path);
4147 return 0;
4148 }
4149
4150 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
4151 pr_warn("couldn't retrieve pinned map '%s': %s\n",
4152 map->pin_path, cp);
4153 return err;
4154 }
4155
4156 if (!map_is_reuse_compat(map, pin_fd)) {
4157 pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n",
4158 map->pin_path);
4159 close(pin_fd);
4160 return -EINVAL;
4161 }
4162
4163 err = bpf_map__reuse_fd(map, pin_fd);
4164 if (err) {
4165 close(pin_fd);
4166 return err;
4167 }
4168 map->pinned = true;
4169 pr_debug("reused pinned map at '%s'\n", map->pin_path);
4170
4171 return 0;
4172}
4173
4174static int
4175bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
4176{
4177 enum libbpf_map_type map_type = map->libbpf_type;
4178 char *cp, errmsg[STRERR_BUFSIZE];
4179 int err, zero = 0;
4180
4181 err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0);
4182 if (err) {
4183 err = -errno;
4184 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4185 pr_warn("Error setting initial map(%s) contents: %s\n",
4186 map->name, cp);
4187 return err;
4188 }
4189
4190
4191 if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG) {
4192 err = bpf_map_freeze(map->fd);
4193 if (err) {
4194 err = -errno;
4195 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4196 pr_warn("Error freezing map(%s) as read-only: %s\n",
4197 map->name, cp);
4198 return err;
4199 }
4200 }
4201 return 0;
4202}
4203
4204static void bpf_map__destroy(struct bpf_map *map);
4205
/* Create the kernel object for *map*, filling in map->fd. Handles:
 * PERF_EVENT_ARRAY default sizing to the number of possible CPUs,
 * attaching BTF key/value type IDs (with a retry without BTF if the
 * kernel rejects them), struct_ops vmlinux value type, and recursive
 * creation of an inner map prototype for map-in-map types.
 * Returns 0 on success, negative error otherwise.
 */
static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map)
{
	struct bpf_create_map_attr create_attr;
	struct bpf_map_def *def = &map->def;

	memset(&create_attr, 0, sizeof(create_attr));

	/* only pass a name if the kernel understands object names */
	if (kernel_supports(FEAT_PROG_NAME))
		create_attr.name = map->name;
	create_attr.map_ifindex = map->map_ifindex;
	create_attr.map_type = def->type;
	create_attr.map_flags = def->map_flags;
	create_attr.key_size = def->key_size;
	create_attr.value_size = def->value_size;
	create_attr.numa_node = map->numa_node;

	if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !def->max_entries) {
		int nr_cpus;

		/* default perf event array to one slot per possible CPU */
		nr_cpus = libbpf_num_possible_cpus();
		if (nr_cpus < 0) {
			pr_warn("map '%s': failed to determine number of system CPUs: %d\n",
				map->name, nr_cpus);
			return nr_cpus;
		}
		pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus);
		create_attr.max_entries = nr_cpus;
	} else {
		create_attr.max_entries = def->max_entries;
	}

	if (bpf_map__is_struct_ops(map))
		create_attr.btf_vmlinux_value_type_id =
			map->btf_vmlinux_value_type_id;

	/* attach BTF type IDs only if object BTF was successfully loaded */
	create_attr.btf_fd = 0;
	create_attr.btf_key_type_id = 0;
	create_attr.btf_value_type_id = 0;
	if (obj->btf && btf__fd(obj->btf) >= 0 && !bpf_map_find_btf_info(obj, map)) {
		create_attr.btf_fd = btf__fd(obj->btf);
		create_attr.btf_key_type_id = map->btf_key_type_id;
		create_attr.btf_value_type_id = map->btf_value_type_id;
	}

	if (bpf_map_type__is_map_in_map(def->type)) {
		if (map->inner_map) {
			int err;

			/* inner map must exist first to act as a prototype */
			err = bpf_object__create_map(obj, map->inner_map);
			if (err) {
				pr_warn("map '%s': failed to create inner map: %d\n",
					map->name, err);
				return err;
			}
			map->inner_map_fd = bpf_map__fd(map->inner_map);
		}
		if (map->inner_map_fd >= 0)
			create_attr.inner_map_fd = map->inner_map_fd;
	}

	map->fd = bpf_create_map_xattr(&create_attr);
	if (map->fd < 0 && (create_attr.btf_key_type_id ||
			    create_attr.btf_value_type_id)) {
		char *cp, errmsg[STRERR_BUFSIZE];
		int err = -errno;

		/* kernel may lack map BTF support; retry with BTF stripped */
		cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
		pr_warn("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
			map->name, cp, err);
		create_attr.btf_fd = 0;
		create_attr.btf_key_type_id = 0;
		create_attr.btf_value_type_id = 0;
		map->btf_key_type_id = 0;
		map->btf_value_type_id = 0;
		map->fd = bpf_create_map_xattr(&create_attr);
	}

	if (map->fd < 0)
		return -errno;

	/* inner map prototype served its purpose; only the fd is kept */
	if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) {
		bpf_map__destroy(map->inner_map);
		zfree(&map->inner_map);
	}

	return 0;
}
4293
4294static int init_map_slots(struct bpf_map *map)
4295{
4296 const struct bpf_map *targ_map;
4297 unsigned int i;
4298 int fd, err;
4299
4300 for (i = 0; i < map->init_slots_sz; i++) {
4301 if (!map->init_slots[i])
4302 continue;
4303
4304 targ_map = map->init_slots[i];
4305 fd = bpf_map__fd(targ_map);
4306 err = bpf_map_update_elem(map->fd, &i, &fd, 0);
4307 if (err) {
4308 err = -errno;
4309 pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n",
4310 map->name, i, targ_map->name,
4311 fd, err);
4312 return err;
4313 }
4314 pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n",
4315 map->name, i, targ_map->name, fd);
4316 }
4317
4318 zfree(&map->init_slots);
4319 map->init_slots_sz = 0;
4320
4321 return 0;
4322}
4323
/* Create (or reuse, if pinned) every map of the object: seed internal
 * maps' initial contents, fill map-in-map slots, and auto-pin maps with
 * a pin_path. On any failure, closes the fds of all maps created so far.
 */
static int
bpf_object__create_maps(struct bpf_object *obj)
{
	struct bpf_map *map;
	char *cp, errmsg[STRERR_BUFSIZE];
	unsigned int i, j;
	int err;

	for (i = 0; i < obj->nr_maps; i++) {
		map = &obj->maps[i];

		if (map->pin_path) {
			/* sets map->fd if a compatible pinned map exists */
			err = bpf_object__reuse_map(map);
			if (err) {
				pr_warn("map '%s': error reusing pinned map\n",
					map->name);
				goto err_out;
			}
		}

		if (map->fd >= 0) {
			pr_debug("map '%s': skipping creation (preset fd=%d)\n",
				 map->name, map->fd);
		} else {
			err = bpf_object__create_map(obj, map);
			if (err)
				goto err_out;

			pr_debug("map '%s': created successfully, fd=%d\n",
				 map->name, map->fd);

			if (bpf_map__is_internal(map)) {
				err = bpf_object__populate_internal_map(obj, map);
				if (err < 0) {
					zclose(map->fd);
					goto err_out;
				}
			}

			if (map->init_slots_sz) {
				err = init_map_slots(map);
				if (err < 0) {
					zclose(map->fd);
					goto err_out;
				}
			}
		}

		if (map->pin_path && !map->pinned) {
			err = bpf_map__pin(map, NULL);
			if (err) {
				pr_warn("map '%s': failed to auto-pin at '%s': %d\n",
					map->name, map->pin_path, err);
				zclose(map->fd);
				goto err_out;
			}
		}
	}

	return 0;

err_out:
	cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
	pr_warn("map '%s': failed to create: %s(%d)\n", map->name, cp, err);
	pr_perm_msg(err);
	/* unwind: close fds of all previously created maps */
	for (j = 0; j < i; j++)
		zclose(obj->maps[j].fd);
	return err;
}
4393
#define BPF_CORE_SPEC_MAX_LEN 64

/* One accessor step of a CO-RE relocation spec: a struct/union member,
 * an array element, or (for enumval relos) an enumerator.
 */
struct bpf_core_accessor {
	__u32 type_id;		/* struct/union/array element type ID */
	__u32 idx;		/* member/array/enumerator index */
	const char *name;	/* member name; NULL for array accesses */
};

struct bpf_core_spec {
	const struct btf *btf;
	/* high-level spec: named fields and array indices only */
	struct bpf_core_accessor spec[BPF_CORE_SPEC_MAX_LEN];
	/* original (unresolved) root type ID of the relocation */
	__u32 root_type_id;
	/* kind of the CO-RE relocation this spec belongs to */
	enum bpf_core_relo_kind relo_kind;
	/* number of entries used in spec[] */
	int len;
	/* raw, low-level spec: 1:1 with the accessor string indices */
	int raw_spec[BPF_CORE_SPEC_MAX_LEN];
	/* number of entries used in raw_spec[] */
	int raw_len;
	/* resulting field bit offset represented by this spec */
	__u32 bit_offset;
};
4420
/* Return true for a NULL or zero-length C string. */
static bool str_is_empty(const char *s)
{
	if (!s)
		return true;
	return s[0] == '\0';
}
4425
4426static bool is_flex_arr(const struct btf *btf,
4427 const struct bpf_core_accessor *acc,
4428 const struct btf_array *arr)
4429{
4430 const struct btf_type *t;
4431
4432
4433 if (!acc->name || arr->nelems > 0)
4434 return false;
4435
4436
4437 t = btf__type_by_id(btf, acc->type_id);
4438 return acc->idx == btf_vlen(t) - 1;
4439}
4440
/* Human-readable short name of a CO-RE relocation kind, for use in
 * debug and warning messages.
 */
static const char *core_relo_kind_str(enum bpf_core_relo_kind kind)
{
	switch (kind) {
	case BPF_FIELD_BYTE_OFFSET: return "byte_off";
	case BPF_FIELD_BYTE_SIZE: return "byte_sz";
	case BPF_FIELD_EXISTS: return "field_exists";
	case BPF_FIELD_SIGNED: return "signed";
	case BPF_FIELD_LSHIFT_U64: return "lshift_u64";
	case BPF_FIELD_RSHIFT_U64: return "rshift_u64";
	case BPF_TYPE_ID_LOCAL: return "local_type_id";
	case BPF_TYPE_ID_TARGET: return "target_type_id";
	case BPF_TYPE_EXISTS: return "type_exists";
	case BPF_TYPE_SIZE: return "type_size";
	case BPF_ENUMVAL_EXISTS: return "enumval_exists";
	case BPF_ENUMVAL_VALUE: return "enumval_value";
	default: return "unknown";
	}
}
4459
4460static bool core_relo_is_field_based(enum bpf_core_relo_kind kind)
4461{
4462 switch (kind) {
4463 case BPF_FIELD_BYTE_OFFSET:
4464 case BPF_FIELD_BYTE_SIZE:
4465 case BPF_FIELD_EXISTS:
4466 case BPF_FIELD_SIGNED:
4467 case BPF_FIELD_LSHIFT_U64:
4468 case BPF_FIELD_RSHIFT_U64:
4469 return true;
4470 default:
4471 return false;
4472 }
4473}
4474
4475static bool core_relo_is_type_based(enum bpf_core_relo_kind kind)
4476{
4477 switch (kind) {
4478 case BPF_TYPE_ID_LOCAL:
4479 case BPF_TYPE_ID_TARGET:
4480 case BPF_TYPE_EXISTS:
4481 case BPF_TYPE_SIZE:
4482 return true;
4483 default:
4484 return false;
4485 }
4486}
4487
4488static bool core_relo_is_enumval_based(enum bpf_core_relo_kind kind)
4489{
4490 switch (kind) {
4491 case BPF_ENUMVAL_EXISTS:
4492 case BPF_ENUMVAL_VALUE:
4493 return true;
4494 default:
4495 return false;
4496 }
4497}
4498
4499
4500
4501
4502
4503
4504
4505
4506
4507
4508
4509
4510
4511
4512
4513
4514
4515
4516
4517
4518
4519
4520
4521
4522
4523
4524
4525
4526
4527
4528
4529
4530
4531
4532
4533
4534
4535
/* Parse a CO-RE relocation's accessor string into low-level (raw index
 * array) and high-level (named accessors) spec representations, while
 * validating it against BTF and computing the resulting field bit
 * offset.
 *
 * spec_str is a sequence of colon-separated decimal indices, e.g.
 * "0:1:2". The first index is an "array dereference" of the root type;
 * subsequent indices select struct/union members or array elements.
 * Type-based relocations only accept the string "0"; enumval-based
 * relocations accept a single enumerator index.
 *
 * Returns 0 on success, -EINVAL on a malformed/invalid spec, -E2BIG if
 * the spec exceeds BPF_CORE_SPEC_MAX_LEN, or an error from
 * btf__resolve_size().
 */
static int bpf_core_parse_spec(const struct btf *btf,
			       __u32 type_id,
			       const char *spec_str,
			       enum bpf_core_relo_kind relo_kind,
			       struct bpf_core_spec *spec)
{
	int access_idx, parsed_len, i;
	struct bpf_core_accessor *acc;
	const struct btf_type *t;
	const char *name;
	__u32 id;
	__s64 sz;

	if (str_is_empty(spec_str) || *spec_str == ':')
		return -EINVAL;

	memset(spec, 0, sizeof(*spec));
	spec->btf = btf;
	spec->root_type_id = type_id;
	spec->relo_kind = relo_kind;

	/* type-based relocations carry no field access; spec must be "0" */
	if (core_relo_is_type_based(relo_kind)) {
		if (strcmp(spec_str, "0"))
			return -EINVAL;
		return 0;
	}

	/* parse "0:1:2:..." into the raw index array */
	while (*spec_str) {
		if (*spec_str == ':')
			++spec_str;
		if (sscanf(spec_str, "%d%n", &access_idx, &parsed_len) != 1)
			return -EINVAL;
		if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
			return -E2BIG;
		spec_str += parsed_len;
		spec->raw_spec[spec->raw_len++] = access_idx;
	}

	if (spec->raw_len == 0)
		return -EINVAL;

	t = skip_mods_and_typedefs(btf, type_id, &id);
	if (!t)
		return -EINVAL;

	access_idx = spec->raw_spec[0];
	acc = &spec->spec[0];
	acc->type_id = id;
	acc->idx = access_idx;
	spec->len++;

	if (core_relo_is_enumval_based(relo_kind)) {
		/* exactly one index, selecting an existing enumerator */
		if (!btf_is_enum(t) || spec->raw_len > 1 || access_idx >= btf_vlen(t))
			return -EINVAL;

		acc->name = btf__name_by_offset(btf, btf_enum(t)[access_idx].name_off);
		return 0;
	}

	if (!core_relo_is_field_based(relo_kind))
		return -EINVAL;

	/* first index is an "array dereference" of the root type */
	sz = btf__resolve_size(btf, id);
	if (sz < 0)
		return sz;
	spec->bit_offset = access_idx * sz * 8;

	for (i = 1; i < spec->raw_len; i++) {
		t = skip_mods_and_typedefs(btf, id, &id);
		if (!t)
			return -EINVAL;

		access_idx = spec->raw_spec[i];
		acc = &spec->spec[spec->len];

		if (btf_is_composite(t)) {
			const struct btf_member *m;
			__u32 bit_offset;

			if (access_idx >= btf_vlen(t))
				return -EINVAL;

			bit_offset = btf_member_bit_offset(t, access_idx);
			spec->bit_offset += bit_offset;

			m = btf_members(t) + access_idx;
			if (m->name_off) {
				name = btf__name_by_offset(btf, m->name_off);
				if (str_is_empty(name))
					return -EINVAL;

				/* only named members get a high-level accessor;
				 * anonymous struct/union accesses stay raw-only
				 */
				acc->type_id = id;
				acc->idx = access_idx;
				acc->name = name;
				spec->len++;
			}

			id = m->type;
		} else if (btf_is_array(t)) {
			const struct btf_array *a = btf_array(t);
			bool flex;

			t = skip_mods_and_typedefs(btf, a->type, &id);
			if (!t)
				return -EINVAL;

			/* out-of-bounds index is allowed only for flex arrays */
			flex = is_flex_arr(btf, acc - 1, a);
			if (!flex && access_idx >= a->nelems)
				return -EINVAL;

			spec->spec[spec->len].type_id = id;
			spec->spec[spec->len].idx = access_idx;
			spec->len++;

			sz = btf__resolve_size(btf, id);
			if (sz < 0)
				return sz;
			spec->bit_offset += access_idx * sz * 8;
		} else {
			pr_warn("relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %s\n",
				type_id, spec_str, i, id, btf_kind_str(t));
			return -EINVAL;
		}
	}

	return 0;
}
4666
/* Check whether s points at a "flavor" separator: exactly three
 * underscores ("___") with a non-underscore character on each side.
 * The caller must guarantee at least 5 readable bytes at s.
 */
static bool bpf_core_is_flavor_sep(const char *s)
{
	bool bounded = s[0] != '_' && s[4] != '_';
	bool triple = s[1] == '_' && s[2] == '_' && s[3] == '_';

	return bounded && triple;
}
4674
4675
4676
4677
4678
/* Length of a type name's "essential" part: everything before the last
 * "___flavor" suffix. If no flavor separator is present, the full name
 * length is returned.
 */
static size_t bpf_core_essential_name_len(const char *name)
{
	size_t n = strlen(name);
	int i = n - 5;

	/* scan right-to-left for the last flavor separator */
	while (i >= 0) {
		if (bpf_core_is_flavor_sep(name + i))
			return i + 1;
		i--;
	}
	return n;
}
4690
/* One candidate target type for a CO-RE relocation. */
struct core_cand
{
	const struct btf *btf;		/* BTF the candidate lives in */
	const struct btf_type *t;	/* candidate type */
	const char *name;		/* candidate type name */
	__u32 id;			/* candidate type ID within btf */
};

/* Dynamically sized list of candidates (see bpf_core_add_cands()). */
struct core_cand_list {
	struct core_cand *cands;
	int len;
};
4704
4705static void bpf_core_free_cands(struct core_cand_list *cands)
4706{
4707 free(cands->cands);
4708 free(cands);
4709}
4710
/* Scan targ_btf, starting from type ID targ_start_id, for types whose
 * kind matches the local candidate's kind and whose "essential" name
 * (flavor suffix "___..." stripped) matches the local one; append each
 * match to cands. Returns 0 on success, -ENOMEM on allocation failure.
 */
static int bpf_core_add_cands(struct core_cand *local_cand,
			      size_t local_essent_len,
			      const struct btf *targ_btf,
			      const char *targ_btf_name,
			      int targ_start_id,
			      struct core_cand_list *cands)
{
	struct core_cand *new_cands, *cand;
	const struct btf_type *t;
	const char *targ_name;
	size_t targ_essent_len;
	int n, i;

	n = btf__get_nr_types(targ_btf);
	for (i = targ_start_id; i <= n; i++) {
		t = btf__type_by_id(targ_btf, i);
		if (btf_kind(t) != btf_kind(local_cand->t))
			continue;

		targ_name = btf__name_by_offset(targ_btf, t->name_off);
		if (str_is_empty(targ_name))
			continue;

		/* essential (flavor-less) name lengths must match first */
		targ_essent_len = bpf_core_essential_name_len(targ_name);
		if (targ_essent_len != local_essent_len)
			continue;

		if (strncmp(local_cand->name, targ_name, local_essent_len) != 0)
			continue;

		pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s in [%s]\n",
			 local_cand->id, btf_kind_str(local_cand->t),
			 local_cand->name, i, btf_kind_str(t), targ_name,
			 targ_btf_name);
		/* grow candidate array by one; old array stays valid on failure */
		new_cands = libbpf_reallocarray(cands->cands, cands->len + 1,
					      sizeof(*cands->cands));
		if (!new_cands)
			return -ENOMEM;

		cand = &new_cands[cands->len];
		cand->btf = targ_btf;
		cand->t = t;
		cand->name = targ_name;
		cand->id = i;

		cands->cands = new_cands;
		cands->len++;
	}
	return 0;
}
4761
4762static int load_module_btfs(struct bpf_object *obj)
4763{
4764 struct bpf_btf_info info;
4765 struct module_btf *mod_btf;
4766 struct btf *btf;
4767 char name[64];
4768 __u32 id = 0, len;
4769 int err, fd;
4770
4771 if (obj->btf_modules_loaded)
4772 return 0;
4773
4774
4775 obj->btf_modules_loaded = true;
4776
4777
4778 if (!kernel_supports(FEAT_MODULE_BTF))
4779 return 0;
4780
4781 while (true) {
4782 err = bpf_btf_get_next_id(id, &id);
4783 if (err && errno == ENOENT)
4784 return 0;
4785 if (err) {
4786 err = -errno;
4787 pr_warn("failed to iterate BTF objects: %d\n", err);
4788 return err;
4789 }
4790
4791 fd = bpf_btf_get_fd_by_id(id);
4792 if (fd < 0) {
4793 if (errno == ENOENT)
4794 continue;
4795 err = -errno;
4796 pr_warn("failed to get BTF object #%d FD: %d\n", id, err);
4797 return err;
4798 }
4799
4800 len = sizeof(info);
4801 memset(&info, 0, sizeof(info));
4802 info.name = ptr_to_u64(name);
4803 info.name_len = sizeof(name);
4804
4805 err = bpf_obj_get_info_by_fd(fd, &info, &len);
4806 if (err) {
4807 err = -errno;
4808 pr_warn("failed to get BTF object #%d info: %d\n", id, err);
4809 goto err_out;
4810 }
4811
4812
4813 if (!info.kernel_btf || strcmp(name, "vmlinux") == 0) {
4814 close(fd);
4815 continue;
4816 }
4817
4818 btf = btf_get_from_fd(fd, obj->btf_vmlinux);
4819 if (IS_ERR(btf)) {
4820 pr_warn("failed to load module [%s]'s BTF object #%d: %ld\n",
4821 name, id, PTR_ERR(btf));
4822 err = PTR_ERR(btf);
4823 goto err_out;
4824 }
4825
4826 err = btf_ensure_mem((void **)&obj->btf_modules, &obj->btf_module_cap,
4827 sizeof(*obj->btf_modules), obj->btf_module_cnt + 1);
4828 if (err)
4829 goto err_out;
4830
4831 mod_btf = &obj->btf_modules[obj->btf_module_cnt++];
4832
4833 mod_btf->btf = btf;
4834 mod_btf->id = id;
4835 mod_btf->fd = fd;
4836 mod_btf->name = strdup(name);
4837 if (!mod_btf->name) {
4838 err = -ENOMEM;
4839 goto err_out;
4840 }
4841 continue;
4842
4843err_out:
4844 close(fd);
4845 return err;
4846 }
4847
4848 return 0;
4849}
4850
/* Build the list of candidate target types for local_type_id: search
 * vmlinux BTF (or its user-provided override) first by kind and
 * essential name; only if nothing matched there (and no override is in
 * use) load and search kernel module BTFs as well. Returns the list on
 * success (possibly empty), ERR_PTR() on error.
 */
static struct core_cand_list *
bpf_core_find_cands(struct bpf_object *obj, const struct btf *local_btf, __u32 local_type_id)
{
	struct core_cand local_cand = {};
	struct core_cand_list *cands;
	const struct btf *main_btf;
	size_t local_essent_len;
	int err, i;

	local_cand.btf = local_btf;
	local_cand.t = btf__type_by_id(local_btf, local_type_id);
	if (!local_cand.t)
		return ERR_PTR(-EINVAL);

	/* anonymous local types can't be matched by name */
	local_cand.name = btf__name_by_offset(local_btf, local_cand.t->name_off);
	if (str_is_empty(local_cand.name))
		return ERR_PTR(-EINVAL);
	local_essent_len = bpf_core_essential_name_len(local_cand.name);

	cands = calloc(1, sizeof(*cands));
	if (!cands)
		return ERR_PTR(-ENOMEM);

	/* search vmlinux BTF (or its override) first */
	main_btf = obj->btf_vmlinux_override ?: obj->btf_vmlinux;
	err = bpf_core_add_cands(&local_cand, local_essent_len, main_btf, "vmlinux", 1, cands);
	if (err)
		goto err_out;

	/* if vmlinux matched anything, skip module BTFs entirely */
	if (cands->len)
		return cands;

	/* overridden vmlinux BTF disables module BTF lookup */
	if (obj->btf_vmlinux_override)
		return cands;

	/* fall back to searching kernel module BTFs */
	err = load_module_btfs(obj);
	if (err)
		goto err_out;

	for (i = 0; i < obj->btf_module_cnt; i++) {
		/* module types start right after vmlinux's, hence the offset */
		err = bpf_core_add_cands(&local_cand, local_essent_len,
					 obj->btf_modules[i].btf,
					 obj->btf_modules[i].name,
					 btf__get_nr_types(obj->btf_vmlinux) + 1,
					 cands);
		if (err)
			goto err_out;
	}

	return cands;
err_out:
	bpf_core_free_cands(cands);
	return ERR_PTR(err);
}
4908
4909
4910
4911
4912
4913
4914
4915
4916
4917
4918
4919
4920
4921
4922
4923
4924
4925
/* Check two types for compatibility for the purpose of field access
 * relocations (mods and typedefs are skipped on both sides first):
 *   - any two STRUCTs/UNIONs are considered compatible;
 *   - otherwise kinds must match exactly;
 *   - PTR and FLOAT are always compatible (pointee is not checked);
 *   - FWDs and ENUMs are compatible if essential names match (flavor
 *     suffix ignored; an anonymous side matches anything);
 *   - INTs are compatible if both start at bit offset 0 (size and
 *     signedness are deliberately ignored);
 *   - ARRAYs are compatible if their element types are (tail "recursion"
 *     via the recur label).
 * Returns 1 if compatible, 0 if not, -EINVAL on error.
 */
static int bpf_core_fields_are_compat(const struct btf *local_btf,
				      __u32 local_id,
				      const struct btf *targ_btf,
				      __u32 targ_id)
{
	const struct btf_type *local_type, *targ_type;

recur:
	local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
	targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
	if (!local_type || !targ_type)
		return -EINVAL;

	if (btf_is_composite(local_type) && btf_is_composite(targ_type))
		return 1;
	if (btf_kind(local_type) != btf_kind(targ_type))
		return 0;

	switch (btf_kind(local_type)) {
	case BTF_KIND_PTR:
	case BTF_KIND_FLOAT:
		return 1;
	case BTF_KIND_FWD:
	case BTF_KIND_ENUM: {
		const char *local_name, *targ_name;
		size_t local_len, targ_len;

		local_name = btf__name_by_offset(local_btf,
						 local_type->name_off);
		targ_name = btf__name_by_offset(targ_btf, targ_type->name_off);
		local_len = bpf_core_essential_name_len(local_name);
		targ_len = bpf_core_essential_name_len(targ_name);

		/* an anonymous name (len 0) matches anything */
		return local_len == 0 || targ_len == 0 ||
		       (local_len == targ_len &&
			strncmp(local_name, targ_name, local_len) == 0);
	}
	case BTF_KIND_INT:
		/* only the starting bit offset has to agree; size and
		 * signedness differences are tolerated
		 */
		return btf_int_offset(local_type) == 0 &&
		       btf_int_offset(targ_type) == 0;
	case BTF_KIND_ARRAY:
		local_id = btf_array(local_type)->type;
		targ_id = btf_array(targ_type)->type;
		goto recur;
	default:
		pr_warn("unexpected kind %d relocated, local [%d], target [%d]\n",
			btf_kind(local_type), local_id, targ_id);
		return 0;
	}
}
4979
4980
4981
4982
4983
4984
4985
4986
4987
4988
4989
4990
4991
4992
4993
4994
4995
/* Given a single high-level named field accessor in the local type, find
 * the corresponding accessor in the target type, recursing through
 * anonymous struct/union members. Along the way, the target's low-level
 * raw spec and bit offset in *spec are extended; unsuccessful attempts
 * are backtracked before trying the next member.
 * On a name match, field type compatibility is verified and
 * *next_targ_id is set to the matched member's type.
 * Returns 1 on match, 0 if no match was found, <0 on error.
 */
static int bpf_core_match_member(const struct btf *local_btf,
				 const struct bpf_core_accessor *local_acc,
				 const struct btf *targ_btf,
				 __u32 targ_id,
				 struct bpf_core_spec *spec,
				 __u32 *next_targ_id)
{
	const struct btf_type *local_type, *targ_type;
	const struct btf_member *local_member, *m;
	const char *local_name, *targ_name;
	__u32 local_id;
	int i, n, found;

	targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
	if (!targ_type)
		return -EINVAL;
	if (!btf_is_composite(targ_type))
		return 0;

	local_id = local_acc->type_id;
	local_type = btf__type_by_id(local_btf, local_id);
	local_member = btf_members(local_type) + local_acc->idx;
	local_name = btf__name_by_offset(local_btf, local_member->name_off);

	n = btf_vlen(targ_type);
	m = btf_members(targ_type);
	for (i = 0; i < n; i++, m++) {
		__u32 bit_offset;

		bit_offset = btf_member_bit_offset(targ_type, i);

		/* too deep struct/union/array nesting */
		if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
			return -E2BIG;

		/* speculatively record this member in the raw spec */
		spec->bit_offset += bit_offset;
		spec->raw_spec[spec->raw_len++] = i;

		targ_name = btf__name_by_offset(targ_btf, m->name_off);
		if (str_is_empty(targ_name)) {
			/* embedded anonymous struct/union: descend into it */
			found = bpf_core_match_member(local_btf, local_acc,
						      targ_btf, m->type,
						      spec, next_targ_id);
			if (found)
				return found;
		} else if (strcmp(local_name, targ_name) == 0) {
			/* matching named field: record high-level accessor */
			struct bpf_core_accessor *targ_acc;

			targ_acc = &spec->spec[spec->len++];
			targ_acc->type_id = targ_id;
			targ_acc->idx = i;
			targ_acc->name = targ_name;

			*next_targ_id = m->type;
			found = bpf_core_fields_are_compat(local_btf,
							   local_member->type,
							   targ_btf, m->type);
			if (!found)
				spec->len--; /* pop accessor on mismatch */
			return found;
		}

		/* no match -- backtrack the speculative raw spec entry */
		spec->bit_offset -= bit_offset;
		spec->raw_len--;
	}

	return 0;
}
5067
5068
5069
5070
5071
5072
5073
5074
5075
5076
5077
5078
5079
5080
5081
5082
5083
5084
5085
5086
/* Check local and target types for compatibility, used for type-based CO-RE
 * relocations. Rules, as implemented below:
 *   - root kinds are compared first, *before* skipping modifiers/typedefs;
 *   - UNKN/STRUCT/UNION/ENUM/FWD are compatible as soon as kinds match;
 *   - INTs are compatible iff neither is bitfield-encoded (non-zero
 *     btf_int_offset());
 *   - PTRs: pointee types are compared recursively;
 *   - ARRAYs: element types are compared recursively (dimensions ignored);
 *   - FUNC_PROTOs: argument counts must match, then each argument type and
 *     finally the return type are compared recursively.
 * Recursion is bounded by a fixed depth of 32 to guard against very deep or
 * degenerate BTF type chains.
 *
 * Returns 1 if compatible, 0 if not, negative error code on error.
 */
static int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
				     const struct btf *targ_btf, __u32 targ_id)
{
	const struct btf_type *local_type, *targ_type;
	int depth = 32; /* max recursion depth */

	/* compare raw (non-skipped) kinds of the two root types */
	local_type = btf__type_by_id(local_btf, local_id);
	targ_type = btf__type_by_id(targ_btf, targ_id);
	if (btf_kind(local_type) != btf_kind(targ_type))
		return 0;

recur:
	depth--;
	if (depth < 0)
		return -EINVAL;

	local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
	targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
	if (!local_type || !targ_type)
		return -EINVAL;

	if (btf_kind(local_type) != btf_kind(targ_type))
		return 0;

	switch (btf_kind(local_type)) {
	case BTF_KIND_UNKN:
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
	case BTF_KIND_ENUM:
	case BTF_KIND_FWD:
		return 1;
	case BTF_KIND_INT:
		/* reject bitfield-encoded ints; size/signedness are not
		 * compared here
		 */
		return btf_int_offset(local_type) == 0 && btf_int_offset(targ_type) == 0;
	case BTF_KIND_PTR:
		local_id = local_type->type;
		targ_id = targ_type->type;
		goto recur;
	case BTF_KIND_ARRAY:
		local_id = btf_array(local_type)->type;
		targ_id = btf_array(targ_type)->type;
		goto recur;
	case BTF_KIND_FUNC_PROTO: {
		struct btf_param *local_p = btf_params(local_type);
		struct btf_param *targ_p = btf_params(targ_type);
		__u16 local_vlen = btf_vlen(local_type);
		__u16 targ_vlen = btf_vlen(targ_type);
		int i, err;

		if (local_vlen != targ_vlen)
			return 0;

		for (i = 0; i < local_vlen; i++, local_p++, targ_p++) {
			skip_mods_and_typedefs(local_btf, local_p->type, &local_id);
			skip_mods_and_typedefs(targ_btf, targ_p->type, &targ_id);
			err = bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id);
			if (err <= 0)
				return err;
		}

		/* tail-recurse into the return type check */
		skip_mods_and_typedefs(local_btf, local_type->type, &local_id);
		skip_mods_and_typedefs(targ_btf, targ_type->type, &targ_id);
		goto recur;
	}
	default:
		pr_warn("unexpected kind %s relocated, local [%d], target [%d]\n",
			btf_kind_str(local_type), local_id, targ_id);
		return 0;
	}
}
5161
5162
5163
5164
5165
/* Try to match local spec to a target type and, if successful, produce a full
 * target spec (high-level accessors, low-level raw spec, and bit offset).
 * Type-based relos delegate to bpf_core_types_are_compat(); enumval-based
 * relos match by essential enumerator name; field-based relos walk the local
 * accessor chain, matching named fields via bpf_core_match_member() and
 * array/index accessors positionally.
 *
 * Returns 1 on match, 0 on no match, negative error code on error.
 */
static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
			       const struct btf *targ_btf, __u32 targ_id,
			       struct bpf_core_spec *targ_spec)
{
	const struct btf_type *targ_type;
	const struct bpf_core_accessor *local_acc;
	struct bpf_core_accessor *targ_acc;
	int i, sz, matched;

	memset(targ_spec, 0, sizeof(*targ_spec));
	targ_spec->btf = targ_btf;
	targ_spec->root_type_id = targ_id;
	targ_spec->relo_kind = local_spec->relo_kind;

	if (core_relo_is_type_based(local_spec->relo_kind)) {
		return bpf_core_types_are_compat(local_spec->btf,
						 local_spec->root_type_id,
						 targ_btf, targ_id);
	}

	local_acc = &local_spec->spec[0];
	targ_acc = &targ_spec->spec[0];

	if (core_relo_is_enumval_based(local_spec->relo_kind)) {
		size_t local_essent_len, targ_essent_len;
		const struct btf_enum *e;
		const char *targ_name;

		/* has to resolve to an enum */
		targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id, &targ_id);
		if (!btf_is_enum(targ_type))
			return 0;

		local_essent_len = bpf_core_essential_name_len(local_acc->name);

		/* find enumerator with matching essential name */
		for (i = 0, e = btf_enum(targ_type); i < btf_vlen(targ_type); i++, e++) {
			targ_name = btf__name_by_offset(targ_spec->btf, e->name_off);
			targ_essent_len = bpf_core_essential_name_len(targ_name);
			if (targ_essent_len != local_essent_len)
				continue;
			if (strncmp(local_acc->name, targ_name, local_essent_len) == 0) {
				targ_acc->type_id = targ_id;
				targ_acc->idx = i;
				targ_acc->name = targ_name;
				targ_spec->len++;
				targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
				targ_spec->raw_len++;
				return 1;
			}
		}
		return 0;
	}

	if (!core_relo_is_field_based(local_spec->relo_kind))
		return -EINVAL;

	for (i = 0; i < local_spec->len; i++, local_acc++, targ_acc++) {
		targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id,
						   &targ_id);
		if (!targ_type)
			return -EINVAL;

		if (local_acc->name) {
			/* named field access */
			matched = bpf_core_match_member(local_spec->btf,
							local_acc,
							targ_btf, targ_id,
							targ_spec, &targ_id);
			if (matched <= 0)
				return matched;
		} else {
			/* for i=0, targ_id is already the element (root)
			 * type; for others, resolve the array element type
			 * first
			 */
			if (i > 0) {
				const struct btf_array *a;
				bool flex;

				if (!btf_is_array(targ_type))
					return 0;

				a = btf_array(targ_type);
				flex = is_flex_arr(targ_btf, targ_acc - 1, a);
				if (!flex && local_acc->idx >= a->nelems)
					return 0;
				if (!skip_mods_and_typedefs(targ_btf, a->type,
							    &targ_id))
					return -EINVAL;
			}

			/* too deep struct/union/array nesting */
			if (targ_spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
				return -E2BIG;

			targ_acc->type_id = targ_id;
			targ_acc->idx = local_acc->idx;
			targ_acc->name = NULL;
			targ_spec->len++;
			targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
			targ_spec->raw_len++;

			/* advance bit offset by idx elements of resolved size */
			sz = btf__resolve_size(targ_btf, targ_id);
			if (sz < 0)
				return sz;
			targ_spec->bit_offset += local_acc->idx * sz * 8;
		}
	}

	return 1;
}
5276
/* Calculate the value a field-based CO-RE relocation should patch in.
 *
 * @prog: program, used for error reporting only
 * @relo: relocation record, determines which quantity is computed
 * @spec: matched spec, or NULL when no target matched
 * @val: out: computed relocation value
 * @field_sz: out: byte size of the field; set only for BYTE_OFFSET relos on
 *	      non-bitfield fields, left 0 otherwise
 * @type_id: out: BTF type ID of the field; same conditions as @field_sz
 * @validate: out (optional): whether the existing instruction value can be
 *	      validated against the locally computed one
 *
 * Returns 0 on success. -EUCLEAN means "no target spec" for a non-EXISTS
 * relo and signals that the instruction should be poisoned; other negative
 * values are hard errors.
 */
static int bpf_core_calc_field_relo(const struct bpf_program *prog,
				    const struct bpf_core_relo *relo,
				    const struct bpf_core_spec *spec,
				    __u32 *val, __u32 *field_sz, __u32 *type_id,
				    bool *validate)
{
	const struct bpf_core_accessor *acc;
	const struct btf_type *t;
	__u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id;
	const struct btf_member *m;
	const struct btf_type *mt;
	bool bitfield;
	__s64 sz;

	*field_sz = 0;

	if (relo->kind == BPF_FIELD_EXISTS) {
		*val = spec ? 1 : 0;
		return 0;
	}

	if (!spec)
		return -EUCLEAN; /* request instruction poisoning */

	acc = &spec->spec[spec->len - 1];
	t = btf__type_by_id(spec->btf, acc->type_id);

	/* a[n] accessor needs special handling: there is no named member */
	if (!acc->name) {
		if (relo->kind == BPF_FIELD_BYTE_OFFSET) {
			*val = spec->bit_offset / 8;
			/* remember field size for load/store mem size
			 * adjustment */
			sz = btf__resolve_size(spec->btf, acc->type_id);
			if (sz < 0)
				return -EINVAL;
			*field_sz = sz;
			*type_id = acc->type_id;
		} else if (relo->kind == BPF_FIELD_BYTE_SIZE) {
			sz = btf__resolve_size(spec->btf, acc->type_id);
			if (sz < 0)
				return -EINVAL;
			*val = sz;
		} else {
			pr_warn("prog '%s': relo %d at insn #%d can't be applied to array access\n",
				prog->name, relo->kind, relo->insn_off / 8);
			return -EINVAL;
		}
		if (validate)
			*validate = true;
		return 0;
	}

	m = btf_members(t) + acc->idx;
	mt = skip_mods_and_typedefs(spec->btf, m->type, &field_type_id);
	bit_off = spec->bit_offset;
	bit_sz = btf_member_bitfield_size(t, acc->idx);

	bitfield = bit_sz > 0;
	if (bitfield) {
		byte_sz = mt->size;
		byte_off = bit_off / 8 / byte_sz * byte_sz;
		/* figure out smallest int size necessary for bitfield load */
		while (bit_off + bit_sz - byte_off * 8 > byte_sz * 8) {
			if (byte_sz >= 8) {
				/* bitfield can't be read with 64-bit read */
				pr_warn("prog '%s': relo %d at insn #%d can't be satisfied for bitfield\n",
					prog->name, relo->kind, relo->insn_off / 8);
				return -E2BIG;
			}
			byte_sz *= 2;
			byte_off = bit_off / 8 / byte_sz * byte_sz;
		}
	} else {
		sz = btf__resolve_size(spec->btf, field_type_id);
		if (sz < 0)
			return -EINVAL;
		byte_sz = sz;
		byte_off = spec->bit_offset / 8;
		bit_sz = byte_sz * 8;
	}

	/* for bitfields, byte offset/size depend on the load size chosen
	 * above and can't be validated against the compiler-recorded value;
	 * non-bitfield accesses are validated by default (kinds below may
	 * re-enable validation explicitly)
	 */
	if (validate)
		*validate = !bitfield;

	switch (relo->kind) {
	case BPF_FIELD_BYTE_OFFSET:
		*val = byte_off;
		if (!bitfield) {
			*field_sz = byte_sz;
			*type_id = field_type_id;
		}
		break;
	case BPF_FIELD_BYTE_SIZE:
		*val = byte_sz;
		break;
	case BPF_FIELD_SIGNED:
		/* enums count as signed; ints use their BTF encoding */
		*val = btf_is_enum(mt) ||
		       (btf_int_encoding(mt) & BTF_INT_SIGNED);
		if (validate)
			*validate = true; /* signedness is never ambiguous */
		break;
	case BPF_FIELD_LSHIFT_U64:
#if __BYTE_ORDER == __LITTLE_ENDIAN
		*val = 64 - (bit_off + bit_sz - byte_off * 8);
#else
		*val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8);
#endif
		break;
	case BPF_FIELD_RSHIFT_U64:
		*val = 64 - bit_sz;
		if (validate)
			*validate = true; /* right shift is never ambiguous */
		break;
	case BPF_FIELD_EXISTS:
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
5402
5403static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo,
5404 const struct bpf_core_spec *spec,
5405 __u32 *val)
5406{
5407 __s64 sz;
5408
5409
5410 if (!spec) {
5411 *val = 0;
5412 return 0;
5413 }
5414
5415 switch (relo->kind) {
5416 case BPF_TYPE_ID_TARGET:
5417 *val = spec->root_type_id;
5418 break;
5419 case BPF_TYPE_EXISTS:
5420 *val = 1;
5421 break;
5422 case BPF_TYPE_SIZE:
5423 sz = btf__resolve_size(spec->btf, spec->root_type_id);
5424 if (sz < 0)
5425 return -EINVAL;
5426 *val = sz;
5427 break;
5428 case BPF_TYPE_ID_LOCAL:
5429
5430 default:
5431 return -EOPNOTSUPP;
5432 }
5433
5434 return 0;
5435}
5436
5437static int bpf_core_calc_enumval_relo(const struct bpf_core_relo *relo,
5438 const struct bpf_core_spec *spec,
5439 __u32 *val)
5440{
5441 const struct btf_type *t;
5442 const struct btf_enum *e;
5443
5444 switch (relo->kind) {
5445 case BPF_ENUMVAL_EXISTS:
5446 *val = spec ? 1 : 0;
5447 break;
5448 case BPF_ENUMVAL_VALUE:
5449 if (!spec)
5450 return -EUCLEAN;
5451 t = btf__type_by_id(spec->btf, spec->spec[0].type_id);
5452 e = btf_enum(t) + spec->spec[0].idx;
5453 *val = e->val;
5454 break;
5455 default:
5456 return -EOPNOTSUPP;
5457 }
5458
5459 return 0;
5460}
5461
/* Result of a single CO-RE relocation calculation, produced by
 * bpf_core_calc_relo() and consumed by bpf_core_patch_insn().
 */
struct bpf_core_relo_res
{
	/* expected value in the instruction, unless validate == false */
	__u32 orig_val;
	/* new value that needs to be patched up to */
	__u32 new_val;
	/* relocation unsuccessful, poison instruction, but don't fail load */
	bool poison;
	/* some relocations can't be validated against orig_val */
	bool validate;
	/* for field byte offset relocations on direct memory loads/stores,
	 * original and resolved field sizes and type IDs are recorded so
	 * that the load/store size (BPF_SIZE bits of the opcode) can be
	 * adjusted when the target field was resized; when such adjustment
	 * is not safe (signed ints, mixed kinds), fail_memsz_adjust is set
	 * and the instruction is poisoned instead (checked only for
	 * LDX/ST/STX instructions)
	 */
	bool fail_memsz_adjust;
	__u32 orig_sz;
	__u32 orig_type_id;
	__u32 new_sz;
	__u32 new_type_id;
};
5486
5487
5488
5489
5490
5491
5492
/* Calculate the full relocation result (bpf_core_relo_res) for the given
 * relocation kind, combining the local spec and the (possibly NULL) target
 * spec. Dispatches to the field-, type-, or enumval-based calculator.
 * -EUCLEAN from a calculator is translated into res->poison; -EOPNOTSUPP
 * produces a warning about an unrecognized relocation kind.
 * Returns 0 on success (including the poison outcome), negative otherwise.
 */
static int bpf_core_calc_relo(const struct bpf_program *prog,
			      const struct bpf_core_relo *relo,
			      int relo_idx,
			      const struct bpf_core_spec *local_spec,
			      const struct bpf_core_spec *targ_spec,
			      struct bpf_core_relo_res *res)
{
	int err = -EOPNOTSUPP;

	res->orig_val = 0;
	res->new_val = 0;
	res->poison = false;
	res->validate = true;
	res->fail_memsz_adjust = false;
	res->orig_sz = res->new_sz = 0;
	res->orig_type_id = res->new_type_id = 0;

	if (core_relo_is_field_based(relo->kind)) {
		err = bpf_core_calc_field_relo(prog, relo, local_spec,
					       &res->orig_val, &res->orig_sz,
					       &res->orig_type_id, &res->validate);
		err = err ?: bpf_core_calc_field_relo(prog, relo, targ_spec,
						      &res->new_val, &res->new_sz,
						      &res->new_type_id, NULL);
		if (err)
			goto done;

		/* Decide whether it's safe to adjust a load/store memory
		 * size when original and resolved field sizes differ.
		 */
		res->fail_memsz_adjust = false;
		if (res->orig_sz != res->new_sz) {
			const struct btf_type *orig_t, *new_t;

			orig_t = btf__type_by_id(local_spec->btf, res->orig_type_id);
			new_t = btf__type_by_id(targ_spec->btf, res->new_type_id);

			/* Two cases are accepted below:
			 *   - both sides are pointers (e.g., 32-bit kernel
			 *     pointer read into a 64-bit BPF register);
			 *   - both sides are integers and neither is signed,
			 *     so zero-extension preserves the value.
			 * Everything else would yield a wrong value when the
			 * load/store size is changed, so it is flagged and
			 * the instruction gets poisoned by the patching code.
			 */
			if (btf_is_ptr(orig_t) && btf_is_ptr(new_t))
				goto done;
			if (btf_is_int(orig_t) && btf_is_int(new_t) &&
			    btf_int_encoding(orig_t) != BTF_INT_SIGNED &&
			    btf_int_encoding(new_t) != BTF_INT_SIGNED)
				goto done;

			/* mark as invalid mem size adjustment; this is only
			 * acted upon for LDX/ST/STX instructions
			 */
			res->fail_memsz_adjust = true;
		}
	} else if (core_relo_is_type_based(relo->kind)) {
		err = bpf_core_calc_type_relo(relo, local_spec, &res->orig_val);
		err = err ?: bpf_core_calc_type_relo(relo, targ_spec, &res->new_val);
	} else if (core_relo_is_enumval_based(relo->kind)) {
		err = bpf_core_calc_enumval_relo(relo, local_spec, &res->orig_val);
		err = err ?: bpf_core_calc_enumval_relo(relo, targ_spec, &res->new_val);
	}

done:
	if (err == -EUCLEAN) {
		/* -EUCLEAN means a poisoning request, not a hard failure */
		res->poison = true;
		err = 0;
	} else if (err == -EOPNOTSUPP) {
		/* unrecognized relocation kind */
		pr_warn("prog '%s': relo #%d: unrecognized CO-RE relocation %s (%d) at insn #%d\n",
			prog->name, relo_idx, core_relo_kind_str(relo->kind),
			relo->kind, relo->insn_off / 8);
	}

	return err;
}
5578
5579
5580
5581
5582
5583static void bpf_core_poison_insn(struct bpf_program *prog, int relo_idx,
5584 int insn_idx, struct bpf_insn *insn)
5585{
5586 pr_debug("prog '%s': relo #%d: substituting insn #%d w/ invalid insn\n",
5587 prog->name, relo_idx, insn_idx);
5588 insn->code = BPF_JMP | BPF_CALL;
5589 insn->dst_reg = 0;
5590 insn->src_reg = 0;
5591 insn->off = 0;
5592
5593
5594
5595
5596 insn->imm = 195896080;
5597}
5598
/* Is this the first half of a two-slot 64-bit immediate load? */
static bool is_ldimm64(struct bpf_insn *insn)
{
	const __u8 ldimm64_opcode = BPF_LD | BPF_IMM | BPF_DW;

	return insn->code == ldimm64_opcode;
}
5603
/* Decode the BPF_SIZE bits of a load/store opcode into a byte count.
 * Returns -1 for an unrecognized size encoding.
 */
static int insn_bpf_size_to_bytes(struct bpf_insn *insn)
{
	__u8 sz_mode = BPF_SIZE(insn->code);

	if (sz_mode == BPF_DW)
		return 8;
	if (sz_mode == BPF_W)
		return 4;
	if (sz_mode == BPF_H)
		return 2;
	if (sz_mode == BPF_B)
		return 1;
	return -1;
}
5614
/* Encode an access size in bytes as BPF_SIZE opcode bits.
 * Returns -1 if sz is not a valid BPF memory access size.
 */
static int insn_bytes_to_bpf_size(__u32 sz)
{
	if (sz == 1)
		return BPF_B;
	if (sz == 2)
		return BPF_H;
	if (sz == 4)
		return BPF_W;
	if (sz == 8)
		return BPF_DW;
	return -1;
}
5625
5626
5627
5628
5629
5630
5631
5632
5633
5634
5635
5636
5637
5638
5639
5640
5641
5642
/* Patch a relocatable BPF instruction with the precomputed relocation result.
 * Supported instruction classes:
 *   - ALU/ALU64 with BPF_K: patch 32-bit immediate;
 *   - LDX/ST/STX: patch 16-bit offset, optionally adjusting the memory
 *     access size when the target field was resized (or poisoning the insn
 *     if such an adjustment would be unsafe);
 *   - LD (ldimm64 only): patch 64-bit immediate (new value is 32-bit, the
 *     upper half is zeroed).
 * If res->poison is set, the instruction (and, for ldimm64, its second
 * slot) is replaced with an invalid call instead.
 * Returns 0 on success, negative error code otherwise.
 */
static int bpf_core_patch_insn(struct bpf_program *prog,
			       const struct bpf_core_relo *relo,
			       int relo_idx,
			       const struct bpf_core_relo_res *res)
{
	__u32 orig_val, new_val;
	struct bpf_insn *insn;
	int insn_idx;
	__u8 class;

	if (relo->insn_off % BPF_INSN_SZ)
		return -EINVAL;
	insn_idx = relo->insn_off / BPF_INSN_SZ;
	/* relocation offsets are relative to the containing ELF section;
	 * convert to this program's frame of reference by subtracting the
	 * program's own in-section instruction offset
	 */
	insn_idx = insn_idx - prog->sec_insn_off;
	insn = &prog->insns[insn_idx];
	class = BPF_CLASS(insn->code);

	if (res->poison) {
poison:
		/* poison the second half of ldimm64 too, to avoid a
		 * confusing "unknown opcode 00" verifier error
		 */
		if (is_ldimm64(insn))
			bpf_core_poison_insn(prog, relo_idx, insn_idx + 1, insn + 1);
		bpf_core_poison_insn(prog, relo_idx, insn_idx, insn);
		return 0;
	}

	orig_val = res->orig_val;
	new_val = res->new_val;

	switch (class) {
	case BPF_ALU:
	case BPF_ALU64:
		if (BPF_SRC(insn->code) != BPF_K)
			return -EINVAL;
		if (res->validate && insn->imm != orig_val) {
			pr_warn("prog '%s': relo #%d: unexpected insn #%d (ALU/ALU64) value: got %u, exp %u -> %u\n",
				prog->name, relo_idx,
				insn_idx, insn->imm, orig_val, new_val);
			return -EINVAL;
		}
		orig_val = insn->imm;
		insn->imm = new_val;
		pr_debug("prog '%s': relo #%d: patched insn #%d (ALU/ALU64) imm %u -> %u\n",
			 prog->name, relo_idx, insn_idx,
			 orig_val, new_val);
		break;
	case BPF_LDX:
	case BPF_ST:
	case BPF_STX:
		if (res->validate && insn->off != orig_val) {
			pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDX/ST/STX) value: got %u, exp %u -> %u\n",
				prog->name, relo_idx, insn_idx, insn->off, orig_val, new_val);
			return -EINVAL;
		}
		/* insn->off is a 16-bit signed field */
		if (new_val > SHRT_MAX) {
			pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) value too big: %u\n",
				prog->name, relo_idx, insn_idx, new_val);
			return -ERANGE;
		}
		if (res->fail_memsz_adjust) {
			pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) accesses field incorrectly. "
				"Make sure you are accessing pointers, unsigned integers, or fields of matching type and size.\n",
				prog->name, relo_idx, insn_idx);
			goto poison;
		}

		orig_val = insn->off;
		insn->off = new_val;
		pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) off %u -> %u\n",
			 prog->name, relo_idx, insn_idx, orig_val, new_val);

		/* adjust BPF_SIZE bits if the target field was resized */
		if (res->new_sz != res->orig_sz) {
			int insn_bytes_sz, insn_bpf_sz;

			insn_bytes_sz = insn_bpf_size_to_bytes(insn);
			if (insn_bytes_sz != res->orig_sz) {
				pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) unexpected mem size: got %d, exp %u\n",
					prog->name, relo_idx, insn_idx, insn_bytes_sz, res->orig_sz);
				return -EINVAL;
			}

			insn_bpf_sz = insn_bytes_to_bpf_size(res->new_sz);
			if (insn_bpf_sz < 0) {
				pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) invalid new mem size: %u\n",
					prog->name, relo_idx, insn_idx, res->new_sz);
				return -EINVAL;
			}

			insn->code = BPF_MODE(insn->code) | insn_bpf_sz | BPF_CLASS(insn->code);
			pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) mem_sz %u -> %u\n",
				 prog->name, relo_idx, insn_idx, res->orig_sz, res->new_sz);
		}
		break;
	case BPF_LD: {
		__u64 imm;

		/* verify this is a well-formed two-slot ldimm64 pair */
		if (!is_ldimm64(insn) ||
		    insn[0].src_reg != 0 || insn[0].off != 0 ||
		    insn_idx + 1 >= prog->insns_cnt ||
		    insn[1].code != 0 || insn[1].dst_reg != 0 ||
		    insn[1].src_reg != 0 || insn[1].off != 0) {
			pr_warn("prog '%s': relo #%d: insn #%d (LDIMM64) has unexpected form\n",
				prog->name, relo_idx, insn_idx);
			return -EINVAL;
		}

		imm = insn[0].imm + ((__u64)insn[1].imm << 32);
		if (res->validate && imm != orig_val) {
			pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDIMM64) value: got %llu, exp %u -> %u\n",
				prog->name, relo_idx,
				insn_idx, (unsigned long long)imm,
				orig_val, new_val);
			return -EINVAL;
		}

		/* new value is 32-bit; clear the upper half */
		insn[0].imm = new_val;
		insn[1].imm = 0;
		pr_debug("prog '%s': relo #%d: patched insn #%d (LDIMM64) imm64 %llu -> %u\n",
			 prog->name, relo_idx, insn_idx,
			 (unsigned long long)imm, new_val);
		break;
	}
	default:
		pr_warn("prog '%s': relo #%d: trying to relocate unrecognized insn #%d, code:0x%x, src:0x%x, dst:0x%x, off:0x%x, imm:0x%x\n",
			prog->name, relo_idx, insn_idx, insn->code,
			insn->src_reg, insn->dst_reg, insn->off, insn->imm);
		return -EINVAL;
	}

	return 0;
}
5780
5781
5782
5783
5784
/* Emit a human-readable representation of a CO-RE spec via libbpf_print()
 * at the given log level. Output shape (no trailing newline):
 *   [<type-id>] <kind> <type-name>                for type-based relos;
 *   ...::<enumerator> = <value>                   for enumval-based relos;
 *   ....field[idx] (<raw:spec> @ offset B[.b])    for field-based relos.
 */
static void bpf_core_dump_spec(int level, const struct bpf_core_spec *spec)
{
	const struct btf_type *t;
	const struct btf_enum *e;
	const char *s;
	__u32 type_id;
	int i;

	type_id = spec->root_type_id;
	t = btf__type_by_id(spec->btf, type_id);
	s = btf__name_by_offset(spec->btf, t->name_off);

	libbpf_print(level, "[%u] %s %s", type_id, btf_kind_str(t), str_is_empty(s) ? "<anon>" : s);

	if (core_relo_is_type_based(spec->relo_kind))
		return;

	if (core_relo_is_enumval_based(spec->relo_kind)) {
		/* raw_spec[0] is the enumerator index within the enum */
		t = skip_mods_and_typedefs(spec->btf, type_id, NULL);
		e = btf_enum(t) + spec->raw_spec[0];
		s = btf__name_by_offset(spec->btf, e->name_off);

		libbpf_print(level, "::%s = %u", s, e->val);
		return;
	}

	if (core_relo_is_field_based(spec->relo_kind)) {
		/* high-level accessors: named fields and array indices */
		for (i = 0; i < spec->len; i++) {
			if (spec->spec[i].name)
				libbpf_print(level, ".%s", spec->spec[i].name);
			else if (i > 0 || spec->spec[i].idx > 0)
				libbpf_print(level, "[%u]", spec->spec[i].idx);
		}

		/* low-level raw spec plus the resulting bit offset */
		libbpf_print(level, " (");
		for (i = 0; i < spec->raw_len; i++)
			libbpf_print(level, "%s%d", i == 0 ? "" : ":", spec->raw_spec[i]);

		if (spec->bit_offset % 8)
			libbpf_print(level, " @ offset %u.%u)",
				     spec->bit_offset / 8, spec->bit_offset % 8);
		else
			libbpf_print(level, " @ offset %u)", spec->bit_offset / 8);
		return;
	}
}
5831
/* Hash callback for the candidate cache: keys are u32 type IDs packed into
 * pointers (see u32_as_hash_key()), so the key value is its own hash.
 */
static size_t bpf_core_hash_fn(const void *key, void *ctx)
{
	return (size_t)key;
}
5836
/* Equality callback for the candidate cache: keys are integer type IDs
 * disguised as pointers, so direct pointer comparison is exact equality.
 */
static bool bpf_core_equal_fn(const void *k1, const void *k2, void *ctx)
{
	return k1 == k2;
}
5841
/* Pack a u32 BTF type ID into a pointer-sized hashmap key; widening goes
 * through uintptr_t so the int-to-pointer conversion is well-defined.
 */
static void *u32_as_hash_key(__u32 x)
{
	uintptr_t key = x;

	return (void *)key;
}
5846
5847
5848
5849
5850
5851
5852
5853
5854
5855
5856
5857
5858
5859
5860
5861
5862
5863
5864
5865
5866
5867
5868
5869
5870
5871
5872
5873
5874
5875
5876
5877
5878
5879
5880
5881
5882
5883
5884
5885
5886
5887
5888
5889
5890
5891
5892
5893
5894
5895
5896
5897static int bpf_core_apply_relo(struct bpf_program *prog,
5898 const struct bpf_core_relo *relo,
5899 int relo_idx,
5900 const struct btf *local_btf,
5901 struct hashmap *cand_cache)
5902{
5903 struct bpf_core_spec local_spec, cand_spec, targ_spec = {};
5904 const void *type_key = u32_as_hash_key(relo->type_id);
5905 struct bpf_core_relo_res cand_res, targ_res;
5906 const struct btf_type *local_type;
5907 const char *local_name;
5908 struct core_cand_list *cands = NULL;
5909 __u32 local_id;
5910 const char *spec_str;
5911 int i, j, err;
5912
5913 local_id = relo->type_id;
5914 local_type = btf__type_by_id(local_btf, local_id);
5915 if (!local_type)
5916 return -EINVAL;
5917
5918 local_name = btf__name_by_offset(local_btf, local_type->name_off);
5919 if (!local_name)
5920 return -EINVAL;
5921
5922 spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
5923 if (str_is_empty(spec_str))
5924 return -EINVAL;
5925
5926 err = bpf_core_parse_spec(local_btf, local_id, spec_str, relo->kind, &local_spec);
5927 if (err) {
5928 pr_warn("prog '%s': relo #%d: parsing [%d] %s %s + %s failed: %d\n",
5929 prog->name, relo_idx, local_id, btf_kind_str(local_type),
5930 str_is_empty(local_name) ? "<anon>" : local_name,
5931 spec_str, err);
5932 return -EINVAL;
5933 }
5934
5935 pr_debug("prog '%s': relo #%d: kind <%s> (%d), spec is ", prog->name,
5936 relo_idx, core_relo_kind_str(relo->kind), relo->kind);
5937 bpf_core_dump_spec(LIBBPF_DEBUG, &local_spec);
5938 libbpf_print(LIBBPF_DEBUG, "\n");
5939
5940
5941 if (relo->kind == BPF_TYPE_ID_LOCAL) {
5942 targ_res.validate = true;
5943 targ_res.poison = false;
5944 targ_res.orig_val = local_spec.root_type_id;
5945 targ_res.new_val = local_spec.root_type_id;
5946 goto patch_insn;
5947 }
5948
5949
5950 if (str_is_empty(spec_str)) {
5951 pr_warn("prog '%s': relo #%d: <%s> (%d) relocation doesn't support anonymous types\n",
5952 prog->name, relo_idx, core_relo_kind_str(relo->kind), relo->kind);
5953 return -EOPNOTSUPP;
5954 }
5955
5956 if (!hashmap__find(cand_cache, type_key, (void **)&cands)) {
5957 cands = bpf_core_find_cands(prog->obj, local_btf, local_id);
5958 if (IS_ERR(cands)) {
5959 pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld\n",
5960 prog->name, relo_idx, local_id, btf_kind_str(local_type),
5961 local_name, PTR_ERR(cands));
5962 return PTR_ERR(cands);
5963 }
5964 err = hashmap__set(cand_cache, type_key, cands, NULL, NULL);
5965 if (err) {
5966 bpf_core_free_cands(cands);
5967 return err;
5968 }
5969 }
5970
5971 for (i = 0, j = 0; i < cands->len; i++) {
5972 err = bpf_core_spec_match(&local_spec, cands->cands[i].btf,
5973 cands->cands[i].id, &cand_spec);
5974 if (err < 0) {
5975 pr_warn("prog '%s': relo #%d: error matching candidate #%d ",
5976 prog->name, relo_idx, i);
5977 bpf_core_dump_spec(LIBBPF_WARN, &cand_spec);
5978 libbpf_print(LIBBPF_WARN, ": %d\n", err);
5979 return err;
5980 }
5981
5982 pr_debug("prog '%s': relo #%d: %s candidate #%d ", prog->name,
5983 relo_idx, err == 0 ? "non-matching" : "matching", i);
5984 bpf_core_dump_spec(LIBBPF_DEBUG, &cand_spec);
5985 libbpf_print(LIBBPF_DEBUG, "\n");
5986
5987 if (err == 0)
5988 continue;
5989
5990 err = bpf_core_calc_relo(prog, relo, relo_idx, &local_spec, &cand_spec, &cand_res);
5991 if (err)
5992 return err;
5993
5994 if (j == 0) {
5995 targ_res = cand_res;
5996 targ_spec = cand_spec;
5997 } else if (cand_spec.bit_offset != targ_spec.bit_offset) {
5998
5999
6000
6001 pr_warn("prog '%s': relo #%d: field offset ambiguity: %u != %u\n",
6002 prog->name, relo_idx, cand_spec.bit_offset,
6003 targ_spec.bit_offset);
6004 return -EINVAL;
6005 } else if (cand_res.poison != targ_res.poison || cand_res.new_val != targ_res.new_val) {
6006
6007
6008
6009
6010 pr_warn("prog '%s': relo #%d: relocation decision ambiguity: %s %u != %s %u\n",
6011 prog->name, relo_idx,
6012 cand_res.poison ? "failure" : "success", cand_res.new_val,
6013 targ_res.poison ? "failure" : "success", targ_res.new_val);
6014 return -EINVAL;
6015 }
6016
6017 cands->cands[j++] = cands->cands[i];
6018 }
6019
6020
6021
6022
6023
6024
6025
6026
6027
6028 if (j > 0)
6029 cands->len = j;
6030
6031
6032
6033
6034
6035
6036
6037
6038
6039
6040
6041
6042 if (j == 0) {
6043 pr_debug("prog '%s': relo #%d: no matching targets found\n",
6044 prog->name, relo_idx);
6045
6046
6047 err = bpf_core_calc_relo(prog, relo, relo_idx, &local_spec, NULL, &targ_res);
6048 if (err)
6049 return err;
6050 }
6051
6052patch_insn:
6053
6054 err = bpf_core_patch_insn(prog, relo, relo_idx, &targ_res);
6055 if (err) {
6056 pr_warn("prog '%s': relo #%d: failed to patch insn #%zu: %d\n",
6057 prog->name, relo_idx, relo->insn_off / BPF_INSN_SZ, err);
6058 return -EINVAL;
6059 }
6060
6061 return 0;
6062}
6063
6064static int
6065bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
6066{
6067 const struct btf_ext_info_sec *sec;
6068 const struct bpf_core_relo *rec;
6069 const struct btf_ext_info *seg;
6070 struct hashmap_entry *entry;
6071 struct hashmap *cand_cache = NULL;
6072 struct bpf_program *prog;
6073 const char *sec_name;
6074 int i, err = 0, insn_idx, sec_idx;
6075
6076 if (obj->btf_ext->core_relo_info.len == 0)
6077 return 0;
6078
6079 if (targ_btf_path) {
6080 obj->btf_vmlinux_override = btf__parse(targ_btf_path, NULL);
6081 if (IS_ERR_OR_NULL(obj->btf_vmlinux_override)) {
6082 err = PTR_ERR(obj->btf_vmlinux_override);
6083 pr_warn("failed to parse target BTF: %d\n", err);
6084 return err;
6085 }
6086 }
6087
6088 cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL);
6089 if (IS_ERR(cand_cache)) {
6090 err = PTR_ERR(cand_cache);
6091 goto out;
6092 }
6093
6094 seg = &obj->btf_ext->core_relo_info;
6095 for_each_btf_ext_sec(seg, sec) {
6096 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
6097 if (str_is_empty(sec_name)) {
6098 err = -EINVAL;
6099 goto out;
6100 }
6101
6102
6103
6104
6105
6106
6107 prog = NULL;
6108 for (i = 0; i < obj->nr_programs; i++) {
6109 prog = &obj->programs[i];
6110 if (strcmp(prog->sec_name, sec_name) == 0)
6111 break;
6112 }
6113 if (!prog) {
6114 pr_warn("sec '%s': failed to find a BPF program\n", sec_name);
6115 return -ENOENT;
6116 }
6117 sec_idx = prog->sec_idx;
6118
6119 pr_debug("sec '%s': found %d CO-RE relocations\n",
6120 sec_name, sec->num_info);
6121
6122 for_each_btf_ext_rec(seg, sec, i, rec) {
6123 insn_idx = rec->insn_off / BPF_INSN_SZ;
6124 prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
6125 if (!prog) {
6126 pr_warn("sec '%s': failed to find program at insn #%d for CO-RE offset relocation #%d\n",
6127 sec_name, insn_idx, i);
6128 err = -EINVAL;
6129 goto out;
6130 }
6131
6132
6133
6134 if (!prog->load)
6135 continue;
6136
6137 err = bpf_core_apply_relo(prog, rec, i, obj->btf, cand_cache);
6138 if (err) {
6139 pr_warn("prog '%s': relo #%d: failed to relocate: %d\n",
6140 prog->name, i, err);
6141 goto out;
6142 }
6143 }
6144 }
6145
6146out:
6147
6148 btf__free(obj->btf_vmlinux_override);
6149 obj->btf_vmlinux_override = NULL;
6150
6151 if (!IS_ERR_OR_NULL(cand_cache)) {
6152 hashmap__for_each_entry(cand_cache, entry, i) {
6153 bpf_core_free_cands(entry->value);
6154 }
6155 hashmap__free(cand_cache);
6156 }
6157 return err;
6158}
6159
6160
6161
6162
6163
6164
/* Resolve this program's data relocations in place:
 *   - RELO_LD64: map reference -> pseudo-map-FD ldimm64;
 *   - RELO_DATA: global data reference -> pseudo-map-value ldimm64
 *     (map FD + in-map offset);
 *   - RELO_EXTERN: kconfig externs -> kconfig map value; ksym externs ->
 *     either BTF ID + obj FD, or a raw 64-bit address split across the
 *     ldimm64 pair;
 *   - RELO_CALL: left alone here (handled by subprogram relocation).
 * Returns 0 on success, -EINVAL on an unknown relocation type.
 */
static int
bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
{
	int i;

	for (i = 0; i < prog->nr_reloc; i++) {
		struct reloc_desc *relo = &prog->reloc_desc[i];
		struct bpf_insn *insn = &prog->insns[relo->insn_idx];
		struct extern_desc *ext;

		switch (relo->type) {
		case RELO_LD64:
			insn[0].src_reg = BPF_PSEUDO_MAP_FD;
			insn[0].imm = obj->maps[relo->map_idx].fd;
			relo->processed = true;
			break;
		case RELO_DATA:
			insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
			/* NOTE: insn[1].imm must be computed from the old
			 * insn[0].imm before insn[0].imm is overwritten with
			 * the map FD below
			 */
			insn[1].imm = insn[0].imm + relo->sym_off;
			insn[0].imm = obj->maps[relo->map_idx].fd;
			relo->processed = true;
			break;
		case RELO_EXTERN:
			ext = &obj->externs[relo->sym_off];
			if (ext->type == EXT_KCFG) {
				insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
				insn[0].imm = obj->maps[obj->kconfig_map_idx].fd;
				insn[1].imm = ext->kcfg.data_off;
			} else {
				if (ext->ksym.type_id) {
					/* typed ksym: BTF ID + object FD */
					insn[0].src_reg = BPF_PSEUDO_BTF_ID;
					insn[0].imm = ext->ksym.kernel_btf_id;
					insn[1].imm = ext->ksym.kernel_btf_obj_fd;
				} else {
					/* typeless ksym: raw 64-bit address
					 * split across the ldimm64 pair */
					insn[0].imm = (__u32)ext->ksym.addr;
					insn[1].imm = ext->ksym.addr >> 32;
				}
			}
			relo->processed = true;
			break;
		case RELO_CALL:
			/* handled already */
			break;
		default:
			pr_warn("prog '%s': relo #%d: bad relo type %d\n",
				prog->name, i, relo->type);
			return -EINVAL;
		}
	}

	return 0;
}
6217
/* Append the slice of .BTF.ext func/line info records belonging to @prog to
 * the accumulated per-main-program info buffer (*prog_info), growing it via
 * realloc, and rewrite each record's instruction offset from byte units
 * relative to the ELF section into 8-byte instruction units relative to the
 * program's relocated position.
 * Returns 0 on success, -ENOENT if no records belong to @prog's section,
 * -ENOMEM/-EINVAL on error.
 */
static int adjust_prog_btf_ext_info(const struct bpf_object *obj,
				    const struct bpf_program *prog,
				    const struct btf_ext_info *ext_info,
				    void **prog_info, __u32 *prog_rec_cnt,
				    __u32 *prog_rec_sz)
{
	void *copy_start = NULL, *copy_end = NULL;
	void *rec, *rec_end, *new_prog_info;
	const struct btf_ext_info_sec *sec;
	size_t old_sz, new_sz;
	const char *sec_name;
	int i, off_adj;

	for_each_btf_ext_sec(ext_info, sec) {
		sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
		if (!sec_name)
			return -EINVAL;
		if (strcmp(sec_name, prog->sec_name) != 0)
			continue;

		/* find the contiguous run of records whose insn offsets fall
		 * within this (sub-)program's section range; each record
		 * starts with a __u32 byte offset
		 */
		for_each_btf_ext_rec(ext_info, sec, i, rec) {
			__u32 insn_off = *(__u32 *)rec / BPF_INSN_SZ;

			if (insn_off < prog->sec_insn_off)
				continue;
			if (insn_off >= prog->sec_insn_off + prog->sec_insn_cnt)
				break;

			if (!copy_start)
				copy_start = rec;
			copy_end = rec + ext_info->rec_size;
		}

		if (!copy_start)
			return -ENOENT;

		/* append func/line info of a given (sub-)program to the main
		 * program's func/line info
		 */
		old_sz = (size_t)(*prog_rec_cnt) * ext_info->rec_size;
		new_sz = old_sz + (copy_end - copy_start);
		new_prog_info = realloc(*prog_info, new_sz);
		if (!new_prog_info)
			return -ENOMEM;
		*prog_info = new_prog_info;
		*prog_rec_cnt = new_sz / ext_info->rec_size;
		memcpy(new_prog_info + old_sz, copy_start, copy_end - copy_start);

		/* Kernel instruction offsets are in units of 8-byte
		 * instructions, while .BTF.ext offsets are in bytes; convert
		 * each appended record and shift it by the program's
		 * relocated position (sub_insn_off) relative to its original
		 * in-section offset (sec_insn_off).
		 */
		off_adj = prog->sub_insn_off - prog->sec_insn_off;
		rec = new_prog_info + old_sz;
		rec_end = new_prog_info + new_sz;
		for (; rec < rec_end; rec += ext_info->rec_size) {
			__u32 *insn_off = rec;

			*insn_off = *insn_off / BPF_INSN_SZ + off_adj;
		}
		*prog_rec_sz = ext_info->rec_size;
		return 0;
	}

	return -ENOENT;
}
6286
/* Relocate .BTF.ext func_info and line_info for @prog (the main program
 * itself, or a sub-program appended to it) into @main_prog's accumulated
 * arrays. Info missing for the main program itself means all info of
 * that kind is skipped; info missing for a sub-program while the main
 * program has some is an error (it would leave gaps).
 */
static int
reloc_prog_func_and_line_info(const struct bpf_object *obj,
			      struct bpf_program *main_prog,
			      const struct bpf_program *prog)
{
	int err;

	/* nothing to do if there is no .BTF.ext or the kernel can't use it */
	if (!obj->btf_ext || !kernel_supports(FEAT_BTF_FUNC))
		return 0;

	/* main program already determined to have no func info: don't
	 * collect it for sub-programs either
	 */
	if (main_prog != prog && !main_prog->func_info)
		goto line_info;

	err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->func_info,
				       &main_prog->func_info,
				       &main_prog->func_info_cnt,
				       &main_prog->func_info_rec_size);
	if (err) {
		if (err != -ENOENT) {
			pr_warn("prog '%s': error relocating .BTF.ext function info: %d\n",
				prog->name, err);
			return err;
		}
		if (main_prog->func_info) {
			/* -ENOENT for a sub-program after info was already
			 * accumulated for the main program is fatal
			 */
			pr_warn("prog '%s': missing .BTF.ext function info.\n", prog->name);
			return err;
		}
		/* -ENOENT for the main program itself: skip func info for all */
		pr_warn("prog '%s': missing .BTF.ext function info for the main program, skipping all of .BTF.ext func info.\n",
			prog->name);
	}

line_info:
	/* same logic as above, applied to .BTF.ext line info */
	if (main_prog != prog && !main_prog->line_info)
		return 0;

	err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->line_info,
				       &main_prog->line_info,
				       &main_prog->line_info_cnt,
				       &main_prog->line_info_rec_size);
	if (err) {
		if (err != -ENOENT) {
			pr_warn("prog '%s': error relocating .BTF.ext line info: %d\n",
				prog->name, err);
			return err;
		}
		if (main_prog->line_info) {
			/* sub-program without line info while main has it */
			pr_warn("prog '%s': missing .BTF.ext line info.\n", prog->name);
			return err;
		}
		pr_warn("prog '%s': missing .BTF.ext line info for the main program, skipping all of .BTF.ext line info.\n",
			prog->name);
	}
	return 0;
}
6358
6359static int cmp_relo_by_insn_idx(const void *key, const void *elem)
6360{
6361 size_t insn_idx = *(const size_t *)key;
6362 const struct reloc_desc *relo = elem;
6363
6364 if (insn_idx == relo->insn_idx)
6365 return 0;
6366 return insn_idx < relo->insn_idx ? -1 : 1;
6367}
6368
6369static struct reloc_desc *find_prog_insn_relo(const struct bpf_program *prog, size_t insn_idx)
6370{
6371 return bsearch(&insn_idx, prog->reloc_desc, prog->nr_reloc,
6372 sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx);
6373}
6374
/* Append every sub-program that @prog calls to @main_prog's instruction
 * array (each sub-program at most once), then patch each call
 * instruction's imm to the callee's relative position in the final
 * image. Recurses to process calls made by appended sub-programs.
 */
static int
bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
		       struct bpf_program *prog)
{
	size_t sub_insn_idx, insn_idx, new_cnt;
	struct bpf_program *subprog;
	struct bpf_insn *insns, *insn;
	struct reloc_desc *relo;
	int err;

	err = reloc_prog_func_and_line_info(obj, main_prog, prog);
	if (err)
		return err;

	for (insn_idx = 0; insn_idx < prog->sec_insn_cnt; insn_idx++) {
		insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
		if (!insn_is_subprog_call(insn))
			continue;

		relo = find_prog_insn_relo(prog, insn_idx);
		if (relo && relo->type != RELO_CALL) {
			pr_warn("prog '%s': unexpected relo for insn #%zu, type %d\n",
				prog->name, insn_idx, relo->type);
			return -LIBBPF_ERRNO__RELOC;
		}
		if (relo) {
			/* sym_off is the callee symbol's byte offset in
			 * .text; insn->imm adds an extra instruction-level
			 * delta on top, plus 1 to step past the call insn
			 */
			sub_insn_idx = relo->sym_off / BPF_INSN_SZ + insn->imm + 1;
		} else {
			/* no relocation: call within the same section, imm is
			 * relative to the instruction following the call
			 */
			sub_insn_idx = prog->sec_insn_off + insn_idx + insn->imm + 1;
		}

		/* find the sub-program containing the target instruction */
		subprog = find_prog_by_sec_insn(obj, obj->efile.text_shndx, sub_insn_idx);
		if (!subprog) {
			pr_warn("prog '%s': no .text section found yet sub-program call exists\n",
				prog->name);
			return -LIBBPF_ERRNO__RELOC;
		}

		/* sub_insn_off == 0 means this sub-program hasn't been
		 * appended to main_prog yet (offset 0 always belongs to the
		 * main program's own code); append its instructions and
		 * recursively relocate the calls it makes
		 */
		if (subprog->sub_insn_off == 0) {
			subprog->sub_insn_off = main_prog->insns_cnt;

			new_cnt = main_prog->insns_cnt + subprog->insns_cnt;
			insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns));
			if (!insns) {
				pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name);
				return -ENOMEM;
			}
			main_prog->insns = insns;
			main_prog->insns_cnt = new_cnt;

			memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns,
			       subprog->insns_cnt * sizeof(*insns));

			pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n",
				 main_prog->name, subprog->insns_cnt, subprog->name);

			err = bpf_object__reloc_code(obj, main_prog, subprog);
			if (err)
				return err;
		}

		/* main_prog->insns may have been reallocated above (directly
		 * or in the recursive call), so re-derive the insn pointer
		 */
		insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
		/* patch the call: imm is the instruction delta from the insn
		 * after the call to the sub-program's start in the image
		 */
		insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1;

		if (relo)
			relo->processed = true;

		pr_debug("prog '%s': insn #%zu relocated, imm %d points to subprog '%s' (now at %zu offset)\n",
			 prog->name, insn_idx, insn->imm, subprog->name, subprog->sub_insn_off);
	}

	return 0;
}
6480
6481
6482
6483
6484
6485
6486
6487
6488
6489
6490
6491
6492
6493
6494
6495
6496
6497
6498
6499
6500
6501
6502
6503
6504
6505
6506
6507
6508
6509
6510
6511
6512
6513
6514
6515
6516
6517
6518
6519
6520
6521
6522
6523
6524
6525
6526
6527
6528
6529
6530
6531
6532
6533
6534
6535
6536
6537
6538
6539
6540
6541
6542
6543
6544
6545
6546
6547
6548
6549
6550
6551
6552
6553
6554
6555
6556
6557
6558
6559
6560
6561
6562static int
6563bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog)
6564{
6565 struct bpf_program *subprog;
6566 int i, j, err;
6567
6568
6569
6570
6571 for (i = 0; i < obj->nr_programs; i++) {
6572 subprog = &obj->programs[i];
6573 if (!prog_is_subprog(obj, subprog))
6574 continue;
6575
6576 subprog->sub_insn_off = 0;
6577 for (j = 0; j < subprog->nr_reloc; j++)
6578 if (subprog->reloc_desc[j].type == RELO_CALL)
6579 subprog->reloc_desc[j].processed = false;
6580 }
6581
6582 err = bpf_object__reloc_code(obj, prog, prog);
6583 if (err)
6584 return err;
6585
6586
6587 return 0;
6588}
6589
/* Relocate the whole object: CO-RE relocations first, then per-program
 * data/map/extern references, then sub-program calls. Relocation
 * descriptors are freed at the end, so this runs once per object.
 */
static int
bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
{
	struct bpf_program *prog;
	size_t i;
	int err;

	if (obj->btf_ext) {
		err = bpf_object__relocate_core(obj, targ_btf_path);
		if (err) {
			pr_warn("failed to perform CO-RE relocations: %d\n",
				err);
			return err;
		}
	}

	/* relocate data references for all programs, including
	 * sub-programs, before calls are relocated
	 */
	for (i = 0; i < obj->nr_programs; i++) {
		prog = &obj->programs[i];
		err = bpf_object__relocate_data(obj, prog);
		if (err) {
			pr_warn("prog '%s': failed to relocate data references: %d\n",
				prog->name, err);
			return err;
		}
	}

	/* relocate sub-program calls for entry-point programs only;
	 * sub-programs are handled from within their callers
	 */
	for (i = 0; i < obj->nr_programs; i++) {
		prog = &obj->programs[i];
		if (prog_is_subprog(obj, prog))
			continue;

		err = bpf_object__relocate_calls(obj, prog);
		if (err) {
			pr_warn("prog '%s': failed to relocate calls: %d\n",
				prog->name, err);
			return err;
		}
	}

	/* relocation descriptors are not needed anymore */
	for (i = 0; i < obj->nr_programs; i++) {
		prog = &obj->programs[i];
		zfree(&prog->reloc_desc);
		prog->nr_reloc = 0;
	}
	return 0;
}
6646
6647static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
6648 GElf_Shdr *shdr, Elf_Data *data);
6649
/* Collect relocations recorded against the .maps section. Each such
 * relocation initializes one slot of an outer (map-in-map) map's
 * "values" array with a pointer to another BTF-defined map; resolved
 * targets are remembered in map->init_slots for map-creation time.
 */
static int bpf_object__collect_map_relos(struct bpf_object *obj,
					 GElf_Shdr *shdr, Elf_Data *data)
{
	/* pointers inside BPF maps are always 8 bytes on the BPF side,
	 * independent of the host pointer size
	 */
	const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *);
	int i, j, nrels, new_sz;
	const struct btf_var_secinfo *vi = NULL;
	const struct btf_type *sec, *var, *def;
	struct bpf_map *map = NULL, *targ_map;
	const struct btf_member *member;
	const char *name, *mname;
	Elf_Data *symbols;
	unsigned int moff;
	GElf_Sym sym;
	GElf_Rel rel;
	void *tmp;

	if (!obj->efile.btf_maps_sec_btf_id || !obj->btf)
		return -EINVAL;
	sec = btf__type_by_id(obj->btf, obj->efile.btf_maps_sec_btf_id);
	if (!sec)
		return -EINVAL;

	symbols = obj->efile.symbols;
	nrels = shdr->sh_size / shdr->sh_entsize;
	for (i = 0; i < nrels; i++) {
		if (!gelf_getrel(data, i, &rel)) {
			pr_warn(".maps relo #%d: failed to get ELF relo\n", i);
			return -LIBBPF_ERRNO__FORMAT;
		}
		if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
			pr_warn(".maps relo #%d: symbol %zx not found\n",
				i, (size_t)GELF_R_SYM(rel.r_info));
			return -LIBBPF_ERRNO__FORMAT;
		}
		name = elf_sym_str(obj, sym.st_name) ?: "<?>";
		/* the referenced symbol must itself live in .maps */
		if (sym.st_shndx != obj->efile.btf_maps_shndx) {
			pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n",
				i, name);
			return -LIBBPF_ERRNO__RELOC;
		}

		pr_debug(".maps relo #%d: for %zd value %zd rel.r_offset %zu name %d ('%s')\n",
			 i, (ssize_t)(rel.r_info >> 32), (size_t)sym.st_value,
			 (size_t)rel.r_offset, sym.st_name, name);

		/* find the outer map whose DATASEC variable covers the
		 * relocated offset
		 */
		for (j = 0; j < obj->nr_maps; j++) {
			map = &obj->maps[j];
			if (map->sec_idx != obj->efile.btf_maps_shndx)
				continue;

			vi = btf_var_secinfos(sec) + map->btf_var_idx;
			if (vi->offset <= rel.r_offset &&
			    rel.r_offset + bpf_ptr_sz <= vi->offset + vi->size)
				break;
		}
		if (j == obj->nr_maps) {
			pr_warn(".maps relo #%d: cannot find map '%s' at rel.r_offset %zu\n",
				i, name, (size_t)rel.r_offset);
			return -EINVAL;
		}

		if (!bpf_map_type__is_map_in_map(map->def.type))
			return -EINVAL;
		if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS &&
		    map->def.key_size != sizeof(int)) {
			pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n",
				i, map->name, sizeof(int));
			return -EINVAL;
		}

		targ_map = bpf_object__find_map_by_name(obj, name);
		if (!targ_map)
			return -ESRCH;

		/* inner map pointers must be inside the last member of the
		 * map definition, which must be named "values"
		 */
		var = btf__type_by_id(obj->btf, vi->type);
		def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
		if (btf_vlen(def) == 0)
			return -EINVAL;
		member = btf_members(def) + btf_vlen(def) - 1;
		mname = btf__name_by_offset(obj->btf, member->name_off);
		if (strcmp(mname, "values"))
			return -EINVAL;

		moff = btf_member_bit_offset(def, btf_vlen(def) - 1) / 8;
		if (rel.r_offset - vi->offset < moff)
			return -EINVAL;

		/* convert the relocated byte offset into a slot index within
		 * the "values" array; it must be pointer-aligned
		 */
		moff = rel.r_offset - vi->offset - moff;
		if (moff % bpf_ptr_sz)
			return -EINVAL;
		moff /= bpf_ptr_sz;
		if (moff >= map->init_slots_sz) {
			/* grow init_slots to fit slot moff, zero the new tail */
			new_sz = moff + 1;
			tmp = libbpf_reallocarray(map->init_slots, new_sz, host_ptr_sz);
			if (!tmp)
				return -ENOMEM;
			map->init_slots = tmp;
			memset(map->init_slots + map->init_slots_sz, 0,
			       (new_sz - map->init_slots_sz) * host_ptr_sz);
			map->init_slots_sz = new_sz;
		}
		map->init_slots[moff] = targ_map;

		pr_debug(".maps relo #%d: map '%s' slot [%d] points to map '%s'\n",
			 i, map->name, moff, name);
	}

	return 0;
}
6762
6763static int cmp_relocs(const void *_a, const void *_b)
6764{
6765 const struct reloc_desc *a = _a;
6766 const struct reloc_desc *b = _b;
6767
6768 if (a->insn_idx != b->insn_idx)
6769 return a->insn_idx < b->insn_idx ? -1 : 1;
6770
6771
6772 if (a->type != b->type)
6773 return a->type < b->type ? -1 : 1;
6774
6775 return 0;
6776}
6777
6778static int bpf_object__collect_relos(struct bpf_object *obj)
6779{
6780 int i, err;
6781
6782 for (i = 0; i < obj->efile.nr_reloc_sects; i++) {
6783 GElf_Shdr *shdr = &obj->efile.reloc_sects[i].shdr;
6784 Elf_Data *data = obj->efile.reloc_sects[i].data;
6785 int idx = shdr->sh_info;
6786
6787 if (shdr->sh_type != SHT_REL) {
6788 pr_warn("internal error at %d\n", __LINE__);
6789 return -LIBBPF_ERRNO__INTERNAL;
6790 }
6791
6792 if (idx == obj->efile.st_ops_shndx)
6793 err = bpf_object__collect_st_ops_relos(obj, shdr, data);
6794 else if (idx == obj->efile.btf_maps_shndx)
6795 err = bpf_object__collect_map_relos(obj, shdr, data);
6796 else
6797 err = bpf_object__collect_prog_relos(obj, shdr, data);
6798 if (err)
6799 return err;
6800 }
6801
6802 for (i = 0; i < obj->nr_programs; i++) {
6803 struct bpf_program *p = &obj->programs[i];
6804
6805 if (!p->nr_reloc)
6806 continue;
6807
6808 qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs);
6809 }
6810 return 0;
6811}
6812
/* Check whether @insn is a call to a BPF helper: opcode BPF_JMP|BPF_CALL
 * with an immediate (BPF_K) source and both registers zero. On match,
 * report the helper id through *func_id.
 */
static bool insn_is_helper_call(struct bpf_insn *insn, enum bpf_func_id *func_id)
{
	bool is_helper = BPF_CLASS(insn->code) == BPF_JMP &&
			 BPF_OP(insn->code) == BPF_CALL &&
			 BPF_SRC(insn->code) == BPF_K &&
			 insn->src_reg == 0 &&
			 insn->dst_reg == 0;

	if (!is_helper)
		return false;

	*func_id = insn->imm;
	return true;
}
6825
/* Rewrite helper calls that the running kernel doesn't support into
 * older compatible equivalents (currently the probe_read family).
 */
static int bpf_object__sanitize_prog(struct bpf_object* obj, struct bpf_program *prog)
{
	struct bpf_insn *insn = prog->insns;
	enum bpf_func_id func_id;
	int i;

	for (i = 0; i < prog->insns_cnt; i++, insn++) {
		if (!insn_is_helper_call(insn, &func_id))
			continue;

		/* on kernels without the split kernel/user probe_read
		 * helpers, fall back to the legacy combined variants
		 */
		switch (func_id) {
		case BPF_FUNC_probe_read_kernel:
		case BPF_FUNC_probe_read_user:
			if (!kernel_supports(FEAT_PROBE_READ_KERN))
				insn->imm = BPF_FUNC_probe_read;
			break;
		case BPF_FUNC_probe_read_kernel_str:
		case BPF_FUNC_probe_read_user_str:
			if (!kernel_supports(FEAT_PROBE_READ_KERN))
				insn->imm = BPF_FUNC_probe_read_str;
			break;
		default:
			break;
		}
	}
	return 0;
}
6857
6858static int
6859load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
6860 char *license, __u32 kern_version, int *pfd)
6861{
6862 struct bpf_prog_load_params load_attr = {};
6863 char *cp, errmsg[STRERR_BUFSIZE];
6864 size_t log_buf_size = 0;
6865 char *log_buf = NULL;
6866 int btf_fd, ret;
6867
6868 if (prog->type == BPF_PROG_TYPE_UNSPEC) {
6869
6870
6871
6872
6873 pr_warn("prog '%s': missing BPF prog type, check ELF section name '%s'\n",
6874 prog->name, prog->sec_name);
6875 return -EINVAL;
6876 }
6877
6878 if (!insns || !insns_cnt)
6879 return -EINVAL;
6880
6881 load_attr.prog_type = prog->type;
6882
6883 if (!kernel_supports(FEAT_EXP_ATTACH_TYPE) && prog->sec_def &&
6884 prog->sec_def->is_exp_attach_type_optional)
6885 load_attr.expected_attach_type = 0;
6886 else
6887 load_attr.expected_attach_type = prog->expected_attach_type;
6888 if (kernel_supports(FEAT_PROG_NAME))
6889 load_attr.name = prog->name;
6890 load_attr.insns = insns;
6891 load_attr.insn_cnt = insns_cnt;
6892 load_attr.license = license;
6893 load_attr.attach_btf_id = prog->attach_btf_id;
6894 if (prog->attach_prog_fd)
6895 load_attr.attach_prog_fd = prog->attach_prog_fd;
6896 else
6897 load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd;
6898 load_attr.attach_btf_id = prog->attach_btf_id;
6899 load_attr.kern_version = kern_version;
6900 load_attr.prog_ifindex = prog->prog_ifindex;
6901
6902
6903 btf_fd = bpf_object__btf_fd(prog->obj);
6904 if (btf_fd >= 0 && kernel_supports(FEAT_BTF_FUNC)) {
6905 load_attr.prog_btf_fd = btf_fd;
6906 load_attr.func_info = prog->func_info;
6907 load_attr.func_info_rec_size = prog->func_info_rec_size;
6908 load_attr.func_info_cnt = prog->func_info_cnt;
6909 load_attr.line_info = prog->line_info;
6910 load_attr.line_info_rec_size = prog->line_info_rec_size;
6911 load_attr.line_info_cnt = prog->line_info_cnt;
6912 }
6913 load_attr.log_level = prog->log_level;
6914 load_attr.prog_flags = prog->prog_flags;
6915
6916retry_load:
6917 if (log_buf_size) {
6918 log_buf = malloc(log_buf_size);
6919 if (!log_buf)
6920 return -ENOMEM;
6921
6922 *log_buf = 0;
6923 }
6924
6925 load_attr.log_buf = log_buf;
6926 load_attr.log_buf_sz = log_buf_size;
6927 ret = libbpf__bpf_prog_load(&load_attr);
6928
6929 if (ret >= 0) {
6930 if (log_buf && load_attr.log_level)
6931 pr_debug("verifier log:\n%s", log_buf);
6932
6933 if (prog->obj->rodata_map_idx >= 0 &&
6934 kernel_supports(FEAT_PROG_BIND_MAP)) {
6935 struct bpf_map *rodata_map =
6936 &prog->obj->maps[prog->obj->rodata_map_idx];
6937
6938 if (bpf_prog_bind_map(ret, bpf_map__fd(rodata_map), NULL)) {
6939 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
6940 pr_warn("prog '%s': failed to bind .rodata map: %s\n",
6941 prog->name, cp);
6942
6943 }
6944 }
6945
6946 *pfd = ret;
6947 ret = 0;
6948 goto out;
6949 }
6950
6951 if (!log_buf || errno == ENOSPC) {
6952 log_buf_size = max((size_t)BPF_LOG_BUF_SIZE,
6953 log_buf_size << 1);
6954
6955 free(log_buf);
6956 goto retry_load;
6957 }
6958 ret = errno ? -errno : -LIBBPF_ERRNO__LOAD;
6959 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
6960 pr_warn("load bpf program failed: %s\n", cp);
6961 pr_perm_msg(ret);
6962
6963 if (log_buf && log_buf[0] != '\0') {
6964 ret = -LIBBPF_ERRNO__VERIFY;
6965 pr_warn("-- BEGIN DUMP LOG ---\n");
6966 pr_warn("\n%s\n", log_buf);
6967 pr_warn("-- END LOG --\n");
6968 } else if (load_attr.insn_cnt >= BPF_MAXINSNS) {
6969 pr_warn("Program too large (%zu insns), at most %d insns\n",
6970 load_attr.insn_cnt, BPF_MAXINSNS);
6971 ret = -LIBBPF_ERRNO__PROG2BIG;
6972 } else if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
6973
6974 int fd;
6975
6976 load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
6977 load_attr.expected_attach_type = 0;
6978 load_attr.log_buf = NULL;
6979 load_attr.log_buf_sz = 0;
6980 fd = libbpf__bpf_prog_load(&load_attr);
6981 if (fd >= 0) {
6982 close(fd);
6983 ret = -LIBBPF_ERRNO__PROGTYPE;
6984 goto out;
6985 }
6986 }
6987
6988out:
6989 free(log_buf);
6990 return ret;
6991}
6992
6993static int libbpf_find_attach_btf_id(struct bpf_program *prog, int *btf_obj_fd, int *btf_type_id);
6994
/* Load all instances of @prog into the kernel. Without a preprocessor
 * there is exactly one instance; with one, each instance is loaded from
 * the (possibly rewritten) instructions the preprocessor produces.
 * prog->insns is freed at the end, so a program loads at most once.
 */
int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
{
	int err = 0, fd, i;

	if (prog->obj->loaded) {
		pr_warn("prog '%s': can't load after object was loaded\n", prog->name);
		return -EINVAL;
	}

	/* tracing/LSM/EXT programs attach to a BTF type; resolve the target
	 * BTF object/type now unless the user provided attach_btf_id
	 */
	if ((prog->type == BPF_PROG_TYPE_TRACING ||
	     prog->type == BPF_PROG_TYPE_LSM ||
	     prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) {
		int btf_obj_fd = 0, btf_type_id = 0;

		err = libbpf_find_attach_btf_id(prog, &btf_obj_fd, &btf_type_id);
		if (err)
			return err;

		prog->attach_btf_obj_fd = btf_obj_fd;
		prog->attach_btf_id = btf_type_id;
	}

	/* lazily set up the single default instance */
	if (prog->instances.nr < 0 || !prog->instances.fds) {
		if (prog->preprocessor) {
			pr_warn("Internal error: can't load program '%s'\n",
				prog->name);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog->instances.fds = malloc(sizeof(int));
		if (!prog->instances.fds) {
			pr_warn("Not enough memory for BPF fds\n");
			return -ENOMEM;
		}
		prog->instances.nr = 1;
		prog->instances.fds[0] = -1;
	}

	/* simple path: no preprocessor, load prog->insns directly */
	if (!prog->preprocessor) {
		if (prog->instances.nr != 1) {
			pr_warn("prog '%s': inconsistent nr(%d) != 1\n",
				prog->name, prog->instances.nr);
		}
		err = load_program(prog, prog->insns, prog->insns_cnt,
				   license, kern_ver, &fd);
		if (!err)
			prog->instances.fds[0] = fd;
		goto out;
	}

	/* preprocessor path: load each instance from its rewritten insns */
	for (i = 0; i < prog->instances.nr; i++) {
		struct bpf_prog_prep_result result;
		bpf_program_prep_t preprocessor = prog->preprocessor;

		memset(&result, 0, sizeof(result));
		err = preprocessor(prog, i, prog->insns,
				   prog->insns_cnt, &result);
		if (err) {
			pr_warn("Preprocessing the %dth instance of program '%s' failed\n",
				i, prog->name);
			goto out;
		}

		/* the preprocessor may elect to skip an instance entirely */
		if (!result.new_insn_ptr || !result.new_insn_cnt) {
			pr_debug("Skip loading the %dth instance of program '%s'\n",
				 i, prog->name);
			prog->instances.fds[i] = -1;
			if (result.pfd)
				*result.pfd = -1;
			continue;
		}

		err = load_program(prog, result.new_insn_ptr,
				   result.new_insn_cnt, license, kern_ver, &fd);
		if (err) {
			pr_warn("Loading the %dth instance of program '%s' failed\n",
				i, prog->name);
			goto out;
		}

		if (result.pfd)
			*result.pfd = fd;
		prog->instances.fds[i] = fd;
	}
out:
	if (err)
		pr_warn("failed to load program '%s'\n", prog->name);
	/* instructions are no longer needed after loading */
	zfree(&prog->insns);
	prog->insns_cnt = 0;
	return err;
}
7086
7087static int
7088bpf_object__load_progs(struct bpf_object *obj, int log_level)
7089{
7090 struct bpf_program *prog;
7091 size_t i;
7092 int err;
7093
7094 for (i = 0; i < obj->nr_programs; i++) {
7095 prog = &obj->programs[i];
7096 err = bpf_object__sanitize_prog(obj, prog);
7097 if (err)
7098 return err;
7099 }
7100
7101 for (i = 0; i < obj->nr_programs; i++) {
7102 prog = &obj->programs[i];
7103 if (prog_is_subprog(obj, prog))
7104 continue;
7105 if (!prog->load) {
7106 pr_debug("prog '%s': skipped loading\n", prog->name);
7107 continue;
7108 }
7109 prog->log_level |= log_level;
7110 err = bpf_program__load(prog, obj->license, obj->kern_version);
7111 if (err)
7112 return err;
7113 }
7114 return 0;
7115}
7116
7117static const struct bpf_sec_def *find_sec_def(const char *sec_name);
7118
7119static struct bpf_object *
7120__bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
7121 const struct bpf_object_open_opts *opts)
7122{
7123 const char *obj_name, *kconfig;
7124 struct bpf_program *prog;
7125 struct bpf_object *obj;
7126 char tmp_name[64];
7127 int err;
7128
7129 if (elf_version(EV_CURRENT) == EV_NONE) {
7130 pr_warn("failed to init libelf for %s\n",
7131 path ? : "(mem buf)");
7132 return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
7133 }
7134
7135 if (!OPTS_VALID(opts, bpf_object_open_opts))
7136 return ERR_PTR(-EINVAL);
7137
7138 obj_name = OPTS_GET(opts, object_name, NULL);
7139 if (obj_buf) {
7140 if (!obj_name) {
7141 snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
7142 (unsigned long)obj_buf,
7143 (unsigned long)obj_buf_sz);
7144 obj_name = tmp_name;
7145 }
7146 path = obj_name;
7147 pr_debug("loading object '%s' from buffer\n", obj_name);
7148 }
7149
7150 obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
7151 if (IS_ERR(obj))
7152 return obj;
7153
7154 kconfig = OPTS_GET(opts, kconfig, NULL);
7155 if (kconfig) {
7156 obj->kconfig = strdup(kconfig);
7157 if (!obj->kconfig)
7158 return ERR_PTR(-ENOMEM);
7159 }
7160
7161 err = bpf_object__elf_init(obj);
7162 err = err ? : bpf_object__check_endianness(obj);
7163 err = err ? : bpf_object__elf_collect(obj);
7164 err = err ? : bpf_object__collect_externs(obj);
7165 err = err ? : bpf_object__finalize_btf(obj);
7166 err = err ? : bpf_object__init_maps(obj, opts);
7167 err = err ? : bpf_object__collect_relos(obj);
7168 if (err)
7169 goto out;
7170 bpf_object__elf_finish(obj);
7171
7172 bpf_object__for_each_program(prog, obj) {
7173 prog->sec_def = find_sec_def(prog->sec_name);
7174 if (!prog->sec_def) {
7175
7176 pr_debug("prog '%s': unrecognized ELF section name '%s'\n",
7177 prog->name, prog->sec_name);
7178 continue;
7179 }
7180
7181 if (prog->sec_def->is_sleepable)
7182 prog->prog_flags |= BPF_F_SLEEPABLE;
7183 bpf_program__set_type(prog, prog->sec_def->prog_type);
7184 bpf_program__set_expected_attach_type(prog,
7185 prog->sec_def->expected_attach_type);
7186
7187 if (prog->sec_def->prog_type == BPF_PROG_TYPE_TRACING ||
7188 prog->sec_def->prog_type == BPF_PROG_TYPE_EXT)
7189 prog->attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
7190 }
7191
7192 return obj;
7193out:
7194 bpf_object__close(obj);
7195 return ERR_PTR(err);
7196}
7197
7198static struct bpf_object *
7199__bpf_object__open_xattr(struct bpf_object_open_attr *attr, int flags)
7200{
7201 DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
7202 .relaxed_maps = flags & MAPS_RELAX_COMPAT,
7203 );
7204
7205
7206 if (!attr->file)
7207 return NULL;
7208
7209 pr_debug("loading %s\n", attr->file);
7210 return __bpf_object__open(attr->file, NULL, 0, &opts);
7211}
7212
/* Public legacy API: open via attr struct with default flags. */
struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
{
	return __bpf_object__open_xattr(attr, 0);
}
7217
7218struct bpf_object *bpf_object__open(const char *path)
7219{
7220 struct bpf_object_open_attr attr = {
7221 .file = path,
7222 .prog_type = BPF_PROG_TYPE_UNSPEC,
7223 };
7224
7225 return bpf_object__open_xattr(&attr);
7226}
7227
7228struct bpf_object *
7229bpf_object__open_file_v0_0_4(const char *path,
7230 const struct bpf_object_open_opts *opts)
7231{
7232 if (!path)
7233 return ERR_PTR(-EINVAL);
7234
7235 pr_debug("loading %s\n", path);
7236
7237 return __bpf_object__open(path, NULL, 0, opts);
7238}
7239
7240struct bpf_object *
7241bpf_object__open_mem_v0_0_4(const void *obj_buf, size_t obj_buf_sz,
7242 const struct bpf_object_open_opts *opts)
7243{
7244 if (!obj_buf || obj_buf_sz == 0)
7245 return ERR_PTR(-EINVAL);
7246
7247 return __bpf_object__open(NULL, obj_buf, obj_buf_sz, opts);
7248}
7249
7250struct bpf_object *
7251bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
7252 const char *name)
7253{
7254 DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
7255 .object_name = name,
7256
7257 .relaxed_maps = true,
7258 );
7259
7260
7261 if (!obj_buf || obj_buf_sz == 0)
7262 return NULL;
7263
7264 return bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);
7265}
7266
7267int bpf_object__unload(struct bpf_object *obj)
7268{
7269 size_t i;
7270
7271 if (!obj)
7272 return -EINVAL;
7273
7274 for (i = 0; i < obj->nr_maps; i++) {
7275 zclose(obj->maps[i].fd);
7276 if (obj->maps[i].st_ops)
7277 zfree(&obj->maps[i].st_ops->kern_vdata);
7278 }
7279
7280 for (i = 0; i < obj->nr_programs; i++)
7281 bpf_program__unload(&obj->programs[i]);
7282
7283 return 0;
7284}
7285
7286static int bpf_object__sanitize_maps(struct bpf_object *obj)
7287{
7288 struct bpf_map *m;
7289
7290 bpf_object__for_each_map(m, obj) {
7291 if (!bpf_map__is_internal(m))
7292 continue;
7293 if (!kernel_supports(FEAT_GLOBAL_DATA)) {
7294 pr_warn("kernel doesn't support global data\n");
7295 return -ENOTSUP;
7296 }
7297 if (!kernel_supports(FEAT_ARRAY_MMAP))
7298 m->def.map_flags ^= BPF_F_MMAPABLE;
7299 }
7300
7301 return 0;
7302}
7303
/* Scan /proc/kallsyms and resolve the addresses of EXT_KSYM externs by
 * symbol name. A symbol seen at two different addresses makes the extern
 * ambiguous and fails the resolution.
 */
static int bpf_object__read_kallsyms_file(struct bpf_object *obj)
{
	char sym_type, sym_name[500];
	unsigned long long sym_addr;
	struct extern_desc *ext;
	int ret, err = 0;
	FILE *f;

	f = fopen("/proc/kallsyms", "r");
	if (!f) {
		err = -errno;
		pr_warn("failed to open /proc/kallsyms: %d\n", err);
		return err;
	}

	while (true) {
		/* each line is "<addr> <type> <name>" plus optional trailing
		 * text that's skipped by %*[^\n]
		 */
		ret = fscanf(f, "%llx %c %499s%*[^\n]\n",
			     &sym_addr, &sym_type, sym_name);
		if (ret == EOF && feof(f))
			break;
		if (ret != 3) {
			pr_warn("failed to read kallsyms entry: %d\n", ret);
			err = -EINVAL;
			goto out;
		}

		ext = find_extern_by_name(obj, sym_name);
		if (!ext || ext->type != EXT_KSYM)
			continue;

		/* same symbol name at two different addresses is ambiguous */
		if (ext->is_set && ext->ksym.addr != sym_addr) {
			pr_warn("extern (ksym) '%s' resolution is ambiguous: 0x%llx or 0x%llx\n",
				sym_name, ext->ksym.addr, sym_addr);
			err = -EINVAL;
			goto out;
		}
		if (!ext->is_set) {
			ext->is_set = true;
			ext->ksym.addr = sym_addr;
			pr_debug("extern (ksym) %s=0x%llx\n", sym_name, sym_addr);
		}
	}

out:
	fclose(f);
	return err;
}
7351
/* Resolve typed EXT_KSYM externs to kernel BTF IDs: look the variable up
 * in vmlinux BTF first, then (on -ENOENT) in each kernel module's BTF,
 * and verify the kernel variable's type is compatible with the local
 * declaration.
 */
static int bpf_object__resolve_ksyms_btf_id(struct bpf_object *obj)
{
	struct extern_desc *ext;
	struct btf *btf;
	int i, j, id, btf_fd, err;

	for (i = 0; i < obj->nr_extern; i++) {
		const struct btf_type *targ_var, *targ_type;
		__u32 targ_type_id, local_type_id;
		const char *targ_var_name;
		int ret;

		ext = &obj->externs[i];
		/* untyped ksyms are resolved via kallsyms, not BTF */
		if (ext->type != EXT_KSYM || !ext->ksym.type_id)
			continue;

		/* try vmlinux BTF first; btf_fd stays 0 for vmlinux */
		btf = obj->btf_vmlinux;
		btf_fd = 0;
		id = btf__find_by_name_kind(btf, ext->name, BTF_KIND_VAR);
		if (id == -ENOENT) {
			err = load_module_btfs(obj);
			if (err)
				return err;

			for (j = 0; j < obj->btf_module_cnt; j++) {
				btf = obj->btf_modules[j].btf;
				/* record which module BTF the hit came from */
				btf_fd = obj->btf_modules[j].fd;
				id = btf__find_by_name_kind(btf, ext->name, BTF_KIND_VAR);
				if (id != -ENOENT)
					break;
			}
		}
		if (id <= 0) {
			pr_warn("extern (ksym) '%s': failed to find BTF ID in kernel BTF(s).\n",
				ext->name);
			return -ESRCH;
		}

		/* local type the extern was declared with */
		local_type_id = ext->ksym.type_id;

		/* target variable and its (unwrapped) type in kernel BTF */
		targ_var = btf__type_by_id(btf, id);
		targ_var_name = btf__name_by_offset(btf, targ_var->name_off);
		targ_type = skip_mods_and_typedefs(btf, targ_var->type, &targ_type_id);

		ret = bpf_core_types_are_compat(obj->btf, local_type_id,
						btf, targ_type_id);
		if (ret <= 0) {
			const struct btf_type *local_type;
			const char *targ_name, *local_name;

			local_type = btf__type_by_id(obj->btf, local_type_id);
			local_name = btf__name_by_offset(obj->btf, local_type->name_off);
			targ_name = btf__name_by_offset(btf, targ_type->name_off);

			pr_warn("extern (ksym) '%s': incompatible types, expected [%d] %s %s, but kernel has [%d] %s %s\n",
				ext->name, local_type_id,
				btf_kind_str(local_type), local_name, targ_type_id,
				btf_kind_str(targ_type), targ_name);
			return -EINVAL;
		}

		ext->is_set = true;
		ext->ksym.kernel_btf_obj_fd = btf_fd;
		ext->ksym.kernel_btf_id = id;
		pr_debug("extern (ksym) '%s': resolved to [%d] %s %s\n",
			 ext->name, id, btf_kind_str(targ_var), targ_var_name);
	}
	return 0;
}
7424
/* Resolve all externs declared by the object: LINUX_KERNEL_VERSION and
 * CONFIG_* kconfig values, untyped ksym addresses (via kallsyms), and
 * typed ksyms (via kernel BTF). Unresolved strong externs are an error;
 * unresolved weak externs are left as zero.
 */
static int bpf_object__resolve_externs(struct bpf_object *obj,
				       const char *extra_kconfig)
{
	bool need_config = false, need_kallsyms = false;
	bool need_vmlinux_btf = false;
	struct extern_desc *ext;
	void *kcfg_data = NULL;
	int err, i;

	if (obj->nr_extern == 0)
		return 0;

	if (obj->kconfig_map_idx >= 0)
		kcfg_data = obj->maps[obj->kconfig_map_idx].mmaped;

	/* first pass: resolve what's directly computable and note which
	 * external sources are required for the rest
	 */
	for (i = 0; i < obj->nr_extern; i++) {
		ext = &obj->externs[i];

		if (ext->type == EXT_KCFG &&
		    strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) {
			void *ext_val = kcfg_data + ext->kcfg.data_off;
			__u32 kver = get_kernel_version();

			if (!kver) {
				pr_warn("failed to get kernel version\n");
				return -EINVAL;
			}
			err = set_kcfg_value_num(ext, ext_val, kver);
			if (err)
				return err;
			pr_debug("extern (kcfg) %s=0x%x\n", ext->name, kver);
		} else if (ext->type == EXT_KCFG &&
			   strncmp(ext->name, "CONFIG_", 7) == 0) {
			need_config = true;
		} else if (ext->type == EXT_KSYM) {
			if (ext->ksym.type_id)
				need_vmlinux_btf = true;
			else
				need_kallsyms = true;
		} else {
			pr_warn("unrecognized extern '%s'\n", ext->name);
			return -EINVAL;
		}
	}
	/* caller-supplied kconfig overrides take precedence; fall back to
	 * the system kconfig only for values still unset afterwards
	 */
	if (need_config && extra_kconfig) {
		err = bpf_object__read_kconfig_mem(obj, extra_kconfig, kcfg_data);
		if (err)
			return -EINVAL;
		need_config = false;
		for (i = 0; i < obj->nr_extern; i++) {
			ext = &obj->externs[i];
			if (ext->type == EXT_KCFG && !ext->is_set) {
				need_config = true;
				break;
			}
		}
	}
	if (need_config) {
		err = bpf_object__read_kconfig_file(obj, kcfg_data);
		if (err)
			return -EINVAL;
	}
	if (need_kallsyms) {
		err = bpf_object__read_kallsyms_file(obj);
		if (err)
			return -EINVAL;
	}
	if (need_vmlinux_btf) {
		err = bpf_object__resolve_ksyms_btf_id(obj);
		if (err)
			return -EINVAL;
	}
	/* final pass: every strong extern must be resolved by now */
	for (i = 0; i < obj->nr_extern; i++) {
		ext = &obj->externs[i];

		if (!ext->is_set && !ext->is_weak) {
			pr_warn("extern %s (strong) not resolved\n", ext->name);
			return -ESRCH;
		} else if (!ext->is_set) {
			pr_debug("extern %s (weak) not resolved, defaulting to zero\n",
				 ext->name);
		}
	}

	return 0;
}
7511
/*
 * Load all maps and programs of a BPF object into the kernel.
 *
 * Runs the full load pipeline (feature probing, vmlinux BTF, extern
 * resolution, BTF sanitization, map creation, relocation, program
 * loading). Temporary BTF data (module and vmlinux BTFs) is freed
 * whether or not loading succeeded. On failure, any maps pinned during
 * this load attempt are unpinned and the object is unloaded.
 *
 * Returns 0 on success, negative error code otherwise. Note that
 * obj->loaded is set even on failure, so a load can never be
 * re-attempted on the same object.
 */
int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
{
	struct bpf_object *obj;
	int err, i;

	if (!attr)
		return -EINVAL;
	obj = attr->obj;
	if (!obj)
		return -EINVAL;

	if (obj->loaded) {
		pr_warn("object '%s': load can't be attempted twice\n", obj->name);
		return -EINVAL;
	}

	/* each step runs only if all previous ones succeeded (GNU "?:") */
	err = bpf_object__probe_loading(obj);
	err = err ? : bpf_object__load_vmlinux_btf(obj, false);
	err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
	err = err ? : bpf_object__sanitize_and_load_btf(obj);
	err = err ? : bpf_object__sanitize_maps(obj);
	err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
	err = err ? : bpf_object__create_maps(obj);
	err = err ? : bpf_object__relocate(obj, attr->target_btf_path);
	err = err ? : bpf_object__load_progs(obj, attr->log_level);

	/* clean up module BTFs; they were only needed during load */
	for (i = 0; i < obj->btf_module_cnt; i++) {
		close(obj->btf_modules[i].fd);
		btf__free(obj->btf_modules[i].btf);
		free(obj->btf_modules[i].name);
	}
	free(obj->btf_modules);

	/* clean up vmlinux BTF as well */
	btf__free(obj->btf_vmlinux);
	obj->btf_vmlinux = NULL;

	obj->loaded = true; /* doesn't matter if successfully or not */

	if (err)
		goto out;

	return 0;
out:
	/* unpin any maps that were auto-pinned during this load attempt */
	for (i = 0; i < obj->nr_maps; i++)
		if (obj->maps[i].pinned && !obj->maps[i].reused)
			bpf_map__unpin(&obj->maps[i], NULL);

	bpf_object__unload(obj);
	pr_warn("failed to load object '%s'\n", obj->path);
	return err;
}
7566
7567int bpf_object__load(struct bpf_object *obj)
7568{
7569 struct bpf_object_load_attr attr = {
7570 .obj = obj,
7571 };
7572
7573 return bpf_object__load_xattr(&attr);
7574}
7575
7576static int make_parent_dir(const char *path)
7577{
7578 char *cp, errmsg[STRERR_BUFSIZE];
7579 char *dname, *dir;
7580 int err = 0;
7581
7582 dname = strdup(path);
7583 if (dname == NULL)
7584 return -ENOMEM;
7585
7586 dir = dirname(dname);
7587 if (mkdir(dir, 0700) && errno != EEXIST)
7588 err = -errno;
7589
7590 free(dname);
7591 if (err) {
7592 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
7593 pr_warn("failed to mkdir %s: %s\n", path, cp);
7594 }
7595 return err;
7596}
7597
7598static int check_path(const char *path)
7599{
7600 char *cp, errmsg[STRERR_BUFSIZE];
7601 struct statfs st_fs;
7602 char *dname, *dir;
7603 int err = 0;
7604
7605 if (path == NULL)
7606 return -EINVAL;
7607
7608 dname = strdup(path);
7609 if (dname == NULL)
7610 return -ENOMEM;
7611
7612 dir = dirname(dname);
7613 if (statfs(dir, &st_fs)) {
7614 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
7615 pr_warn("failed to statfs %s: %s\n", dir, cp);
7616 err = -errno;
7617 }
7618 free(dname);
7619
7620 if (!err && st_fs.f_type != BPF_FS_MAGIC) {
7621 pr_warn("specified path %s is not on BPF FS\n", path);
7622 err = -EINVAL;
7623 }
7624
7625 return err;
7626}
7627
/*
 * Pin instance @instance of @prog at @path on a BPF filesystem.
 * Creates the parent directory if necessary and validates that @path
 * is on bpffs. Returns 0 on success, negative errno on failure.
 */
int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
			      int instance)
{
	char *cp, errmsg[STRERR_BUFSIZE];
	int err;

	err = make_parent_dir(path);
	if (err)
		return err;

	err = check_path(path);
	if (err)
		return err;

	if (prog == NULL) {
		pr_warn("invalid program pointer\n");
		return -EINVAL;
	}

	if (instance < 0 || instance >= prog->instances.nr) {
		pr_warn("invalid prog instance %d of prog %s (max %d)\n",
			instance, prog->name, prog->instances.nr);
		return -EINVAL;
	}

	if (bpf_obj_pin(prog->instances.fds[instance], path)) {
		err = -errno;
		cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
		pr_warn("failed to pin program: %s\n", cp);
		return err;
	}
	pr_debug("pinned program '%s'\n", path);

	return 0;
}
7663
7664int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
7665 int instance)
7666{
7667 int err;
7668
7669 err = check_path(path);
7670 if (err)
7671 return err;
7672
7673 if (prog == NULL) {
7674 pr_warn("invalid program pointer\n");
7675 return -EINVAL;
7676 }
7677
7678 if (instance < 0 || instance >= prog->instances.nr) {
7679 pr_warn("invalid prog instance %d of prog %s (max %d)\n",
7680 instance, prog->name, prog->instances.nr);
7681 return -EINVAL;
7682 }
7683
7684 err = unlink(path);
7685 if (err != 0)
7686 return -errno;
7687 pr_debug("unpinned program '%s'\n", path);
7688
7689 return 0;
7690}
7691
/*
 * Pin @prog at @path on a BPF filesystem. A single-instance program is
 * pinned at @path itself; a multi-instance program gets one pin per
 * instance at "@path/<n>". On failure, instances pinned so far are
 * unpinned and the directory is removed. Returns 0 or negative errno.
 */
int bpf_program__pin(struct bpf_program *prog, const char *path)
{
	int i, err;

	err = make_parent_dir(path);
	if (err)
		return err;

	err = check_path(path);
	if (err)
		return err;

	if (prog == NULL) {
		pr_warn("invalid program pointer\n");
		return -EINVAL;
	}

	if (prog->instances.nr <= 0) {
		pr_warn("no instances of prog %s to pin\n", prog->name);
		return -EINVAL;
	}

	if (prog->instances.nr == 1) {
		/* don't create subdirs when pinning single instance */
		return bpf_program__pin_instance(prog, path, 0);
	}

	for (i = 0; i < prog->instances.nr; i++) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
		if (len < 0) {
			err = -EINVAL;
			goto err_unpin;
		} else if (len >= PATH_MAX) {
			err = -ENAMETOOLONG;
			goto err_unpin;
		}

		err = bpf_program__pin_instance(prog, buf, i);
		if (err)
			goto err_unpin;
	}

	return 0;

err_unpin:
	/* unwind: unpin instances [0, i) that were pinned successfully */
	for (i = i - 1; i >= 0; i--) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
		if (len < 0)
			continue;
		else if (len >= PATH_MAX)
			continue;

		bpf_program__unpin_instance(prog, buf, i);
	}

	rmdir(path);

	return err;
}
7757
7758int bpf_program__unpin(struct bpf_program *prog, const char *path)
7759{
7760 int i, err;
7761
7762 err = check_path(path);
7763 if (err)
7764 return err;
7765
7766 if (prog == NULL) {
7767 pr_warn("invalid program pointer\n");
7768 return -EINVAL;
7769 }
7770
7771 if (prog->instances.nr <= 0) {
7772 pr_warn("no instances of prog %s to pin\n", prog->name);
7773 return -EINVAL;
7774 }
7775
7776 if (prog->instances.nr == 1) {
7777
7778 return bpf_program__unpin_instance(prog, path, 0);
7779 }
7780
7781 for (i = 0; i < prog->instances.nr; i++) {
7782 char buf[PATH_MAX];
7783 int len;
7784
7785 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
7786 if (len < 0)
7787 return -EINVAL;
7788 else if (len >= PATH_MAX)
7789 return -ENAMETOOLONG;
7790
7791 err = bpf_program__unpin_instance(prog, buf, i);
7792 if (err)
7793 return err;
7794 }
7795
7796 err = rmdir(path);
7797 if (err)
7798 return -errno;
7799
7800 return 0;
7801}
7802
/*
 * Pin @map at @path on a BPF filesystem. If the map already has a
 * pin_path recorded, @path must be NULL or match it; otherwise @path
 * is duplicated and stored as the map's pin_path (freed in
 * bpf_map__destroy()). Re-pinning at the recorded path is a no-op.
 * Returns 0 on success, negative errno on failure.
 */
int bpf_map__pin(struct bpf_map *map, const char *path)
{
	char *cp, errmsg[STRERR_BUFSIZE];
	int err;

	if (map == NULL) {
		pr_warn("invalid map pointer\n");
		return -EINVAL;
	}

	if (map->pin_path) {
		/* an explicit path must agree with the recorded one */
		if (path && strcmp(path, map->pin_path)) {
			pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
				bpf_map__name(map), map->pin_path, path);
			return -EINVAL;
		} else if (map->pinned) {
			pr_debug("map '%s' already pinned at '%s'; not re-pinning\n",
				 bpf_map__name(map), map->pin_path);
			return 0;
		}
	} else {
		if (!path) {
			pr_warn("missing a path to pin map '%s' at\n",
				bpf_map__name(map));
			return -EINVAL;
		} else if (map->pinned) {
			pr_warn("map '%s' already pinned\n", bpf_map__name(map));
			return -EEXIST;
		}

		/* remember the path so a later unpin can find it */
		map->pin_path = strdup(path);
		if (!map->pin_path) {
			err = -errno;
			goto out_err;
		}
	}

	err = make_parent_dir(map->pin_path);
	if (err)
		return err;

	err = check_path(map->pin_path);
	if (err)
		return err;

	if (bpf_obj_pin(map->fd, map->pin_path)) {
		err = -errno;
		goto out_err;
	}

	map->pinned = true;
	pr_debug("pinned map '%s'\n", map->pin_path);

	return 0;

out_err:
	cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
	pr_warn("failed to pin map: %s\n", cp);
	return err;
}
7863
7864int bpf_map__unpin(struct bpf_map *map, const char *path)
7865{
7866 int err;
7867
7868 if (map == NULL) {
7869 pr_warn("invalid map pointer\n");
7870 return -EINVAL;
7871 }
7872
7873 if (map->pin_path) {
7874 if (path && strcmp(path, map->pin_path)) {
7875 pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
7876 bpf_map__name(map), map->pin_path, path);
7877 return -EINVAL;
7878 }
7879 path = map->pin_path;
7880 } else if (!path) {
7881 pr_warn("no path to unpin map '%s' from\n",
7882 bpf_map__name(map));
7883 return -EINVAL;
7884 }
7885
7886 err = check_path(path);
7887 if (err)
7888 return err;
7889
7890 err = unlink(path);
7891 if (err != 0)
7892 return -errno;
7893
7894 map->pinned = false;
7895 pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path);
7896
7897 return 0;
7898}
7899
7900int bpf_map__set_pin_path_v0_0_4(struct bpf_map *map, const char *path)
7901{
7902 char *new = NULL;
7903
7904 if (path) {
7905 new = strdup(path);
7906 if (!new)
7907 return -errno;
7908 }
7909
7910 free(map->pin_path);
7911 map->pin_path = new;
7912 return 0;
7913}
7914
/* Return the pin path recorded on @map, or NULL if none is set. */
const char *bpf_map__get_pin_path_v0_0_4(const struct bpf_map *map)
{
	return map->pin_path;
}
7919
/* Return true if @map is currently pinned on a BPF filesystem. */
bool bpf_map__is_pinned_v0_0_4(const struct bpf_map *map)
{
	return map->pinned;
}
7924
/* bpffs disallows periods in object names; replace each '.' with '_'. */
static void sanitize_pin_path(char *s)
{
	for (; *s; s++) {
		if (*s == '.')
			*s = '_';
	}
}
7934
/*
 * Pin all maps of @obj. When @path is non-NULL, each map is pinned at
 * "@path/<map_name>" (with '.' sanitized to '_'); otherwise only maps
 * with a pre-set pin_path are pinned. On failure, maps pinned so far
 * are unpinned again. Object must be loaded first.
 */
int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
{
	struct bpf_map *map;
	int err;

	if (!obj)
		return -ENOENT;

	if (!obj->loaded) {
		pr_warn("object not yet loaded; load it first\n");
		return -ENOENT;
	}

	bpf_object__for_each_map(map, obj) {
		char *pin_path = NULL;
		char buf[PATH_MAX];

		if (path) {
			int len;

			len = snprintf(buf, PATH_MAX, "%s/%s", path,
				       bpf_map__name(map));
			if (len < 0) {
				err = -EINVAL;
				goto err_unpin_maps;
			} else if (len >= PATH_MAX) {
				err = -ENAMETOOLONG;
				goto err_unpin_maps;
			}
			sanitize_pin_path(buf);
			pin_path = buf;
		} else if (!map->pin_path) {
			continue;
		}

		/* pin_path == NULL means "use map->pin_path" */
		err = bpf_map__pin(map, pin_path);
		if (err)
			goto err_unpin_maps;
	}

	return 0;

err_unpin_maps:
	/* unwind: walk back over maps processed before the failure */
	while ((map = bpf_map__prev(map, obj))) {
		if (!map->pin_path)
			continue;

		bpf_map__unpin(map, NULL);
	}

	return err;
}
7987
7988int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
7989{
7990 struct bpf_map *map;
7991 int err;
7992
7993 if (!obj)
7994 return -ENOENT;
7995
7996 bpf_object__for_each_map(map, obj) {
7997 char *pin_path = NULL;
7998 char buf[PATH_MAX];
7999
8000 if (path) {
8001 int len;
8002
8003 len = snprintf(buf, PATH_MAX, "%s/%s", path,
8004 bpf_map__name(map));
8005 if (len < 0)
8006 return -EINVAL;
8007 else if (len >= PATH_MAX)
8008 return -ENAMETOOLONG;
8009 sanitize_pin_path(buf);
8010 pin_path = buf;
8011 } else if (!map->pin_path) {
8012 continue;
8013 }
8014
8015 err = bpf_map__unpin(map, pin_path);
8016 if (err)
8017 return err;
8018 }
8019
8020 return 0;
8021}
8022
/*
 * Pin every entry-point program of @obj at "@path/<prog_pin_name>".
 * On failure, programs pinned so far are unpinned again. Object must
 * be loaded first.
 */
int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
{
	struct bpf_program *prog;
	int err;

	if (!obj)
		return -ENOENT;

	if (!obj->loaded) {
		pr_warn("object not yet loaded; load it first\n");
		return -ENOENT;
	}

	bpf_object__for_each_program(prog, obj) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       prog->pin_name);
		if (len < 0) {
			err = -EINVAL;
			goto err_unpin_programs;
		} else if (len >= PATH_MAX) {
			err = -ENAMETOOLONG;
			goto err_unpin_programs;
		}

		err = bpf_program__pin(prog, buf);
		if (err)
			goto err_unpin_programs;
	}

	return 0;

err_unpin_programs:
	/* unwind: unpin programs processed before the failure */
	while ((prog = bpf_program__prev(prog, obj))) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       prog->pin_name);
		if (len < 0)
			continue;
		else if (len >= PATH_MAX)
			continue;

		bpf_program__unpin(prog, buf);
	}

	return err;
}
8074
8075int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
8076{
8077 struct bpf_program *prog;
8078 int err;
8079
8080 if (!obj)
8081 return -ENOENT;
8082
8083 bpf_object__for_each_program(prog, obj) {
8084 char buf[PATH_MAX];
8085 int len;
8086
8087 len = snprintf(buf, PATH_MAX, "%s/%s", path,
8088 prog->pin_name);
8089 if (len < 0)
8090 return -EINVAL;
8091 else if (len >= PATH_MAX)
8092 return -ENAMETOOLONG;
8093
8094 err = bpf_program__unpin(prog, buf);
8095 if (err)
8096 return err;
8097 }
8098
8099 return 0;
8100}
8101
/*
 * Pin all maps and programs of @obj under @path. Maps are pinned
 * first; if program pinning then fails, the map pins are rolled back.
 */
int bpf_object__pin(struct bpf_object *obj, const char *path)
{
	int ret;

	ret = bpf_object__pin_maps(obj, path);
	if (ret)
		return ret;

	ret = bpf_object__pin_programs(obj, path);
	if (!ret)
		return 0;

	/* undo the map pins so we don't leave partial state behind */
	bpf_object__unpin_maps(obj, path);
	return ret;
}
8118
/*
 * Free all resources owned by @map: caller-private data, inner map
 * template, mmap'ed region, struct_ops bookkeeping, name/pin_path
 * strings, and the map FD.
 */
static void bpf_map__destroy(struct bpf_map *map)
{
	if (map->clear_priv)
		map->clear_priv(map, map->priv);
	map->priv = NULL;
	map->clear_priv = NULL;

	/* map-in-map: recursively destroy the inner map template */
	if (map->inner_map) {
		bpf_map__destroy(map->inner_map);
		zfree(&map->inner_map);
	}

	zfree(&map->init_slots);
	map->init_slots_sz = 0;

	if (map->mmaped) {
		munmap(map->mmaped, bpf_map_mmap_sz(map));
		map->mmaped = NULL;
	}

	if (map->st_ops) {
		zfree(&map->st_ops->data);
		zfree(&map->st_ops->progs);
		zfree(&map->st_ops->kern_func_off);
		zfree(&map->st_ops);
	}

	zfree(&map->name);
	zfree(&map->pin_path);

	if (map->fd >= 0)
		zclose(map->fd);
}
8152
/*
 * Destroy @obj and free all associated resources: caller-private data,
 * ELF state, loaded programs and maps, BTF data, externs, and the
 * object's entry in the global objects list. Safe to call with a NULL
 * or error pointer.
 */
void bpf_object__close(struct bpf_object *obj)
{
	size_t i;

	if (IS_ERR_OR_NULL(obj))
		return;

	if (obj->clear_priv)
		obj->clear_priv(obj, obj->priv);

	bpf_object__elf_finish(obj);
	bpf_object__unload(obj);
	btf__free(obj->btf);
	btf_ext__free(obj->btf_ext);

	for (i = 0; i < obj->nr_maps; i++)
		bpf_map__destroy(&obj->maps[i]);

	zfree(&obj->kconfig);
	zfree(&obj->externs);
	obj->nr_extern = 0;

	zfree(&obj->maps);
	obj->nr_maps = 0;

	if (obj->programs && obj->nr_programs) {
		for (i = 0; i < obj->nr_programs; i++)
			bpf_program__exit(&obj->programs[i]);
	}
	zfree(&obj->programs);

	/* remove from the global list of open objects */
	list_del(&obj->list);
	free(obj);
}
8187
/*
 * Iterate over all open BPF objects: pass NULL to get the first
 * object, or a previous return value to get the following one.
 * Returns NULL once the global list is exhausted.
 */
struct bpf_object *
bpf_object__next(struct bpf_object *prev)
{
	struct bpf_object *next;

	if (!prev)
		next = list_first_entry(&bpf_objects_list,
					struct bpf_object,
					list);
	else
		next = list_next_entry(prev, list);

	/* Empty list is noticed here so don't need checking on entry. */
	if (&next->list == &bpf_objects_list)
		return NULL;

	return next;
}
8206
8207const char *bpf_object__name(const struct bpf_object *obj)
8208{
8209 return obj ? obj->name : ERR_PTR(-EINVAL);
8210}
8211
8212unsigned int bpf_object__kversion(const struct bpf_object *obj)
8213{
8214 return obj ? obj->kern_version : 0;
8215}
8216
8217struct btf *bpf_object__btf(const struct bpf_object *obj)
8218{
8219 return obj ? obj->btf : NULL;
8220}
8221
8222int bpf_object__btf_fd(const struct bpf_object *obj)
8223{
8224 return obj->btf ? btf__fd(obj->btf) : -1;
8225}
8226
/*
 * Attach caller-private data to @obj. The previous private data, if
 * any, is released via its clear_priv callback before being replaced.
 */
int bpf_object__set_priv(struct bpf_object *obj, void *priv,
			 bpf_object_clear_priv_t clear_priv)
{
	if (obj->priv && obj->clear_priv)
		obj->clear_priv(obj, obj->priv);

	obj->priv = priv;
	obj->clear_priv = clear_priv;
	return 0;
}
8237
8238void *bpf_object__priv(const struct bpf_object *obj)
8239{
8240 return obj ? obj->priv : ERR_PTR(-EINVAL);
8241}
8242
/*
 * Step one program forward or backward within @obj's programs array.
 * @p == NULL starts the iteration (first or last program, depending on
 * @forward). Returns NULL when the iteration is exhausted or @p does
 * not belong to @obj.
 */
static struct bpf_program *
__bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
		    bool forward)
{
	size_t nr_programs = obj->nr_programs;
	ssize_t idx;

	if (!nr_programs)
		return NULL;

	if (!p)
		/* Iter from the beginning */
		return forward ? &obj->programs[0] :
			&obj->programs[nr_programs - 1];

	if (p->obj != obj) {
		pr_warn("error: program handler doesn't match object\n");
		return NULL;
	}

	/* programs are stored contiguously, so index arithmetic works */
	idx = (p - obj->programs) + (forward ? 1 : -1);
	if (idx >= obj->nr_programs || idx < 0)
		return NULL;
	return &obj->programs[idx];
}
8268
8269struct bpf_program *
8270bpf_program__next(struct bpf_program *prev, const struct bpf_object *obj)
8271{
8272 struct bpf_program *prog = prev;
8273
8274 do {
8275 prog = __bpf_program__iter(prog, obj, true);
8276 } while (prog && prog_is_subprog(obj, prog));
8277
8278 return prog;
8279}
8280
8281struct bpf_program *
8282bpf_program__prev(struct bpf_program *next, const struct bpf_object *obj)
8283{
8284 struct bpf_program *prog = next;
8285
8286 do {
8287 prog = __bpf_program__iter(prog, obj, false);
8288 } while (prog && prog_is_subprog(obj, prog));
8289
8290 return prog;
8291}
8292
/*
 * Attach caller-private data to @prog. The previous private data, if
 * any, is released via its clear_priv callback before being replaced.
 */
int bpf_program__set_priv(struct bpf_program *prog, void *priv,
			  bpf_program_clear_priv_t clear_priv)
{
	if (prog->priv && prog->clear_priv)
		prog->clear_priv(prog, prog->priv);

	prog->priv = priv;
	prog->clear_priv = clear_priv;
	return 0;
}
8303
8304void *bpf_program__priv(const struct bpf_program *prog)
8305{
8306 return prog ? prog->priv : ERR_PTR(-EINVAL);
8307}
8308
/* Set the network interface index used for offloaded programs. */
void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
{
	prog->prog_ifindex = ifindex;
}
8313
/* Name of the program (the ELF function symbol name). */
const char *bpf_program__name(const struct bpf_program *prog)
{
	return prog->name;
}
8318
/* Name of the ELF section the program was defined in. */
const char *bpf_program__section_name(const struct bpf_program *prog)
{
	return prog->sec_name;
}
8323
8324const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy)
8325{
8326 const char *title;
8327
8328 title = prog->sec_name;
8329 if (needs_copy) {
8330 title = strdup(title);
8331 if (!title) {
8332 pr_warn("failed to strdup program title\n");
8333 return ERR_PTR(-ENOMEM);
8334 }
8335 }
8336
8337 return title;
8338}
8339
/* Whether the program will be loaded as part of bpf_object__load(). */
bool bpf_program__autoload(const struct bpf_program *prog)
{
	return prog->load;
}
8344
/*
 * Enable/disable auto-loading of @prog. Can only be changed before the
 * owning object has been loaded; returns -EINVAL afterwards.
 */
int bpf_program__set_autoload(struct bpf_program *prog, bool autoload)
{
	if (prog->obj->loaded)
		return -EINVAL;

	prog->load = autoload;
	return 0;
}
8353
/* FD of the program's first (and usually only) instance. */
int bpf_program__fd(const struct bpf_program *prog)
{
	return bpf_program__nth_fd(prog, 0);
}
8358
/* Size of the program's instructions, in bytes. */
size_t bpf_program__size_v0_0_4(const struct bpf_program *prog)
{
	return prog->insns_cnt * BPF_INSN_SZ;
}
8363
8364int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
8365 bpf_program_prep_t prep)
8366{
8367 int *instances_fds;
8368
8369 if (nr_instances <= 0 || !prep)
8370 return -EINVAL;
8371
8372 if (prog->instances.nr > 0 || prog->instances.fds) {
8373 pr_warn("Can't set pre-processor after loading\n");
8374 return -EINVAL;
8375 }
8376
8377 instances_fds = malloc(sizeof(int) * nr_instances);
8378 if (!instances_fds) {
8379 pr_warn("alloc memory failed for fds\n");
8380 return -ENOMEM;
8381 }
8382
8383
8384 memset(instances_fds, -1, sizeof(int) * nr_instances);
8385
8386 prog->instances.nr = nr_instances;
8387 prog->instances.fds = instances_fds;
8388 prog->preprocessor = prep;
8389 return 0;
8390}
8391
8392int bpf_program__nth_fd(const struct bpf_program *prog, int n)
8393{
8394 int fd;
8395
8396 if (!prog)
8397 return -EINVAL;
8398
8399 if (n >= prog->instances.nr || n < 0) {
8400 pr_warn("Can't get the %dth fd from program %s: only %d instances\n",
8401 n, prog->name, prog->instances.nr);
8402 return -EINVAL;
8403 }
8404
8405 fd = prog->instances.fds[n];
8406 if (fd < 0) {
8407 pr_warn("%dth instance of program '%s' is invalid\n",
8408 n, prog->name);
8409 return -ENOENT;
8410 }
8411
8412 return fd;
8413}
8414
/* Return the BPF program type set on @prog. */
enum bpf_prog_type bpf_program__get_type_v0_0_4(struct bpf_program *prog)
{
	return prog->type;
}
8419
/* Set the BPF program type to use when loading @prog. */
void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
{
	prog->type = type;
}
8424
8425static bool bpf_program__is_type(const struct bpf_program *prog,
8426 enum bpf_prog_type type)
8427{
8428 return prog ? (prog->type == type) : false;
8429}
8430
/*
 * Generate bpf_program__set_<NAME>() and bpf_program__is_<NAME>()
 * accessor pairs for each of the program types instantiated below.
 */
#define BPF_PROG_TYPE_FNS(NAME, TYPE)			\
int bpf_program__set_##NAME(struct bpf_program *prog)	\
{							\
	if (!prog)					\
		return -EINVAL;				\
	bpf_program__set_type(prog, TYPE);		\
	return 0;					\
}							\
							\
bool bpf_program__is_##NAME(const struct bpf_program *prog)	\
{							\
	return bpf_program__is_type(prog, TYPE);	\
}							\

BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
BPF_PROG_TYPE_FNS(lsm, BPF_PROG_TYPE_LSM);
BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
BPF_PROG_TYPE_FNS(tracing, BPF_PROG_TYPE_TRACING);
BPF_PROG_TYPE_FNS(struct_ops, BPF_PROG_TYPE_STRUCT_OPS);
BPF_PROG_TYPE_FNS(extension, BPF_PROG_TYPE_EXT);
BPF_PROG_TYPE_FNS(sk_lookup, BPF_PROG_TYPE_SK_LOOKUP);
8458
/* Return the expected attach type previously set on @prog. */
enum bpf_attach_type
bpf_program__get_expected_attach_type_v0_0_4(struct bpf_program *prog)
{
	return prog->expected_attach_type;
}

/* Set the attach type the kernel should expect when loading @prog. */
void bpf_program__set_expected_attach_type(struct bpf_program *prog,
					   enum bpf_attach_type type)
{
	prog->expected_attach_type = type;
}
8470
#define BPF_PROG_SEC_IMPL(string, ptype, eatype, eatype_optional,	    \
			  attachable, attach_btf)			    \
	{								    \
		.sec = string,						    \
		.len = sizeof(string) - 1,				    \
		.prog_type = ptype,					    \
		.expected_attach_type = eatype,				    \
		.is_exp_attach_type_optional = eatype_optional,		    \
		.is_attachable = attachable,				    \
		.is_attach_btf = attach_btf,				    \
	}

/* Programs that can NOT be attached. */
#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0, 0)

/* Programs that can be attached. */
#define BPF_APROG_SEC(string, ptype, atype) \
	BPF_PROG_SEC_IMPL(string, ptype, atype, true, 1, 0)

/* Programs that must specify expected attach type at load time. */
#define BPF_EAPROG_SEC(string, ptype, eatype) \
	BPF_PROG_SEC_IMPL(string, ptype, eatype, false, 1, 0)

/* Programs that use BTF to identify attach point */
#define BPF_PROG_BTF(string, ptype, eatype) \
	BPF_PROG_SEC_IMPL(string, ptype, eatype, false, 0, 1)

/*
 * Programs that can be attached but attach type can't be identified by
 * section name. Kept for backward compatibility.
 */
#define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)

#define SEC_DEF(sec_pfx, ptype, ...) {					    \
	.sec = sec_pfx,							    \
	.len = sizeof(sec_pfx) - 1,					    \
	.prog_type = BPF_PROG_TYPE_##ptype,				    \
	__VA_ARGS__							    \
}

/* Per-section-prefix attach helpers, defined later in this file. */
static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec,
				      struct bpf_program *prog);
static struct bpf_link *attach_tp(const struct bpf_sec_def *sec,
				  struct bpf_program *prog);
static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
				      struct bpf_program *prog);
static struct bpf_link *attach_trace(const struct bpf_sec_def *sec,
				     struct bpf_program *prog);
static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec,
				   struct bpf_program *prog);
static struct bpf_link *attach_iter(const struct bpf_sec_def *sec,
				    struct bpf_program *prog);
8522
/*
 * Table mapping ELF section-name prefixes to BPF program type,
 * expected attach type, and (for SEC_DEF entries) the auto-attach
 * handler. Consulted by find_sec_def() / libbpf_prog_type_by_name().
 */
static const struct bpf_sec_def section_defs[] = {
	BPF_PROG_SEC("socket",			BPF_PROG_TYPE_SOCKET_FILTER),
	BPF_PROG_SEC("sk_reuseport",		BPF_PROG_TYPE_SK_REUSEPORT),
	SEC_DEF("kprobe/", KPROBE,
		.attach_fn = attach_kprobe),
	BPF_PROG_SEC("uprobe/",			BPF_PROG_TYPE_KPROBE),
	SEC_DEF("kretprobe/", KPROBE,
		.attach_fn = attach_kprobe),
	BPF_PROG_SEC("uretprobe/",		BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("classifier",		BPF_PROG_TYPE_SCHED_CLS),
	BPF_PROG_SEC("action",			BPF_PROG_TYPE_SCHED_ACT),
	SEC_DEF("tracepoint/", TRACEPOINT,
		.attach_fn = attach_tp),
	SEC_DEF("tp/", TRACEPOINT,
		.attach_fn = attach_tp),
	SEC_DEF("raw_tracepoint/", RAW_TRACEPOINT,
		.attach_fn = attach_raw_tp),
	SEC_DEF("raw_tp/", RAW_TRACEPOINT,
		.attach_fn = attach_raw_tp),
	SEC_DEF("tp_btf/", TRACING,
		.expected_attach_type = BPF_TRACE_RAW_TP,
		.is_attach_btf = true,
		.attach_fn = attach_trace),
	SEC_DEF("fentry/", TRACING,
		.expected_attach_type = BPF_TRACE_FENTRY,
		.is_attach_btf = true,
		.attach_fn = attach_trace),
	SEC_DEF("fmod_ret/", TRACING,
		.expected_attach_type = BPF_MODIFY_RETURN,
		.is_attach_btf = true,
		.attach_fn = attach_trace),
	SEC_DEF("fexit/", TRACING,
		.expected_attach_type = BPF_TRACE_FEXIT,
		.is_attach_btf = true,
		.attach_fn = attach_trace),
	/* ".s/" variants are sleepable programs */
	SEC_DEF("fentry.s/", TRACING,
		.expected_attach_type = BPF_TRACE_FENTRY,
		.is_attach_btf = true,
		.is_sleepable = true,
		.attach_fn = attach_trace),
	SEC_DEF("fmod_ret.s/", TRACING,
		.expected_attach_type = BPF_MODIFY_RETURN,
		.is_attach_btf = true,
		.is_sleepable = true,
		.attach_fn = attach_trace),
	SEC_DEF("fexit.s/", TRACING,
		.expected_attach_type = BPF_TRACE_FEXIT,
		.is_attach_btf = true,
		.is_sleepable = true,
		.attach_fn = attach_trace),
	SEC_DEF("freplace/", EXT,
		.is_attach_btf = true,
		.attach_fn = attach_trace),
	SEC_DEF("lsm/", LSM,
		.is_attach_btf = true,
		.expected_attach_type = BPF_LSM_MAC,
		.attach_fn = attach_lsm),
	SEC_DEF("lsm.s/", LSM,
		.is_attach_btf = true,
		.is_sleepable = true,
		.expected_attach_type = BPF_LSM_MAC,
		.attach_fn = attach_lsm),
	SEC_DEF("iter/", TRACING,
		.expected_attach_type = BPF_TRACE_ITER,
		.is_attach_btf = true,
		.attach_fn = attach_iter),
	BPF_EAPROG_SEC("xdp_devmap/",		BPF_PROG_TYPE_XDP,
						BPF_XDP_DEVMAP),
	BPF_EAPROG_SEC("xdp_cpumap/",		BPF_PROG_TYPE_XDP,
						BPF_XDP_CPUMAP),
	BPF_APROG_SEC("xdp",			BPF_PROG_TYPE_XDP,
						BPF_XDP),
	BPF_PROG_SEC("perf_event",		BPF_PROG_TYPE_PERF_EVENT),
	BPF_PROG_SEC("lwt_in",			BPF_PROG_TYPE_LWT_IN),
	BPF_PROG_SEC("lwt_out",			BPF_PROG_TYPE_LWT_OUT),
	BPF_PROG_SEC("lwt_xmit",		BPF_PROG_TYPE_LWT_XMIT),
	BPF_PROG_SEC("lwt_seg6local",		BPF_PROG_TYPE_LWT_SEG6LOCAL),
	BPF_APROG_SEC("cgroup_skb/ingress",	BPF_PROG_TYPE_CGROUP_SKB,
						BPF_CGROUP_INET_INGRESS),
	BPF_APROG_SEC("cgroup_skb/egress",	BPF_PROG_TYPE_CGROUP_SKB,
						BPF_CGROUP_INET_EGRESS),
	BPF_APROG_COMPAT("cgroup/skb",		BPF_PROG_TYPE_CGROUP_SKB),
	BPF_EAPROG_SEC("cgroup/sock_create",	BPF_PROG_TYPE_CGROUP_SOCK,
						BPF_CGROUP_INET_SOCK_CREATE),
	BPF_EAPROG_SEC("cgroup/sock_release",	BPF_PROG_TYPE_CGROUP_SOCK,
						BPF_CGROUP_INET_SOCK_RELEASE),
	BPF_APROG_SEC("cgroup/sock",		BPF_PROG_TYPE_CGROUP_SOCK,
						BPF_CGROUP_INET_SOCK_CREATE),
	BPF_EAPROG_SEC("cgroup/post_bind4",	BPF_PROG_TYPE_CGROUP_SOCK,
						BPF_CGROUP_INET4_POST_BIND),
	BPF_EAPROG_SEC("cgroup/post_bind6",	BPF_PROG_TYPE_CGROUP_SOCK,
						BPF_CGROUP_INET6_POST_BIND),
	BPF_APROG_SEC("cgroup/dev",		BPF_PROG_TYPE_CGROUP_DEVICE,
						BPF_CGROUP_DEVICE),
	BPF_APROG_SEC("sockops",		BPF_PROG_TYPE_SOCK_OPS,
						BPF_CGROUP_SOCK_OPS),
	BPF_APROG_SEC("sk_skb/stream_parser",	BPF_PROG_TYPE_SK_SKB,
						BPF_SK_SKB_STREAM_PARSER),
	BPF_APROG_SEC("sk_skb/stream_verdict",	BPF_PROG_TYPE_SK_SKB,
						BPF_SK_SKB_STREAM_VERDICT),
	BPF_APROG_COMPAT("sk_skb",		BPF_PROG_TYPE_SK_SKB),
	BPF_APROG_SEC("sk_msg",			BPF_PROG_TYPE_SK_MSG,
						BPF_SK_MSG_VERDICT),
	BPF_APROG_SEC("lirc_mode2",		BPF_PROG_TYPE_LIRC_MODE2,
						BPF_LIRC_MODE2),
	BPF_APROG_SEC("flow_dissector",		BPF_PROG_TYPE_FLOW_DISSECTOR,
						BPF_FLOW_DISSECTOR),
	BPF_EAPROG_SEC("cgroup/bind4",		BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET4_BIND),
	BPF_EAPROG_SEC("cgroup/bind6",		BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET6_BIND),
	BPF_EAPROG_SEC("cgroup/connect4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET4_CONNECT),
	BPF_EAPROG_SEC("cgroup/connect6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET6_CONNECT),
	BPF_EAPROG_SEC("cgroup/sendmsg4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_UDP4_SENDMSG),
	BPF_EAPROG_SEC("cgroup/sendmsg6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_UDP6_SENDMSG),
	BPF_EAPROG_SEC("cgroup/recvmsg4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_UDP4_RECVMSG),
	BPF_EAPROG_SEC("cgroup/recvmsg6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_UDP6_RECVMSG),
	BPF_EAPROG_SEC("cgroup/getpeername4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET4_GETPEERNAME),
	BPF_EAPROG_SEC("cgroup/getpeername6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET6_GETPEERNAME),
	BPF_EAPROG_SEC("cgroup/getsockname4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET4_GETSOCKNAME),
	BPF_EAPROG_SEC("cgroup/getsockname6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET6_GETSOCKNAME),
	BPF_EAPROG_SEC("cgroup/sysctl",		BPF_PROG_TYPE_CGROUP_SYSCTL,
						BPF_CGROUP_SYSCTL),
	BPF_EAPROG_SEC("cgroup/getsockopt",	BPF_PROG_TYPE_CGROUP_SOCKOPT,
						BPF_CGROUP_GETSOCKOPT),
	BPF_EAPROG_SEC("cgroup/setsockopt",	BPF_PROG_TYPE_CGROUP_SOCKOPT,
						BPF_CGROUP_SETSOCKOPT),
	BPF_PROG_SEC("struct_ops",		BPF_PROG_TYPE_STRUCT_OPS),
	BPF_EAPROG_SEC("sk_lookup/",		BPF_PROG_TYPE_SK_LOOKUP,
						BPF_SK_LOOKUP),
};
8664
/* the section macros were only needed for the table above */
#undef BPF_PROG_SEC_IMPL
#undef BPF_PROG_SEC
#undef BPF_APROG_SEC
#undef BPF_EAPROG_SEC
#undef BPF_APROG_COMPAT
#undef SEC_DEF

/* per-entry upper bound used to size the buffer in libbpf_get_type_names() */
#define MAX_TYPE_NAME_SIZE 32
8673
8674static const struct bpf_sec_def *find_sec_def(const char *sec_name)
8675{
8676 int i, n = ARRAY_SIZE(section_defs);
8677
8678 for (i = 0; i < n; i++) {
8679 if (strncmp(sec_name,
8680 section_defs[i].sec, section_defs[i].len))
8681 continue;
8682 return §ion_defs[i];
8683 }
8684 return NULL;
8685}
8686
8687static char *libbpf_get_type_names(bool attach_type)
8688{
8689 int i, len = ARRAY_SIZE(section_defs) * MAX_TYPE_NAME_SIZE;
8690 char *buf;
8691
8692 buf = malloc(len);
8693 if (!buf)
8694 return NULL;
8695
8696 buf[0] = '\0';
8697
8698 for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
8699 if (attach_type && !section_defs[i].is_attachable)
8700 continue;
8701
8702 if (strlen(buf) + strlen(section_defs[i].sec) + 2 > len) {
8703 free(buf);
8704 return NULL;
8705 }
8706 strcat(buf, " ");
8707 strcat(buf, section_defs[i].sec);
8708 }
8709
8710 return buf;
8711}
8712
8713int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
8714 enum bpf_attach_type *expected_attach_type)
8715{
8716 const struct bpf_sec_def *sec_def;
8717 char *type_names;
8718
8719 if (!name)
8720 return -EINVAL;
8721
8722 sec_def = find_sec_def(name);
8723 if (sec_def) {
8724 *prog_type = sec_def->prog_type;
8725 *expected_attach_type = sec_def->expected_attach_type;
8726 return 0;
8727 }
8728
8729 pr_debug("failed to guess program type from ELF section '%s'\n", name);
8730 type_names = libbpf_get_type_names(false);
8731 if (type_names != NULL) {
8732 pr_debug("supported section(type) names are:%s\n", type_names);
8733 free(type_names);
8734 }
8735
8736 return -ESRCH;
8737}
8738
8739static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
8740 size_t offset)
8741{
8742 struct bpf_map *map;
8743 size_t i;
8744
8745 for (i = 0; i < obj->nr_maps; i++) {
8746 map = &obj->maps[i];
8747 if (!bpf_map__is_struct_ops(map))
8748 continue;
8749 if (map->sec_offset <= offset &&
8750 offset - map->sec_offset < map->def.value_size)
8751 return map;
8752 }
8753
8754 return NULL;
8755}
8756
8757
/* Collect relocations from a .struct_ops relocation section. Each relocation
 * records that a given BPF program implements a particular function pointer
 * member of a struct_ops map's value. The matched program is stored in
 * st_ops->progs[member_idx] for later use at map creation time.
 * Returns 0 on success, negative error code otherwise.
 */
static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
					    GElf_Shdr *shdr, Elf_Data *data)
{
	const struct btf_member *member;
	struct bpf_struct_ops *st_ops;
	struct bpf_program *prog;
	unsigned int shdr_idx;
	const struct btf *btf;
	struct bpf_map *map;
	Elf_Data *symbols;
	unsigned int moff, insn_idx;
	const char *name;
	__u32 member_idx;
	GElf_Sym sym;
	GElf_Rel rel;
	int i, nrels;

	symbols = obj->efile.symbols;
	btf = obj->btf;
	nrels = shdr->sh_size / shdr->sh_entsize;
	for (i = 0; i < nrels; i++) {
		if (!gelf_getrel(data, i, &rel)) {
			pr_warn("struct_ops reloc: failed to get %d reloc\n", i);
			return -LIBBPF_ERRNO__FORMAT;
		}

		if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
			pr_warn("struct_ops reloc: symbol %zx not found\n",
				(size_t)GELF_R_SYM(rel.r_info));
			return -LIBBPF_ERRNO__FORMAT;
		}

		name = elf_sym_str(obj, sym.st_name) ?: "<?>";
		/* r_offset is relative to the .struct_ops section start; map
		 * it back to the struct_ops map whose value area contains it */
		map = find_struct_ops_map_by_offset(obj, rel.r_offset);
		if (!map) {
			pr_warn("struct_ops reloc: cannot find map at rel.r_offset %zu\n",
				(size_t)rel.r_offset);
			return -EINVAL;
		}

		/* member offset within the struct_ops value, in bytes */
		moff = rel.r_offset - map->sec_offset;
		shdr_idx = sym.st_shndx;
		st_ops = map->st_ops;
		pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel.r_offset %zu map->sec_offset %zu name %d (\'%s\')\n",
			 map->name,
			 (long long)(rel.r_info >> 32),
			 (long long)sym.st_value,
			 shdr_idx, (size_t)rel.r_offset,
			 map->sec_offset, sym.st_name, name);

		/* reserved section indices indicate a non-static symbol we
		 * can't resolve to a program within this object */
		if (shdr_idx >= SHN_LORESERVE) {
			pr_warn("struct_ops reloc %s: rel.r_offset %zu shdr_idx %u unsupported non-static function\n",
				map->name, (size_t)rel.r_offset, shdr_idx);
			return -LIBBPF_ERRNO__RELOC;
		}
		/* symbol value must be aligned on a BPF instruction boundary */
		if (sym.st_value % BPF_INSN_SZ) {
			pr_warn("struct_ops reloc %s: invalid target program offset %llu\n",
				map->name, (unsigned long long)sym.st_value);
			return -LIBBPF_ERRNO__FORMAT;
		}
		insn_idx = sym.st_value / BPF_INSN_SZ;

		/* BTF member offsets are in bits */
		member = find_member_by_offset(st_ops->type, moff * 8);
		if (!member) {
			pr_warn("struct_ops reloc %s: cannot find member at moff %u\n",
				map->name, moff);
			return -EINVAL;
		}
		member_idx = member - btf_members(st_ops->type);
		name = btf__name_by_offset(btf, member->name_off);

		/* only function pointer members can be relocated */
		if (!resolve_func_ptr(btf, member->type, NULL)) {
			pr_warn("struct_ops reloc %s: cannot relocate non func ptr %s\n",
				map->name, name);
			return -EINVAL;
		}

		prog = find_prog_by_sec_insn(obj, shdr_idx, insn_idx);
		if (!prog) {
			pr_warn("struct_ops reloc %s: cannot find prog at shdr_idx %u to relocate func ptr %s\n",
				map->name, shdr_idx, name);
			return -EINVAL;
		}

		if (prog->type == BPF_PROG_TYPE_UNSPEC) {
			const struct bpf_sec_def *sec_def;

			sec_def = find_sec_def(prog->sec_name);
			if (sec_def &&
			    sec_def->prog_type != BPF_PROG_TYPE_STRUCT_OPS) {
				/* program's section name dictates another type;
				 * record it for the error message below */
				prog->type = sec_def->prog_type;
				goto invalid_prog;
			}

			prog->type = BPF_PROG_TYPE_STRUCT_OPS;
			prog->attach_btf_id = st_ops->type_id;
			prog->expected_attach_type = member_idx;
		} else if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
			   prog->attach_btf_id != st_ops->type_id ||
			   prog->expected_attach_type != member_idx) {
			/* same program can't implement two different slots */
			goto invalid_prog;
		}
		st_ops->progs[member_idx] = prog;
	}

	return 0;

invalid_prog:
	pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n",
		map->name, prog->name, prog->sec_name, prog->type,
		prog->attach_btf_id, prog->expected_attach_type, name);
	return -EINVAL;
}
8872
8873#define BTF_TRACE_PREFIX "btf_trace_"
8874#define BTF_LSM_PREFIX "bpf_lsm_"
8875#define BTF_ITER_PREFIX "bpf_iter_"
8876#define BTF_MAX_NAME_SIZE 128
8877
8878static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
8879 const char *name, __u32 kind)
8880{
8881 char btf_type_name[BTF_MAX_NAME_SIZE];
8882 int ret;
8883
8884 ret = snprintf(btf_type_name, sizeof(btf_type_name),
8885 "%s%s", prefix, name);
8886
8887
8888
8889
8890 if (ret < 0 || ret >= sizeof(btf_type_name))
8891 return -ENAMETOOLONG;
8892 return btf__find_by_name_kind(btf, btf_type_name, kind);
8893}
8894
8895static inline int find_attach_btf_id(struct btf *btf, const char *name,
8896 enum bpf_attach_type attach_type)
8897{
8898 int err;
8899
8900 if (attach_type == BPF_TRACE_RAW_TP)
8901 err = find_btf_by_prefix_kind(btf, BTF_TRACE_PREFIX, name,
8902 BTF_KIND_TYPEDEF);
8903 else if (attach_type == BPF_LSM_MAC)
8904 err = find_btf_by_prefix_kind(btf, BTF_LSM_PREFIX, name,
8905 BTF_KIND_FUNC);
8906 else if (attach_type == BPF_TRACE_ITER)
8907 err = find_btf_by_prefix_kind(btf, BTF_ITER_PREFIX, name,
8908 BTF_KIND_FUNC);
8909 else
8910 err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
8911
8912 return err;
8913}
8914
8915int libbpf_find_vmlinux_btf_id(const char *name,
8916 enum bpf_attach_type attach_type)
8917{
8918 struct btf *btf;
8919 int err;
8920
8921 btf = libbpf_find_kernel_btf();
8922 if (IS_ERR(btf)) {
8923 pr_warn("vmlinux BTF is not found\n");
8924 return -EINVAL;
8925 }
8926
8927 err = find_attach_btf_id(btf, name, attach_type);
8928 if (err <= 0)
8929 pr_warn("%s is not found in vmlinux BTF\n", name);
8930
8931 btf__free(btf);
8932 return err;
8933}
8934
8935static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
8936{
8937 struct bpf_prog_info_linear *info_linear;
8938 struct bpf_prog_info *info;
8939 struct btf *btf = NULL;
8940 int err = -EINVAL;
8941
8942 info_linear = bpf_program__get_prog_info_linear(attach_prog_fd, 0);
8943 if (IS_ERR_OR_NULL(info_linear)) {
8944 pr_warn("failed get_prog_info_linear for FD %d\n",
8945 attach_prog_fd);
8946 return -EINVAL;
8947 }
8948 info = &info_linear->info;
8949 if (!info->btf_id) {
8950 pr_warn("The target program doesn't have BTF\n");
8951 goto out;
8952 }
8953 if (btf__get_from_id(info->btf_id, &btf)) {
8954 pr_warn("Failed to get BTF of the program\n");
8955 goto out;
8956 }
8957 err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
8958 btf__free(btf);
8959 if (err <= 0) {
8960 pr_warn("%s is not found in prog's BTF\n", name);
8961 goto out;
8962 }
8963out:
8964 free(info_linear);
8965 return err;
8966}
8967
8968static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name,
8969 enum bpf_attach_type attach_type,
8970 int *btf_obj_fd, int *btf_type_id)
8971{
8972 int ret, i;
8973
8974 ret = find_attach_btf_id(obj->btf_vmlinux, attach_name, attach_type);
8975 if (ret > 0) {
8976 *btf_obj_fd = 0;
8977 *btf_type_id = ret;
8978 return 0;
8979 }
8980 if (ret != -ENOENT)
8981 return ret;
8982
8983 ret = load_module_btfs(obj);
8984 if (ret)
8985 return ret;
8986
8987 for (i = 0; i < obj->btf_module_cnt; i++) {
8988 const struct module_btf *mod = &obj->btf_modules[i];
8989
8990 ret = find_attach_btf_id(mod->btf, attach_name, attach_type);
8991 if (ret > 0) {
8992 *btf_obj_fd = mod->fd;
8993 *btf_type_id = ret;
8994 return 0;
8995 }
8996 if (ret == -ENOENT)
8997 continue;
8998
8999 return ret;
9000 }
9001
9002 return -ESRCH;
9003}
9004
9005static int libbpf_find_attach_btf_id(struct bpf_program *prog, int *btf_obj_fd, int *btf_type_id)
9006{
9007 enum bpf_attach_type attach_type = prog->expected_attach_type;
9008 __u32 attach_prog_fd = prog->attach_prog_fd;
9009 const char *name = prog->sec_name, *attach_name;
9010 const struct bpf_sec_def *sec = NULL;
9011 int i, err;
9012
9013 if (!name)
9014 return -EINVAL;
9015
9016 for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
9017 if (!section_defs[i].is_attach_btf)
9018 continue;
9019 if (strncmp(name, section_defs[i].sec, section_defs[i].len))
9020 continue;
9021
9022 sec = §ion_defs[i];
9023 break;
9024 }
9025
9026 if (!sec) {
9027 pr_warn("failed to identify BTF ID based on ELF section name '%s'\n", name);
9028 return -ESRCH;
9029 }
9030 attach_name = name + sec->len;
9031
9032
9033 if (attach_prog_fd) {
9034 err = libbpf_find_prog_btf_id(attach_name, attach_prog_fd);
9035 if (err < 0) {
9036 pr_warn("failed to find BPF program (FD %d) BTF ID for '%s': %d\n",
9037 attach_prog_fd, attach_name, err);
9038 return err;
9039 }
9040 *btf_obj_fd = 0;
9041 *btf_type_id = err;
9042 return 0;
9043 }
9044
9045
9046 err = find_kernel_btf_id(prog->obj, attach_name, attach_type, btf_obj_fd, btf_type_id);
9047 if (err) {
9048 pr_warn("failed to find kernel BTF type ID of '%s': %d\n", attach_name, err);
9049 return err;
9050 }
9051 return 0;
9052}
9053
9054int libbpf_attach_type_by_name(const char *name,
9055 enum bpf_attach_type *attach_type)
9056{
9057 char *type_names;
9058 int i;
9059
9060 if (!name)
9061 return -EINVAL;
9062
9063 for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
9064 if (strncmp(name, section_defs[i].sec, section_defs[i].len))
9065 continue;
9066 if (!section_defs[i].is_attachable)
9067 return -EINVAL;
9068 *attach_type = section_defs[i].expected_attach_type;
9069 return 0;
9070 }
9071 pr_debug("failed to guess attach type based on ELF section name '%s'\n", name);
9072 type_names = libbpf_get_type_names(true);
9073 if (type_names != NULL) {
9074 pr_debug("attachable section(type) names are:%s\n", type_names);
9075 free(type_names);
9076 }
9077
9078 return -EINVAL;
9079}
9080
9081int bpf_map__fd(const struct bpf_map *map)
9082{
9083 return map ? map->fd : -EINVAL;
9084}
9085
9086const struct bpf_map_def *bpf_map__def(const struct bpf_map *map)
9087{
9088 return map ? &map->def : ERR_PTR(-EINVAL);
9089}
9090
9091const char *bpf_map__name(const struct bpf_map *map)
9092{
9093 return map ? map->name : NULL;
9094}
9095
/* Return the map's BPF map type. NOTE: unlike bpf_map__fd(), @map must be
 * non-NULL here — there is no NULL check before the dereference.
 */
enum bpf_map_type bpf_map__type(const struct bpf_map *map)
{
	return map->def.type;
}
9100
9101int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type)
9102{
9103 if (map->fd >= 0)
9104 return -EBUSY;
9105 map->def.type = type;
9106 return 0;
9107}
9108
/* Return the map's creation flags. @map must be non-NULL. */
__u32 bpf_map__map_flags(const struct bpf_map *map)
{
	return map->def.map_flags;
}
9113
9114int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags)
9115{
9116 if (map->fd >= 0)
9117 return -EBUSY;
9118 map->def.map_flags = flags;
9119 return 0;
9120}
9121
/* Return the NUMA node the map will be created on. @map must be non-NULL. */
__u32 bpf_map__numa_node(const struct bpf_map *map)
{
	return map->numa_node;
}
9126
9127int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node)
9128{
9129 if (map->fd >= 0)
9130 return -EBUSY;
9131 map->numa_node = numa_node;
9132 return 0;
9133}
9134
/* Return the map's key size in bytes. @map must be non-NULL. */
__u32 bpf_map__key_size(const struct bpf_map *map)
{
	return map->def.key_size;
}
9139
9140int bpf_map__set_key_size(struct bpf_map *map, __u32 size)
9141{
9142 if (map->fd >= 0)
9143 return -EBUSY;
9144 map->def.key_size = size;
9145 return 0;
9146}
9147
/* Return the map's value size in bytes. @map must be non-NULL. */
__u32 bpf_map__value_size(const struct bpf_map *map)
{
	return map->def.value_size;
}
9152
9153int bpf_map__set_value_size(struct bpf_map *map, __u32 size)
9154{
9155 if (map->fd >= 0)
9156 return -EBUSY;
9157 map->def.value_size = size;
9158 return 0;
9159}
9160
9161__u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
9162{
9163 return map ? map->btf_key_type_id : 0;
9164}
9165
9166__u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
9167{
9168 return map ? map->btf_value_type_id : 0;
9169}
9170
9171int bpf_map__set_priv(struct bpf_map *map, void *priv,
9172 bpf_map_clear_priv_t clear_priv)
9173{
9174 if (!map)
9175 return -EINVAL;
9176
9177 if (map->priv) {
9178 if (map->clear_priv)
9179 map->clear_priv(map, map->priv);
9180 }
9181
9182 map->priv = priv;
9183 map->clear_priv = clear_priv;
9184 return 0;
9185}
9186
9187void *bpf_map__priv(const struct bpf_map *map)
9188{
9189 return map ? map->priv : ERR_PTR(-EINVAL);
9190}
9191
9192int bpf_map__set_initial_value(struct bpf_map *map,
9193 const void *data, size_t size)
9194{
9195 if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG ||
9196 size != map->def.value_size || map->fd >= 0)
9197 return -EINVAL;
9198
9199 memcpy(map->mmaped, data, size);
9200 return 0;
9201}
9202
/* True if the map works regardless of HW offload (only perf event arrays
 * qualify here) and thus should not inherit a device ifindex.
 */
bool bpf_map__is_offload_neutral(const struct bpf_map *map)
{
	return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}
9207
/* True if the map is libbpf-internal (.data/.rodata/.bss/.kconfig etc.),
 * i.e. not explicitly declared by user code.
 */
bool bpf_map__is_internal(const struct bpf_map *map)
{
	return map->libbpf_type != LIBBPF_MAP_UNSPEC;
}
9212
/* Return the network device ifindex for HW-offloaded maps (0 if none). */
__u32 bpf_map__ifindex(const struct bpf_map *map)
{
	return map->map_ifindex;
}
9217
9218int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
9219{
9220 if (map->fd >= 0)
9221 return -EBUSY;
9222 map->map_ifindex = ifindex;
9223 return 0;
9224}
9225
9226int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
9227{
9228 if (!bpf_map_type__is_map_in_map(map->def.type)) {
9229 pr_warn("error: unsupported map type\n");
9230 return -EINVAL;
9231 }
9232 if (map->inner_map_fd != -1) {
9233 pr_warn("error: inner_map_fd already specified\n");
9234 return -EINVAL;
9235 }
9236 map->inner_map_fd = fd;
9237 return 0;
9238}
9239
9240static struct bpf_map *
9241__bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
9242{
9243 ssize_t idx;
9244 struct bpf_map *s, *e;
9245
9246 if (!obj || !obj->maps)
9247 return NULL;
9248
9249 s = obj->maps;
9250 e = obj->maps + obj->nr_maps;
9251
9252 if ((m < s) || (m >= e)) {
9253 pr_warn("error in %s: map handler doesn't belong to object\n",
9254 __func__);
9255 return NULL;
9256 }
9257
9258 idx = (m - obj->maps) + i;
9259 if (idx >= obj->nr_maps || idx < 0)
9260 return NULL;
9261 return &obj->maps[idx];
9262}
9263
9264struct bpf_map *
9265bpf_map__next(const struct bpf_map *prev, const struct bpf_object *obj)
9266{
9267 if (prev == NULL)
9268 return obj->maps;
9269
9270 return __bpf_map__iter(prev, obj, 1);
9271}
9272
9273struct bpf_map *
9274bpf_map__prev(const struct bpf_map *next, const struct bpf_object *obj)
9275{
9276 if (next == NULL) {
9277 if (!obj->nr_maps)
9278 return NULL;
9279 return obj->maps + obj->nr_maps - 1;
9280 }
9281
9282 return __bpf_map__iter(next, obj, -1);
9283}
9284
9285struct bpf_map *
9286bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name)
9287{
9288 struct bpf_map *pos;
9289
9290 bpf_object__for_each_map(pos, obj) {
9291 if (pos->name && !strcmp(pos->name, name))
9292 return pos;
9293 }
9294 return NULL;
9295}
9296
/* Convenience: look up a map by name and return its FD (-EINVAL if absent). */
int
bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name)
{
	return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
}
9302
/* Deprecated/unsupported lookup by section offset; always fails. */
struct bpf_map *
bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
{
	return ERR_PTR(-ENOTSUP);
}
9308
/* Extract the error code from an ERR_PTR-style return, 0 for valid pointers. */
long libbpf_get_error(const void *ptr)
{
	return PTR_ERR_OR_ZERO(ptr);
}
9313
9314int bpf_prog_load(const char *file, enum bpf_prog_type type,
9315 struct bpf_object **pobj, int *prog_fd)
9316{
9317 struct bpf_prog_load_attr attr;
9318
9319 memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
9320 attr.file = file;
9321 attr.prog_type = type;
9322 attr.expected_attach_type = 0;
9323
9324 return bpf_prog_load_xattr(&attr, pobj, prog_fd);
9325}
9326
/* Open and load all programs in an ELF object file. On success, *pobj is the
 * opened object and *prog_fd is the FD of the first program. Program types
 * are forced from @attr when given, otherwise guessed from section names.
 */
int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
			struct bpf_object **pobj, int *prog_fd)
{
	struct bpf_object_open_attr open_attr = {};
	struct bpf_program *prog, *first_prog = NULL;
	struct bpf_object *obj;
	struct bpf_map *map;
	int err;

	if (!attr)
		return -EINVAL;
	if (!attr->file)
		return -EINVAL;

	open_attr.file = attr->file;
	open_attr.prog_type = attr->prog_type;

	obj = bpf_object__open_xattr(&open_attr);
	if (IS_ERR_OR_NULL(obj))
		return -ENOENT;

	bpf_object__for_each_program(prog, obj) {
		enum bpf_attach_type attach_type = attr->expected_attach_type;

		/* If the caller specified a program type, force it on every
		 * program; otherwise each program keeps the type guessed
		 * from its ELF section name at open time.
		 */
		if (attr->prog_type != BPF_PROG_TYPE_UNSPEC) {
			bpf_program__set_type(prog, attr->prog_type);
			bpf_program__set_expected_attach_type(prog,
							      attach_type);
		}
		if (bpf_program__get_type(prog) == BPF_PROG_TYPE_UNSPEC) {
			/* Type couldn't be guessed and the caller didn't
			 * provide one — refuse to load.
			 */
			bpf_object__close(obj);
			return -EINVAL;
		}

		prog->prog_ifindex = attr->ifindex;
		prog->log_level = attr->log_level;
		prog->prog_flags |= attr->prog_flags;
		if (!first_prog)
			first_prog = prog;
	}

	/* propagate offload ifindex to all offload-sensitive maps */
	bpf_object__for_each_map(map, obj) {
		if (!bpf_map__is_offload_neutral(map))
			map->map_ifindex = attr->ifindex;
	}

	if (!first_prog) {
		pr_warn("object file doesn't contain bpf program\n");
		bpf_object__close(obj);
		return -ENOENT;
	}

	err = bpf_object__load(obj);
	if (err) {
		bpf_object__close(obj);
		return err;
	}

	*pobj = obj;
	*prog_fd = bpf_program__fd(first_prog);
	return 0;
}
9397
/* Generic handle to an attached BPF program. Concrete attach kinds fill in
 * the callbacks appropriate for their teardown semantics.
 */
struct bpf_link {
	int (*detach)(struct bpf_link *link);	/* detach from kernel hook */
	int (*destroy)(struct bpf_link *link);	/* optional extra cleanup */
	char *pin_path;		/* NULL if not pinned in BPF FS */
	int fd;
	bool disconnected;	/* if set, destroy skips detach */
};
9405
9406
/* Atomically swap the program backing an existing kernel bpf_link. */
int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog)
{
	return bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), NULL);
}
9411
9412
9413
9414
9415
9416
9417
9418
9419
9420
9421
/* Mark the link disconnected: bpf_link__destroy() will free resources but
 * will NOT detach the underlying BPF hook, leaving it attached.
 */
void bpf_link__disconnect(struct bpf_link *link)
{
	link->disconnected = true;
}
9426
9427int bpf_link__destroy(struct bpf_link *link)
9428{
9429 int err = 0;
9430
9431 if (IS_ERR_OR_NULL(link))
9432 return 0;
9433
9434 if (!link->disconnected && link->detach)
9435 err = link->detach(link);
9436 if (link->destroy)
9437 link->destroy(link);
9438 if (link->pin_path)
9439 free(link->pin_path);
9440 free(link);
9441
9442 return err;
9443}
9444
/* Return the link's underlying FD. @link must be non-NULL. */
int bpf_link__fd(const struct bpf_link *link)
{
	return link->fd;
}
9449
/* Return the BPF FS pin path, or NULL if the link isn't pinned. */
const char *bpf_link__pin_path(const struct bpf_link *link)
{
	return link->pin_path;
}
9454
/* Default detach for FD-backed links: closing the FD detaches the link. */
static int bpf_link__detach_fd(struct bpf_link *link)
{
	return close(link->fd);
}
9459
9460struct bpf_link *bpf_link__open(const char *path)
9461{
9462 struct bpf_link *link;
9463 int fd;
9464
9465 fd = bpf_obj_get(path);
9466 if (fd < 0) {
9467 fd = -errno;
9468 pr_warn("failed to open link at %s: %d\n", path, fd);
9469 return ERR_PTR(fd);
9470 }
9471
9472 link = calloc(1, sizeof(*link));
9473 if (!link) {
9474 close(fd);
9475 return ERR_PTR(-ENOMEM);
9476 }
9477 link->detach = &bpf_link__detach_fd;
9478 link->fd = fd;
9479
9480 link->pin_path = strdup(path);
9481 if (!link->pin_path) {
9482 bpf_link__destroy(link);
9483 return ERR_PTR(-ENOMEM);
9484 }
9485
9486 return link;
9487}
9488
9489int bpf_link__detach(struct bpf_link *link)
9490{
9491 return bpf_link_detach(link->fd) ? -errno : 0;
9492}
9493
9494int bpf_link__pin(struct bpf_link *link, const char *path)
9495{
9496 int err;
9497
9498 if (link->pin_path)
9499 return -EBUSY;
9500 err = make_parent_dir(path);
9501 if (err)
9502 return err;
9503 err = check_path(path);
9504 if (err)
9505 return err;
9506
9507 link->pin_path = strdup(path);
9508 if (!link->pin_path)
9509 return -ENOMEM;
9510
9511 if (bpf_obj_pin(link->fd, link->pin_path)) {
9512 err = -errno;
9513 zfree(&link->pin_path);
9514 return err;
9515 }
9516
9517 pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path);
9518 return 0;
9519}
9520
9521int bpf_link__unpin(struct bpf_link *link)
9522{
9523 int err;
9524
9525 if (!link->pin_path)
9526 return -EINVAL;
9527
9528 err = unlink(link->pin_path);
9529 if (err != 0)
9530 return -errno;
9531
9532 pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path);
9533 zfree(&link->pin_path);
9534 return 0;
9535}
9536
9537static int bpf_link__detach_perf_event(struct bpf_link *link)
9538{
9539 int err;
9540
9541 err = ioctl(link->fd, PERF_EVENT_IOC_DISABLE, 0);
9542 if (err)
9543 err = -errno;
9544
9545 close(link->fd);
9546 return err;
9547}
9548
9549struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog,
9550 int pfd)
9551{
9552 char errmsg[STRERR_BUFSIZE];
9553 struct bpf_link *link;
9554 int prog_fd, err;
9555
9556 if (pfd < 0) {
9557 pr_warn("prog '%s': invalid perf event FD %d\n",
9558 prog->name, pfd);
9559 return ERR_PTR(-EINVAL);
9560 }
9561 prog_fd = bpf_program__fd(prog);
9562 if (prog_fd < 0) {
9563 pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n",
9564 prog->name);
9565 return ERR_PTR(-EINVAL);
9566 }
9567
9568 link = calloc(1, sizeof(*link));
9569 if (!link)
9570 return ERR_PTR(-ENOMEM);
9571 link->detach = &bpf_link__detach_perf_event;
9572 link->fd = pfd;
9573
9574 if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
9575 err = -errno;
9576 free(link);
9577 pr_warn("prog '%s': failed to attach to pfd %d: %s\n",
9578 prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9579 if (err == -EPROTO)
9580 pr_warn("prog '%s': try add PERF_SAMPLE_CALLCHAIN to or remove exclude_callchain_[kernel|user] from pfd %d\n",
9581 prog->name, pfd);
9582 return ERR_PTR(err);
9583 }
9584 if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
9585 err = -errno;
9586 free(link);
9587 pr_warn("prog '%s': failed to enable pfd %d: %s\n",
9588 prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9589 return ERR_PTR(err);
9590 }
9591 return link;
9592}
9593
9594
9595
9596
9597
9598
9599static int parse_uint_from_file(const char *file, const char *fmt)
9600{
9601 char buf[STRERR_BUFSIZE];
9602 int err, ret;
9603 FILE *f;
9604
9605 f = fopen(file, "r");
9606 if (!f) {
9607 err = -errno;
9608 pr_debug("failed to open '%s': %s\n", file,
9609 libbpf_strerror_r(err, buf, sizeof(buf)));
9610 return err;
9611 }
9612 err = fscanf(f, fmt, &ret);
9613 if (err != 1) {
9614 err = err == EOF ? -EIO : -errno;
9615 pr_debug("failed to parse '%s': %s\n", file,
9616 libbpf_strerror_r(err, buf, sizeof(buf)));
9617 fclose(f);
9618 return err;
9619 }
9620 fclose(f);
9621 return ret;
9622}
9623
/* Read the dynamic PMU type ID for kprobe perf events from sysfs. */
static int determine_kprobe_perf_type(void)
{
	return parse_uint_from_file("/sys/bus/event_source/devices/kprobe/type",
				    "%d\n");
}
9630
/* Read the dynamic PMU type ID for uprobe perf events from sysfs. */
static int determine_uprobe_perf_type(void)
{
	return parse_uint_from_file("/sys/bus/event_source/devices/uprobe/type",
				    "%d\n");
}
9637
/* Read which attr.config bit marks a kretprobe on the kprobe PMU. */
static int determine_kprobe_retprobe_bit(void)
{
	return parse_uint_from_file("/sys/bus/event_source/devices/kprobe/format/retprobe",
				    "config:%d\n");
}
9644
/* Read which attr.config bit marks a uretprobe on the uprobe PMU. */
static int determine_uprobe_retprobe_bit(void)
{
	return parse_uint_from_file("/sys/bus/event_source/devices/uprobe/format/retprobe",
				    "config:%d\n");
}
9651
9652static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
9653 uint64_t offset, int pid)
9654{
9655 struct perf_event_attr attr = {};
9656 char errmsg[STRERR_BUFSIZE];
9657 int type, pfd, err;
9658
9659 type = uprobe ? determine_uprobe_perf_type()
9660 : determine_kprobe_perf_type();
9661 if (type < 0) {
9662 pr_warn("failed to determine %s perf type: %s\n",
9663 uprobe ? "uprobe" : "kprobe",
9664 libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
9665 return type;
9666 }
9667 if (retprobe) {
9668 int bit = uprobe ? determine_uprobe_retprobe_bit()
9669 : determine_kprobe_retprobe_bit();
9670
9671 if (bit < 0) {
9672 pr_warn("failed to determine %s retprobe bit: %s\n",
9673 uprobe ? "uprobe" : "kprobe",
9674 libbpf_strerror_r(bit, errmsg, sizeof(errmsg)));
9675 return bit;
9676 }
9677 attr.config |= 1 << bit;
9678 }
9679 attr.size = sizeof(attr);
9680 attr.type = type;
9681 attr.config1 = ptr_to_u64(name);
9682 attr.config2 = offset;
9683
9684
9685 pfd = syscall(__NR_perf_event_open, &attr,
9686 pid < 0 ? -1 : pid ,
9687 pid == -1 ? 0 : -1 ,
9688 -1 , PERF_FLAG_FD_CLOEXEC);
9689 if (pfd < 0) {
9690 err = -errno;
9691 pr_warn("%s perf_event_open() failed: %s\n",
9692 uprobe ? "uprobe" : "kprobe",
9693 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9694 return err;
9695 }
9696 return pfd;
9697}
9698
9699struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog,
9700 bool retprobe,
9701 const char *func_name)
9702{
9703 char errmsg[STRERR_BUFSIZE];
9704 struct bpf_link *link;
9705 int pfd, err;
9706
9707 pfd = perf_event_open_probe(false , retprobe, func_name,
9708 0 , -1 );
9709 if (pfd < 0) {
9710 pr_warn("prog '%s': failed to create %s '%s' perf event: %s\n",
9711 prog->name, retprobe ? "kretprobe" : "kprobe", func_name,
9712 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
9713 return ERR_PTR(pfd);
9714 }
9715 link = bpf_program__attach_perf_event(prog, pfd);
9716 if (IS_ERR(link)) {
9717 close(pfd);
9718 err = PTR_ERR(link);
9719 pr_warn("prog '%s': failed to attach to %s '%s': %s\n",
9720 prog->name, retprobe ? "kretprobe" : "kprobe", func_name,
9721 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9722 return link;
9723 }
9724 return link;
9725}
9726
9727static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec,
9728 struct bpf_program *prog)
9729{
9730 const char *func_name;
9731 bool retprobe;
9732
9733 func_name = prog->sec_name + sec->len;
9734 retprobe = strcmp(sec->sec, "kretprobe/") == 0;
9735
9736 return bpf_program__attach_kprobe(prog, retprobe, func_name);
9737}
9738
9739struct bpf_link *bpf_program__attach_uprobe(struct bpf_program *prog,
9740 bool retprobe, pid_t pid,
9741 const char *binary_path,
9742 size_t func_offset)
9743{
9744 char errmsg[STRERR_BUFSIZE];
9745 struct bpf_link *link;
9746 int pfd, err;
9747
9748 pfd = perf_event_open_probe(true , retprobe,
9749 binary_path, func_offset, pid);
9750 if (pfd < 0) {
9751 pr_warn("prog '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
9752 prog->name, retprobe ? "uretprobe" : "uprobe",
9753 binary_path, func_offset,
9754 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
9755 return ERR_PTR(pfd);
9756 }
9757 link = bpf_program__attach_perf_event(prog, pfd);
9758 if (IS_ERR(link)) {
9759 close(pfd);
9760 err = PTR_ERR(link);
9761 pr_warn("prog '%s': failed to attach to %s '%s:0x%zx': %s\n",
9762 prog->name, retprobe ? "uretprobe" : "uprobe",
9763 binary_path, func_offset,
9764 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9765 return link;
9766 }
9767 return link;
9768}
9769
9770static int determine_tracepoint_id(const char *tp_category,
9771 const char *tp_name)
9772{
9773 char file[PATH_MAX];
9774 int ret;
9775
9776 ret = snprintf(file, sizeof(file),
9777 "/sys/kernel/debug/tracing/events/%s/%s/id",
9778 tp_category, tp_name);
9779 if (ret < 0)
9780 return -errno;
9781 if (ret >= sizeof(file)) {
9782 pr_debug("tracepoint %s/%s path is too long\n",
9783 tp_category, tp_name);
9784 return -E2BIG;
9785 }
9786 return parse_uint_from_file(file, "%d\n");
9787}
9788
9789static int perf_event_open_tracepoint(const char *tp_category,
9790 const char *tp_name)
9791{
9792 struct perf_event_attr attr = {};
9793 char errmsg[STRERR_BUFSIZE];
9794 int tp_id, pfd, err;
9795
9796 tp_id = determine_tracepoint_id(tp_category, tp_name);
9797 if (tp_id < 0) {
9798 pr_warn("failed to determine tracepoint '%s/%s' perf event ID: %s\n",
9799 tp_category, tp_name,
9800 libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg)));
9801 return tp_id;
9802 }
9803
9804 attr.type = PERF_TYPE_TRACEPOINT;
9805 attr.size = sizeof(attr);
9806 attr.config = tp_id;
9807
9808 pfd = syscall(__NR_perf_event_open, &attr, -1 , 0 ,
9809 -1 , PERF_FLAG_FD_CLOEXEC);
9810 if (pfd < 0) {
9811 err = -errno;
9812 pr_warn("tracepoint '%s/%s' perf_event_open() failed: %s\n",
9813 tp_category, tp_name,
9814 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9815 return err;
9816 }
9817 return pfd;
9818}
9819
9820struct bpf_link *bpf_program__attach_tracepoint(struct bpf_program *prog,
9821 const char *tp_category,
9822 const char *tp_name)
9823{
9824 char errmsg[STRERR_BUFSIZE];
9825 struct bpf_link *link;
9826 int pfd, err;
9827
9828 pfd = perf_event_open_tracepoint(tp_category, tp_name);
9829 if (pfd < 0) {
9830 pr_warn("prog '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
9831 prog->name, tp_category, tp_name,
9832 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
9833 return ERR_PTR(pfd);
9834 }
9835 link = bpf_program__attach_perf_event(prog, pfd);
9836 if (IS_ERR(link)) {
9837 close(pfd);
9838 err = PTR_ERR(link);
9839 pr_warn("prog '%s': failed to attach to tracepoint '%s/%s': %s\n",
9840 prog->name, tp_category, tp_name,
9841 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9842 return link;
9843 }
9844 return link;
9845}
9846
9847static struct bpf_link *attach_tp(const struct bpf_sec_def *sec,
9848 struct bpf_program *prog)
9849{
9850 char *sec_name, *tp_cat, *tp_name;
9851 struct bpf_link *link;
9852
9853 sec_name = strdup(prog->sec_name);
9854 if (!sec_name)
9855 return ERR_PTR(-ENOMEM);
9856
9857
9858 tp_cat = sec_name + sec->len;
9859 tp_name = strchr(tp_cat, '/');
9860 if (!tp_name) {
9861 link = ERR_PTR(-EINVAL);
9862 goto out;
9863 }
9864 *tp_name = '\0';
9865 tp_name++;
9866
9867 link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name);
9868out:
9869 free(sec_name);
9870 return link;
9871}
9872
9873struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
9874 const char *tp_name)
9875{
9876 char errmsg[STRERR_BUFSIZE];
9877 struct bpf_link *link;
9878 int prog_fd, pfd;
9879
9880 prog_fd = bpf_program__fd(prog);
9881 if (prog_fd < 0) {
9882 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
9883 return ERR_PTR(-EINVAL);
9884 }
9885
9886 link = calloc(1, sizeof(*link));
9887 if (!link)
9888 return ERR_PTR(-ENOMEM);
9889 link->detach = &bpf_link__detach_fd;
9890
9891 pfd = bpf_raw_tracepoint_open(tp_name, prog_fd);
9892 if (pfd < 0) {
9893 pfd = -errno;
9894 free(link);
9895 pr_warn("prog '%s': failed to attach to raw tracepoint '%s': %s\n",
9896 prog->name, tp_name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
9897 return ERR_PTR(pfd);
9898 }
9899 link->fd = pfd;
9900 return link;
9901}
9902
9903static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
9904 struct bpf_program *prog)
9905{
9906 const char *tp_name = prog->sec_name + sec->len;
9907
9908 return bpf_program__attach_raw_tracepoint(prog, tp_name);
9909}
9910
9911
9912static struct bpf_link *bpf_program__attach_btf_id(struct bpf_program *prog)
9913{
9914 char errmsg[STRERR_BUFSIZE];
9915 struct bpf_link *link;
9916 int prog_fd, pfd;
9917
9918 prog_fd = bpf_program__fd(prog);
9919 if (prog_fd < 0) {
9920 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
9921 return ERR_PTR(-EINVAL);
9922 }
9923
9924 link = calloc(1, sizeof(*link));
9925 if (!link)
9926 return ERR_PTR(-ENOMEM);
9927 link->detach = &bpf_link__detach_fd;
9928
9929 pfd = bpf_raw_tracepoint_open(NULL, prog_fd);
9930 if (pfd < 0) {
9931 pfd = -errno;
9932 free(link);
9933 pr_warn("prog '%s': failed to attach: %s\n",
9934 prog->name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
9935 return ERR_PTR(pfd);
9936 }
9937 link->fd = pfd;
9938 return (struct bpf_link *)link;
9939}
9940
/* Attach an fentry/fexit/fmod_ret-style tracing program. */
struct bpf_link *bpf_program__attach_trace(struct bpf_program *prog)
{
	return bpf_program__attach_btf_id(prog);
}
9945
/* Attach an LSM hook program. */
struct bpf_link *bpf_program__attach_lsm(struct bpf_program *prog)
{
	return bpf_program__attach_btf_id(prog);
}
9950
/* Section-def auto-attach handler for tracing programs. */
static struct bpf_link *attach_trace(const struct bpf_sec_def *sec,
				     struct bpf_program *prog)
{
	return bpf_program__attach_trace(prog);
}
9956
/* Section-def auto-attach handler for LSM programs. */
static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec,
				   struct bpf_program *prog)
{
	return bpf_program__attach_lsm(prog);
}
9962
/* Section-def auto-attach handler for BPF iterator programs (no options). */
static struct bpf_link *attach_iter(const struct bpf_sec_def *sec,
				    struct bpf_program *prog)
{
	return bpf_program__attach_iter(prog, NULL);
}
9968
9969static struct bpf_link *
9970bpf_program__attach_fd(struct bpf_program *prog, int target_fd, int btf_id,
9971 const char *target_name)
9972{
9973 DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts,
9974 .target_btf_id = btf_id);
9975 enum bpf_attach_type attach_type;
9976 char errmsg[STRERR_BUFSIZE];
9977 struct bpf_link *link;
9978 int prog_fd, link_fd;
9979
9980 prog_fd = bpf_program__fd(prog);
9981 if (prog_fd < 0) {
9982 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
9983 return ERR_PTR(-EINVAL);
9984 }
9985
9986 link = calloc(1, sizeof(*link));
9987 if (!link)
9988 return ERR_PTR(-ENOMEM);
9989 link->detach = &bpf_link__detach_fd;
9990
9991 attach_type = bpf_program__get_expected_attach_type(prog);
9992 link_fd = bpf_link_create(prog_fd, target_fd, attach_type, &opts);
9993 if (link_fd < 0) {
9994 link_fd = -errno;
9995 free(link);
9996 pr_warn("prog '%s': failed to attach to %s: %s\n",
9997 prog->name, target_name,
9998 libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
9999 return ERR_PTR(link_fd);
10000 }
10001 link->fd = link_fd;
10002 return link;
10003}
10004
/* Attach via BPF_LINK_CREATE with the cgroup FD as the link target. */
struct bpf_link *
bpf_program__attach_cgroup(struct bpf_program *prog, int cgroup_fd)
{
	return bpf_program__attach_fd(prog, cgroup_fd, 0, "cgroup");
}

/* Attach via BPF_LINK_CREATE with the netns FD as the link target. */
struct bpf_link *
bpf_program__attach_netns(struct bpf_program *prog, int netns_fd)
{
	return bpf_program__attach_fd(prog, netns_fd, 0, "netns");
}

struct bpf_link *bpf_program__attach_xdp(struct bpf_program *prog, int ifindex)
{
	/* target_fd/target_ifindex use the same field in LINK_CREATE */
	return bpf_program__attach_fd(prog, ifindex, 0, "xdp");
}
10022
10023struct bpf_link *bpf_program__attach_freplace(struct bpf_program *prog,
10024 int target_fd,
10025 const char *attach_func_name)
10026{
10027 int btf_id;
10028
10029 if (!!target_fd != !!attach_func_name) {
10030 pr_warn("prog '%s': supply none or both of target_fd and attach_func_name\n",
10031 prog->name);
10032 return ERR_PTR(-EINVAL);
10033 }
10034
10035 if (prog->type != BPF_PROG_TYPE_EXT) {
10036 pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace",
10037 prog->name);
10038 return ERR_PTR(-EINVAL);
10039 }
10040
10041 if (target_fd) {
10042 btf_id = libbpf_find_prog_btf_id(attach_func_name, target_fd);
10043 if (btf_id < 0)
10044 return ERR_PTR(btf_id);
10045
10046 return bpf_program__attach_fd(prog, target_fd, btf_id, "freplace");
10047 } else {
10048
10049
10050
10051 return bpf_program__attach_trace(prog);
10052 }
10053}
10054
10055struct bpf_link *
10056bpf_program__attach_iter(struct bpf_program *prog,
10057 const struct bpf_iter_attach_opts *opts)
10058{
10059 DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
10060 char errmsg[STRERR_BUFSIZE];
10061 struct bpf_link *link;
10062 int prog_fd, link_fd;
10063 __u32 target_fd = 0;
10064
10065 if (!OPTS_VALID(opts, bpf_iter_attach_opts))
10066 return ERR_PTR(-EINVAL);
10067
10068 link_create_opts.iter_info = OPTS_GET(opts, link_info, (void *)0);
10069 link_create_opts.iter_info_len = OPTS_GET(opts, link_info_len, 0);
10070
10071 prog_fd = bpf_program__fd(prog);
10072 if (prog_fd < 0) {
10073 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
10074 return ERR_PTR(-EINVAL);
10075 }
10076
10077 link = calloc(1, sizeof(*link));
10078 if (!link)
10079 return ERR_PTR(-ENOMEM);
10080 link->detach = &bpf_link__detach_fd;
10081
10082 link_fd = bpf_link_create(prog_fd, target_fd, BPF_TRACE_ITER,
10083 &link_create_opts);
10084 if (link_fd < 0) {
10085 link_fd = -errno;
10086 free(link);
10087 pr_warn("prog '%s': failed to attach to iterator: %s\n",
10088 prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
10089 return ERR_PTR(link_fd);
10090 }
10091 link->fd = link_fd;
10092 return link;
10093}
10094
10095struct bpf_link *bpf_program__attach(struct bpf_program *prog)
10096{
10097 const struct bpf_sec_def *sec_def;
10098
10099 sec_def = find_sec_def(prog->sec_name);
10100 if (!sec_def || !sec_def->attach_fn)
10101 return ERR_PTR(-ESRCH);
10102
10103 return sec_def->attach_fn(sec_def, prog);
10104}
10105
10106static int bpf_link__detach_struct_ops(struct bpf_link *link)
10107{
10108 __u32 zero = 0;
10109
10110 if (bpf_map_delete_elem(link->fd, &zero))
10111 return -errno;
10112
10113 return 0;
10114}
10115
10116struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map)
10117{
10118 struct bpf_struct_ops *st_ops;
10119 struct bpf_link *link;
10120 __u32 i, zero = 0;
10121 int err;
10122
10123 if (!bpf_map__is_struct_ops(map) || map->fd == -1)
10124 return ERR_PTR(-EINVAL);
10125
10126 link = calloc(1, sizeof(*link));
10127 if (!link)
10128 return ERR_PTR(-EINVAL);
10129
10130 st_ops = map->st_ops;
10131 for (i = 0; i < btf_vlen(st_ops->type); i++) {
10132 struct bpf_program *prog = st_ops->progs[i];
10133 void *kern_data;
10134 int prog_fd;
10135
10136 if (!prog)
10137 continue;
10138
10139 prog_fd = bpf_program__fd(prog);
10140 kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i];
10141 *(unsigned long *)kern_data = prog_fd;
10142 }
10143
10144 err = bpf_map_update_elem(map->fd, &zero, st_ops->kern_vdata, 0);
10145 if (err) {
10146 err = -errno;
10147 free(link);
10148 return ERR_PTR(err);
10149 }
10150
10151 link->detach = bpf_link__detach_struct_ops;
10152 link->fd = map->fd;
10153
10154 return link;
10155}
10156
/* Consume all complete records currently in one mmap()'ed perf ring
 * buffer and pass each record to 'fn'.
 *
 * mmap_mem/mmap_size describe the mapping: one metadata page of
 * 'page_size' bytes (struct perf_event_mmap_page) followed by a data
 * area whose size must be a power of two (the tail is masked with
 * mmap_size - 1).  Records that wrap past the end of the data area are
 * linearized into *copy_mem, which is grown on demand; *copy_size
 * tracks its capacity and the caller owns/frees the buffer.
 *
 * The ring's tail is advanced past every consumed record.  Returns the
 * last value 'fn' returned (LIBBPF_PERF_EVENT_CONT if the ring was
 * drained), or LIBBPF_PERF_EVENT_ERROR on allocation failure.
 */
enum bpf_perf_event_ret
bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
			   void **copy_mem, size_t *copy_size,
			   bpf_perf_event_print_t fn, void *private_data)
{
	struct perf_event_mmap_page *header = mmap_mem;
	__u64 data_head = ring_buffer_read_head(header);
	__u64 data_tail = header->data_tail;
	void *base = ((__u8 *)header) + page_size;
	int ret = LIBBPF_PERF_EVENT_CONT;
	struct perf_event_header *ehdr;
	size_t ehdr_size;

	while (data_head != data_tail) {
		ehdr = base + (data_tail & (mmap_size - 1));
		ehdr_size = ehdr->size;

		if (((void *)ehdr) + ehdr_size > base + mmap_size) {
			/* record wraps around the end of the data area:
			 * copy both halves into a linear scratch buffer
			 */
			void *copy_start = ehdr;
			size_t len_first = base + mmap_size - copy_start;
			size_t len_secnd = ehdr_size - len_first;

			if (*copy_size < ehdr_size) {
				free(*copy_mem);
				*copy_mem = malloc(ehdr_size);
				if (!*copy_mem) {
					*copy_size = 0;
					ret = LIBBPF_PERF_EVENT_ERROR;
					break;
				}
				*copy_size = ehdr_size;
			}

			memcpy(*copy_mem, copy_start, len_first);
			memcpy(*copy_mem + len_first, base, len_secnd);
			ehdr = *copy_mem;
		}

		ret = fn(ehdr, private_data);
		data_tail += ehdr_size;
		if (ret != LIBBPF_PERF_EVENT_CONT)
			break;
	}

	/* acknowledge consumed records to the kernel */
	ring_buffer_write_tail(header, data_tail);
	return ret;
}
10204
struct perf_buffer;

/* Parameters gathered by perf_buffer__new()/perf_buffer__new_raw()
 * for __perf_buffer__new().
 */
struct perf_buffer_params {
	struct perf_event_attr *attr;
	/* if event_cb is set, it takes precedence over sample_cb/lost_cb
	 * (see perf_buffer__process_record())
	 */
	perf_buffer_event_fn event_cb;
	/* sample_cb and lost_cb are the higher-level callbacks */
	perf_buffer_sample_fn sample_cb;
	perf_buffer_lost_fn lost_cb;
	void *ctx;
	int cpu_cnt;	/* if > 0, cpus[] and map_keys[] below are used */
	int *cpus;
	int *map_keys;
};

/* State of a single mmap()'ed per-CPU perf ring buffer. */
struct perf_cpu_buf {
	struct perf_buffer *pb;	/* back-pointer to the owning perf_buffer */
	void *base;		/* mmap()'ed memory: metadata page + data */
	void *buf;		/* scratch space for records that wrap around
				 * the end of the ring (see
				 * bpf_perf_event_read_simple())
				 */
	size_t buf_size;	/* current capacity of 'buf' */
	int fd;			/* perf event FD */
	int cpu;		/* CPU the event was opened on */
	int map_key;		/* this buffer's key in the perf-event map */
};

/* A set of per-CPU perf ring buffers multiplexed over one epoll FD. */
struct perf_buffer {
	perf_buffer_event_fn event_cb;
	perf_buffer_sample_fn sample_cb;
	perf_buffer_lost_fn lost_cb;
	void *ctx;		/* opaque pointer passed to all callbacks */

	size_t page_size;	/* system page size */
	size_t mmap_size;	/* data size of each per-CPU ring */
	struct perf_cpu_buf **cpu_bufs;
	struct epoll_event *events;
	int cpu_cnt;		/* number of allocated per-CPU buffers */
	int epoll_fd;		/* epoll instance covering all buffer FDs */
	int map_fd;		/* FD of the underlying perf-event map */
};
10244
10245static void perf_buffer__free_cpu_buf(struct perf_buffer *pb,
10246 struct perf_cpu_buf *cpu_buf)
10247{
10248 if (!cpu_buf)
10249 return;
10250 if (cpu_buf->base &&
10251 munmap(cpu_buf->base, pb->mmap_size + pb->page_size))
10252 pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu);
10253 if (cpu_buf->fd >= 0) {
10254 ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0);
10255 close(cpu_buf->fd);
10256 }
10257 free(cpu_buf->buf);
10258 free(cpu_buf);
10259}
10260
10261void perf_buffer__free(struct perf_buffer *pb)
10262{
10263 int i;
10264
10265 if (IS_ERR_OR_NULL(pb))
10266 return;
10267 if (pb->cpu_bufs) {
10268 for (i = 0; i < pb->cpu_cnt; i++) {
10269 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
10270
10271 if (!cpu_buf)
10272 continue;
10273
10274 bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key);
10275 perf_buffer__free_cpu_buf(pb, cpu_buf);
10276 }
10277 free(pb->cpu_bufs);
10278 }
10279 if (pb->epoll_fd >= 0)
10280 close(pb->epoll_fd);
10281 free(pb->events);
10282 free(pb);
10283}
10284
10285static struct perf_cpu_buf *
10286perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
10287 int cpu, int map_key)
10288{
10289 struct perf_cpu_buf *cpu_buf;
10290 char msg[STRERR_BUFSIZE];
10291 int err;
10292
10293 cpu_buf = calloc(1, sizeof(*cpu_buf));
10294 if (!cpu_buf)
10295 return ERR_PTR(-ENOMEM);
10296
10297 cpu_buf->pb = pb;
10298 cpu_buf->cpu = cpu;
10299 cpu_buf->map_key = map_key;
10300
10301 cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 , cpu,
10302 -1, PERF_FLAG_FD_CLOEXEC);
10303 if (cpu_buf->fd < 0) {
10304 err = -errno;
10305 pr_warn("failed to open perf buffer event on cpu #%d: %s\n",
10306 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
10307 goto error;
10308 }
10309
10310 cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size,
10311 PROT_READ | PROT_WRITE, MAP_SHARED,
10312 cpu_buf->fd, 0);
10313 if (cpu_buf->base == MAP_FAILED) {
10314 cpu_buf->base = NULL;
10315 err = -errno;
10316 pr_warn("failed to mmap perf buffer on cpu #%d: %s\n",
10317 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
10318 goto error;
10319 }
10320
10321 if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
10322 err = -errno;
10323 pr_warn("failed to enable perf buffer event on cpu #%d: %s\n",
10324 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
10325 goto error;
10326 }
10327
10328 return cpu_buf;
10329
10330error:
10331 perf_buffer__free_cpu_buf(pb, cpu_buf);
10332 return (struct perf_cpu_buf *)ERR_PTR(err);
10333}
10334
10335static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
10336 struct perf_buffer_params *p);
10337
10338struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
10339 const struct perf_buffer_opts *opts)
10340{
10341 struct perf_buffer_params p = {};
10342 struct perf_event_attr attr = { 0, };
10343
10344 attr.config = PERF_COUNT_SW_BPF_OUTPUT;
10345 attr.type = PERF_TYPE_SOFTWARE;
10346 attr.sample_type = PERF_SAMPLE_RAW;
10347 attr.sample_period = 1;
10348 attr.wakeup_events = 1;
10349
10350 p.attr = &attr;
10351 p.sample_cb = opts ? opts->sample_cb : NULL;
10352 p.lost_cb = opts ? opts->lost_cb : NULL;
10353 p.ctx = opts ? opts->ctx : NULL;
10354
10355 return __perf_buffer__new(map_fd, page_cnt, &p);
10356}
10357
10358struct perf_buffer *
10359perf_buffer__new_raw(int map_fd, size_t page_cnt,
10360 const struct perf_buffer_raw_opts *opts)
10361{
10362 struct perf_buffer_params p = {};
10363
10364 p.attr = opts->attr;
10365 p.event_cb = opts->event_cb;
10366 p.ctx = opts->ctx;
10367 p.cpu_cnt = opts->cpu_cnt;
10368 p.cpus = opts->cpus;
10369 p.map_keys = opts->map_keys;
10370
10371 return __perf_buffer__new(map_fd, page_cnt, &p);
10372}
10373
10374static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
10375 struct perf_buffer_params *p)
10376{
10377 const char *online_cpus_file = "/sys/devices/system/cpu/online";
10378 struct bpf_map_info map;
10379 char msg[STRERR_BUFSIZE];
10380 struct perf_buffer *pb;
10381 bool *online = NULL;
10382 __u32 map_info_len;
10383 int err, i, j, n;
10384
10385 if (page_cnt & (page_cnt - 1)) {
10386 pr_warn("page count should be power of two, but is %zu\n",
10387 page_cnt);
10388 return ERR_PTR(-EINVAL);
10389 }
10390
10391
10392 memset(&map, 0, sizeof(map));
10393 map_info_len = sizeof(map);
10394 err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len);
10395 if (err) {
10396 err = -errno;
10397
10398
10399
10400 if (err != -EINVAL) {
10401 pr_warn("failed to get map info for map FD %d: %s\n",
10402 map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
10403 return ERR_PTR(err);
10404 }
10405 pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n",
10406 map_fd);
10407 } else {
10408 if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
10409 pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
10410 map.name);
10411 return ERR_PTR(-EINVAL);
10412 }
10413 }
10414
10415 pb = calloc(1, sizeof(*pb));
10416 if (!pb)
10417 return ERR_PTR(-ENOMEM);
10418
10419 pb->event_cb = p->event_cb;
10420 pb->sample_cb = p->sample_cb;
10421 pb->lost_cb = p->lost_cb;
10422 pb->ctx = p->ctx;
10423
10424 pb->page_size = getpagesize();
10425 pb->mmap_size = pb->page_size * page_cnt;
10426 pb->map_fd = map_fd;
10427
10428 pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
10429 if (pb->epoll_fd < 0) {
10430 err = -errno;
10431 pr_warn("failed to create epoll instance: %s\n",
10432 libbpf_strerror_r(err, msg, sizeof(msg)));
10433 goto error;
10434 }
10435
10436 if (p->cpu_cnt > 0) {
10437 pb->cpu_cnt = p->cpu_cnt;
10438 } else {
10439 pb->cpu_cnt = libbpf_num_possible_cpus();
10440 if (pb->cpu_cnt < 0) {
10441 err = pb->cpu_cnt;
10442 goto error;
10443 }
10444 if (map.max_entries && map.max_entries < pb->cpu_cnt)
10445 pb->cpu_cnt = map.max_entries;
10446 }
10447
10448 pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events));
10449 if (!pb->events) {
10450 err = -ENOMEM;
10451 pr_warn("failed to allocate events: out of memory\n");
10452 goto error;
10453 }
10454 pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs));
10455 if (!pb->cpu_bufs) {
10456 err = -ENOMEM;
10457 pr_warn("failed to allocate buffers: out of memory\n");
10458 goto error;
10459 }
10460
10461 err = parse_cpu_mask_file(online_cpus_file, &online, &n);
10462 if (err) {
10463 pr_warn("failed to get online CPU mask: %d\n", err);
10464 goto error;
10465 }
10466
10467 for (i = 0, j = 0; i < pb->cpu_cnt; i++) {
10468 struct perf_cpu_buf *cpu_buf;
10469 int cpu, map_key;
10470
10471 cpu = p->cpu_cnt > 0 ? p->cpus[i] : i;
10472 map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i;
10473
10474
10475
10476
10477 if (p->cpu_cnt <= 0 && (cpu >= n || !online[cpu]))
10478 continue;
10479
10480 cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key);
10481 if (IS_ERR(cpu_buf)) {
10482 err = PTR_ERR(cpu_buf);
10483 goto error;
10484 }
10485
10486 pb->cpu_bufs[j] = cpu_buf;
10487
10488 err = bpf_map_update_elem(pb->map_fd, &map_key,
10489 &cpu_buf->fd, 0);
10490 if (err) {
10491 err = -errno;
10492 pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n",
10493 cpu, map_key, cpu_buf->fd,
10494 libbpf_strerror_r(err, msg, sizeof(msg)));
10495 goto error;
10496 }
10497
10498 pb->events[j].events = EPOLLIN;
10499 pb->events[j].data.ptr = cpu_buf;
10500 if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd,
10501 &pb->events[j]) < 0) {
10502 err = -errno;
10503 pr_warn("failed to epoll_ctl cpu #%d perf FD %d: %s\n",
10504 cpu, cpu_buf->fd,
10505 libbpf_strerror_r(err, msg, sizeof(msg)));
10506 goto error;
10507 }
10508 j++;
10509 }
10510 pb->cpu_cnt = j;
10511 free(online);
10512
10513 return pb;
10514
10515error:
10516 free(online);
10517 if (pb)
10518 perf_buffer__free(pb);
10519 return ERR_PTR(err);
10520}
10521
/* Layout of a PERF_RECORD_SAMPLE record when sampling with
 * PERF_SAMPLE_RAW (as perf_buffer__new() configures): the raw payload
 * preceded by its 32-bit size.
 */
struct perf_sample_raw {
	struct perf_event_header header;
	uint32_t size;
	char data[];
};

/* Layout of a PERF_RECORD_LOST record; only 'lost' (the number of
 * dropped samples) is consumed here.
 */
struct perf_sample_lost {
	struct perf_event_header header;
	uint64_t id;
	uint64_t lost;
	uint64_t sample_id;
};
10534
10535static enum bpf_perf_event_ret
10536perf_buffer__process_record(struct perf_event_header *e, void *ctx)
10537{
10538 struct perf_cpu_buf *cpu_buf = ctx;
10539 struct perf_buffer *pb = cpu_buf->pb;
10540 void *data = e;
10541
10542
10543 if (pb->event_cb)
10544 return pb->event_cb(pb->ctx, cpu_buf->cpu, e);
10545
10546 switch (e->type) {
10547 case PERF_RECORD_SAMPLE: {
10548 struct perf_sample_raw *s = data;
10549
10550 if (pb->sample_cb)
10551 pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size);
10552 break;
10553 }
10554 case PERF_RECORD_LOST: {
10555 struct perf_sample_lost *s = data;
10556
10557 if (pb->lost_cb)
10558 pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost);
10559 break;
10560 }
10561 default:
10562 pr_warn("unknown perf sample type %d\n", e->type);
10563 return LIBBPF_PERF_EVENT_ERROR;
10564 }
10565 return LIBBPF_PERF_EVENT_CONT;
10566}
10567
10568static int perf_buffer__process_records(struct perf_buffer *pb,
10569 struct perf_cpu_buf *cpu_buf)
10570{
10571 enum bpf_perf_event_ret ret;
10572
10573 ret = bpf_perf_event_read_simple(cpu_buf->base, pb->mmap_size,
10574 pb->page_size, &cpu_buf->buf,
10575 &cpu_buf->buf_size,
10576 perf_buffer__process_record, cpu_buf);
10577 if (ret != LIBBPF_PERF_EVENT_CONT)
10578 return ret;
10579 return 0;
10580}
10581
/* FD of the epoll instance multiplexing all per-CPU buffers; lets the
 * caller integrate the perf buffer into their own event loop.
 */
int perf_buffer__epoll_fd(const struct perf_buffer *pb)
{
	return pb->epoll_fd;
}
10586
10587int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
10588{
10589 int i, cnt, err;
10590
10591 cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
10592 for (i = 0; i < cnt; i++) {
10593 struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;
10594
10595 err = perf_buffer__process_records(pb, cpu_buf);
10596 if (err) {
10597 pr_warn("error while processing records: %d\n", err);
10598 return err;
10599 }
10600 }
10601 return cnt < 0 ? -errno : cnt;
10602}
10603
10604
10605
10606
/* Number of per-CPU ring buffers actually allocated; may be smaller
 * than the possible-CPU count since offline CPUs are skipped at
 * creation time.
 */
size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb)
{
	return pb->cpu_cnt;
}
10611
10612
10613
10614
10615
10616
10617int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx)
10618{
10619 struct perf_cpu_buf *cpu_buf;
10620
10621 if (buf_idx >= pb->cpu_cnt)
10622 return -EINVAL;
10623
10624 cpu_buf = pb->cpu_bufs[buf_idx];
10625 if (!cpu_buf)
10626 return -ENOENT;
10627
10628 return cpu_buf->fd;
10629}
10630
10631
10632
10633
10634
10635
10636
10637
10638
10639int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx)
10640{
10641 struct perf_cpu_buf *cpu_buf;
10642
10643 if (buf_idx >= pb->cpu_cnt)
10644 return -EINVAL;
10645
10646 cpu_buf = pb->cpu_bufs[buf_idx];
10647 if (!cpu_buf)
10648 return -ENOENT;
10649
10650 return perf_buffer__process_records(pb, cpu_buf);
10651}
10652
10653int perf_buffer__consume(struct perf_buffer *pb)
10654{
10655 int i, err;
10656
10657 for (i = 0; i < pb->cpu_cnt; i++) {
10658 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
10659
10660 if (!cpu_buf)
10661 continue;
10662
10663 err = perf_buffer__process_records(pb, cpu_buf);
10664 if (err) {
10665 pr_warn("perf_buffer: failed to process records in buffer #%d: %d\n", i, err);
10666 return err;
10667 }
10668 }
10669 return 0;
10670}
10671
/* Description of one variable-length array inside struct bpf_prog_info:
 * offset of the array pointer field, offset of the element-count field,
 * and either the offset of the kernel-reported element size (positive)
 * or a fixed element size encoded as a negative value (decoded by
 * bpf_prog_info_read_offset_u32()).
 */
struct bpf_prog_info_array_desc {
	int array_offset;	/* e.g. offset of jited_prog_insns */
	int count_offset;	/* e.g. offset of jited_prog_len */
	int size_offset;	/* > 0: offset of rec size,
				 * < 0: fixed rec size as -size_offset
				 */
};

/* Table driving bpf_program__get_prog_info_linear(); indexed by the
 * BPF_PROG_INFO_* values used as bits in the 'arrays' mask.
 */
static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = {
	[BPF_PROG_INFO_JITED_INSNS] = {
		offsetof(struct bpf_prog_info, jited_prog_insns),
		offsetof(struct bpf_prog_info, jited_prog_len),
		-1,
	},
	[BPF_PROG_INFO_XLATED_INSNS] = {
		offsetof(struct bpf_prog_info, xlated_prog_insns),
		offsetof(struct bpf_prog_info, xlated_prog_len),
		-1,
	},
	[BPF_PROG_INFO_MAP_IDS] = {
		offsetof(struct bpf_prog_info, map_ids),
		offsetof(struct bpf_prog_info, nr_map_ids),
		-(int)sizeof(__u32),
	},
	[BPF_PROG_INFO_JITED_KSYMS] = {
		offsetof(struct bpf_prog_info, jited_ksyms),
		offsetof(struct bpf_prog_info, nr_jited_ksyms),
		-(int)sizeof(__u64),
	},
	[BPF_PROG_INFO_JITED_FUNC_LENS] = {
		offsetof(struct bpf_prog_info, jited_func_lens),
		offsetof(struct bpf_prog_info, nr_jited_func_lens),
		-(int)sizeof(__u32),
	},
	[BPF_PROG_INFO_FUNC_INFO] = {
		offsetof(struct bpf_prog_info, func_info),
		offsetof(struct bpf_prog_info, nr_func_info),
		offsetof(struct bpf_prog_info, func_info_rec_size),
	},
	[BPF_PROG_INFO_LINE_INFO] = {
		offsetof(struct bpf_prog_info, line_info),
		offsetof(struct bpf_prog_info, nr_line_info),
		offsetof(struct bpf_prog_info, line_info_rec_size),
	},
	[BPF_PROG_INFO_JITED_LINE_INFO] = {
		offsetof(struct bpf_prog_info, jited_line_info),
		offsetof(struct bpf_prog_info, nr_jited_line_info),
		offsetof(struct bpf_prog_info, jited_line_info_rec_size),
	},
	[BPF_PROG_INFO_PROG_TAGS] = {
		offsetof(struct bpf_prog_info, prog_tags),
		offsetof(struct bpf_prog_info, nr_prog_tags),
		-(int)sizeof(__u8) * BPF_TAG_SIZE,
	},

};
10728
/* Accessors for struct bpf_prog_info fields addressed by byte offset.
 * A non-negative 'offset' indexes into the struct viewed as an array of
 * u32/u64 cells; for the read variants, a negative 'offset' encodes a
 * fixed per-record size and -offset is returned instead.  The set
 * variants ignore negative offsets.
 */
static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info,
					   int offset)
{
	if (offset < 0)
		return -(int)offset;
	return ((__u32 *)info)[offset / sizeof(__u32)];
}

static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info,
					   int offset)
{
	if (offset < 0)
		return -(int)offset;
	return ((__u64 *)info)[offset / sizeof(__u64)];
}

static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset,
					 __u32 val)
{
	if (offset >= 0)
		((__u32 *)info)[offset / sizeof(__u32)] = val;
}

static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset,
					 __u64 val)
{
	if (offset >= 0)
		((__u64 *)info)[offset / sizeof(__u64)] = val;
}
10766
/* Fetch bpf_prog_info for prog FD 'fd' together with the
 * variable-length arrays selected by the 'arrays' bitmask
 * (BPF_PROG_INFO_* bits), all placed in one contiguous allocation that
 * the caller must free().  Bits for arrays the running kernel doesn't
 * report are silently cleared from info_linear->arrays.
 *
 * Returns the allocation or ERR_PTR() on failure.
 */
struct bpf_prog_info_linear *
bpf_program__get_prog_info_linear(int fd, __u64 arrays)
{
	struct bpf_prog_info_linear *info_linear;
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	__u32 data_len = 0;
	int i, err;
	void *ptr;

	if (arrays >> BPF_PROG_INFO_LAST_ARRAY)
		return ERR_PTR(-EINVAL);

	/* step 1: query once to learn array dimensions */
	err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
	if (err) {
		pr_debug("can't get prog info: %s", strerror(errno));
		return ERR_PTR(-EFAULT);
	}

	/* step 2: compute total size of all requested arrays */
	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		bool include_array = (arrays & (1UL << i)) > 0;
		struct bpf_prog_info_array_desc *desc;
		__u32 count, size;

		desc = bpf_prog_info_array_desc + i;

		/* kernel too old to report this array's fields */
		if (info_len < desc->array_offset + sizeof(__u32) ||
		    info_len < desc->count_offset + sizeof(__u32) ||
		    (desc->size_offset > 0 && info_len < desc->size_offset))
			include_array = false;

		if (!include_array) {
			arrays &= ~(1UL << i);
			continue;
		}

		count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
		size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);

		data_len += count * size;
	}

	/* step 3: allocate struct + all array data contiguously */
	data_len = roundup(data_len, sizeof(__u64));
	info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len);
	if (!info_linear)
		return ERR_PTR(-ENOMEM);

	/* step 4: point each requested array at its slice of 'data' */
	info_linear->arrays = arrays;
	memset(&info_linear->info, 0, sizeof(info));
	ptr = info_linear->data;

	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u32 count, size;

		if ((arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
		size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
		bpf_prog_info_set_offset_u32(&info_linear->info,
					     desc->count_offset, count);
		bpf_prog_info_set_offset_u32(&info_linear->info,
					     desc->size_offset, size);
		bpf_prog_info_set_offset_u64(&info_linear->info,
					     desc->array_offset,
					     ptr_to_u64(ptr));
		ptr += count * size;
	}

	/* step 5: query again, this time filling in the arrays */
	err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len);
	if (err) {
		pr_debug("can't get prog info: %s", strerror(errno));
		free(info_linear);
		return ERR_PTR(-EFAULT);
	}

	/* step 6: warn if counts/sizes changed between the two queries */
	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u32 v1, v2;

		if ((arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
		v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
						   desc->count_offset);
		if (v1 != v2)
			pr_warn("%s: mismatch in element count\n", __func__);

		v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
		v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
						   desc->size_offset);
		if (v1 != v2)
			pr_warn("%s: mismatch in rec size\n", __func__);
	}

	/* step 7: record final sizes for the caller */
	info_linear->info_len = sizeof(struct bpf_prog_info);
	info_linear->data_len = data_len;

	return info_linear;
}
10879
10880void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear)
10881{
10882 int i;
10883
10884 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
10885 struct bpf_prog_info_array_desc *desc;
10886 __u64 addr, offs;
10887
10888 if ((info_linear->arrays & (1UL << i)) == 0)
10889 continue;
10890
10891 desc = bpf_prog_info_array_desc + i;
10892 addr = bpf_prog_info_read_offset_u64(&info_linear->info,
10893 desc->array_offset);
10894 offs = addr - ptr_to_u64(info_linear->data);
10895 bpf_prog_info_set_offset_u64(&info_linear->info,
10896 desc->array_offset, offs);
10897 }
10898}
10899
10900void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
10901{
10902 int i;
10903
10904 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
10905 struct bpf_prog_info_array_desc *desc;
10906 __u64 addr, offs;
10907
10908 if ((info_linear->arrays & (1UL << i)) == 0)
10909 continue;
10910
10911 desc = bpf_prog_info_array_desc + i;
10912 offs = bpf_prog_info_read_offset_u64(&info_linear->info,
10913 desc->array_offset);
10914 addr = offs + ptr_to_u64(info_linear->data);
10915 bpf_prog_info_set_offset_u64(&info_linear->info,
10916 desc->array_offset, addr);
10917 }
10918}
10919
10920int bpf_program__set_attach_target(struct bpf_program *prog,
10921 int attach_prog_fd,
10922 const char *attach_func_name)
10923{
10924 int btf_obj_fd = 0, btf_id = 0, err;
10925
10926 if (!prog || attach_prog_fd < 0 || !attach_func_name)
10927 return -EINVAL;
10928
10929 if (prog->obj->loaded)
10930 return -EINVAL;
10931
10932 if (attach_prog_fd) {
10933 btf_id = libbpf_find_prog_btf_id(attach_func_name,
10934 attach_prog_fd);
10935 if (btf_id < 0)
10936 return btf_id;
10937 } else {
10938
10939 err = bpf_object__load_vmlinux_btf(prog->obj, true);
10940 if (err)
10941 return err;
10942 err = find_kernel_btf_id(prog->obj, attach_func_name,
10943 prog->expected_attach_type,
10944 &btf_obj_fd, &btf_id);
10945 if (err)
10946 return err;
10947 }
10948
10949 prog->attach_btf_id = btf_id;
10950 prog->attach_btf_obj_fd = btf_obj_fd;
10951 prog->attach_prog_fd = attach_prog_fd;
10952 return 0;
10953}
10954
/* Parse a CPU list string like "0-3,5,7-8\n" (sysfs format) into a
 * bool-per-CPU mask.  On success *mask/*mask_sz hold a malloc()'ed
 * array the caller must free; on error they are NULL/0.
 *
 * Fix: previously an out-of-order range (e.g. "5,1") shrank the mask
 * via realloc() and made 'start - *mask_sz' negative, which as a size_t
 * turned into a gigantic memset() — the allocation now only ever grows.
 */
int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz)
{
	int err = 0, n, len, start, end = -1;
	bool *tmp;

	*mask = NULL;
	*mask_sz = 0;

	/* each sub-string is "N" or "N-M", separated by ',' */
	while (*s) {
		if (*s == ',' || *s == '\n') {
			s++;
			continue;
		}
		n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len);
		if (n <= 0 || n > 2) {
			pr_warn("Failed to get CPU range %s: %d\n", s, n);
			err = -EINVAL;
			goto cleanup;
		} else if (n == 1) {
			end = start;
		}
		if (start < 0 || start > end) {
			pr_warn("Invalid CPU range [%d,%d] in %s\n",
				start, end, s);
			err = -EINVAL;
			goto cleanup;
		}
		if (end + 1 > *mask_sz) {
			tmp = realloc(*mask, end + 1);
			if (!tmp) {
				err = -ENOMEM;
				goto cleanup;
			}
			*mask = tmp;
			/* zero any gap between the old size and 'start' */
			if (start > *mask_sz)
				memset(tmp + *mask_sz, 0, start - *mask_sz);
			*mask_sz = end + 1;
		}
		memset(*mask + start, 1, end - start + 1);
		s += len;
	}
	if (!*mask_sz) {
		pr_warn("Empty CPU range\n");
		return -EINVAL;
	}
	return 0;
cleanup:
	free(*mask);
	*mask = NULL;
	return err;
}
11004
/* Read a sysfs-style CPU list from 'fcpu' (at most 127 bytes) and parse
 * it with parse_cpu_mask_str().  Returns 0 or a negative error.
 *
 * Fix: read()'s errno is now captured before close(), which may itself
 * fail and overwrite errno.
 */
int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz)
{
	int fd, err = 0, len;
	char buf[128];

	fd = open(fcpu, O_RDONLY);
	if (fd < 0) {
		err = -errno;
		pr_warn("Failed to open cpu mask file %s: %d\n", fcpu, err);
		return err;
	}
	len = read(fd, buf, sizeof(buf));
	if (len < 0)
		err = -errno; /* save before close() can clobber errno */
	close(fd);
	if (len <= 0) {
		if (!err)
			err = -EINVAL; /* empty file */
		pr_warn("Failed to read cpu mask from %s: %d\n", fcpu, err);
		return err;
	}
	if (len >= sizeof(buf)) {
		pr_warn("CPU mask is too big in file %s\n", fcpu);
		return -E2BIG;
	}
	buf[len] = '\0';

	return parse_cpu_mask_str(buf, mask, mask_sz);
}
11031
/* Count possible CPUs from sysfs; cached in a static after the first
 * successful call.  Returns the count or a negative error.
 */
int libbpf_num_possible_cpus(void)
{
	static const char *fcpu = "/sys/devices/system/cpu/possible";
	static int cpus;
	bool *mask;
	int err, n, i, cnt;

	cnt = READ_ONCE(cpus);
	if (cnt > 0)
		return cnt;

	err = parse_cpu_mask_file(fcpu, &mask, &n);
	if (err)
		return err;

	cnt = 0;
	for (i = 0; i < n; i++)
		cnt += mask[i];
	free(mask);

	WRITE_ONCE(cpus, cnt);
	return cnt;
}
11057
11058int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
11059 const struct bpf_object_open_opts *opts)
11060{
11061 DECLARE_LIBBPF_OPTS(bpf_object_open_opts, skel_opts,
11062 .object_name = s->name,
11063 );
11064 struct bpf_object *obj;
11065 int i;
11066
11067
11068
11069
11070
11071
11072
11073 if (opts) {
11074 memcpy(&skel_opts, opts, sizeof(*opts));
11075 if (!opts->object_name)
11076 skel_opts.object_name = s->name;
11077 }
11078
11079 obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts);
11080 if (IS_ERR(obj)) {
11081 pr_warn("failed to initialize skeleton BPF object '%s': %ld\n",
11082 s->name, PTR_ERR(obj));
11083 return PTR_ERR(obj);
11084 }
11085
11086 *s->obj = obj;
11087
11088 for (i = 0; i < s->map_cnt; i++) {
11089 struct bpf_map **map = s->maps[i].map;
11090 const char *name = s->maps[i].name;
11091 void **mmaped = s->maps[i].mmaped;
11092
11093 *map = bpf_object__find_map_by_name(obj, name);
11094 if (!*map) {
11095 pr_warn("failed to find skeleton map '%s'\n", name);
11096 return -ESRCH;
11097 }
11098
11099
11100 if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG)
11101 *mmaped = (*map)->mmaped;
11102 }
11103
11104 for (i = 0; i < s->prog_cnt; i++) {
11105 struct bpf_program **prog = s->progs[i].prog;
11106 const char *name = s->progs[i].name;
11107
11108 *prog = bpf_object__find_program_by_name(obj, name);
11109 if (!*prog) {
11110 pr_warn("failed to find skeleton program '%s'\n", name);
11111 return -ESRCH;
11112 }
11113 }
11114
11115 return 0;
11116}
11117
/* Load the skeleton's BPF object into the kernel, then re-mmap() every
 * mmapable map so the skeleton's data pointers observe the live kernel
 * state of each map.  Returns 0 or a negative error.
 */
int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
{
	int i, err;

	err = bpf_object__load(*s->obj);
	if (err) {
		pr_warn("failed to load BPF skeleton '%s': %d\n", s->name, err);
		return err;
	}

	for (i = 0; i < s->map_cnt; i++) {
		struct bpf_map *map = *s->maps[i].map;
		size_t mmap_sz = bpf_map_mmap_sz(map);
		int prot, map_fd = bpf_map__fd(map);
		void **mmaped = s->maps[i].mmaped;

		if (!mmaped)
			continue;

		if (!(map->def.map_flags & BPF_F_MMAPABLE)) {
			*mmaped = NULL;
			continue;
		}

		if (map->def.map_flags & BPF_F_RDONLY_PROG)
			prot = PROT_READ;
		else
			prot = PROT_READ | PROT_WRITE;

		/* Replace the anonymous "initialization image" mapping
		 * with an FD-backed mapping of the kernel map itself.
		 * MAP_FIXED at map->mmaped swaps the pages in place, so
		 * pointers handed out by bpf_object__open_skeleton()
		 * remain valid and now track kernel memory.
		 */
		*mmaped = mmap(map->mmaped, mmap_sz, prot,
			       MAP_SHARED | MAP_FIXED, map_fd, 0);
		if (*mmaped == MAP_FAILED) {
			err = -errno;
			*mmaped = NULL;
			pr_warn("failed to re-mmap() map '%s': %d\n",
				bpf_map__name(map), err);
			return err;
		}
	}

	return 0;
}
11170
11171int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
11172{
11173 int i;
11174
11175 for (i = 0; i < s->prog_cnt; i++) {
11176 struct bpf_program *prog = *s->progs[i].prog;
11177 struct bpf_link **link = s->progs[i].link;
11178 const struct bpf_sec_def *sec_def;
11179
11180 if (!prog->load)
11181 continue;
11182
11183 sec_def = find_sec_def(prog->sec_name);
11184 if (!sec_def || !sec_def->attach_fn)
11185 continue;
11186
11187 *link = sec_def->attach_fn(sec_def, prog);
11188 if (IS_ERR(*link)) {
11189 pr_warn("failed to auto-attach program '%s': %ld\n",
11190 bpf_program__name(prog), PTR_ERR(*link));
11191 return PTR_ERR(*link);
11192 }
11193 }
11194
11195 return 0;
11196}
11197
11198void bpf_object__detach_skeleton(struct bpf_object_skeleton *s)
11199{
11200 int i;
11201
11202 for (i = 0; i < s->prog_cnt; i++) {
11203 struct bpf_link **link = s->progs[i].link;
11204
11205 bpf_link__destroy(*link);
11206 *link = NULL;
11207 }
11208}
11209
11210void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s)
11211{
11212 if (s->progs)
11213 bpf_object__detach_skeleton(s);
11214 if (s->obj)
11215 bpf_object__close(*s->obj);
11216 free(s->maps);
11217 free(s->progs);
11218 free(s);
11219}
11220
11221
11222
/* Versioned symbol plumbing: each *_v0_0_6 symbol below is a pure alias of
 * the corresponding *_v0_0_4 implementation, so both symbol versions share
 * one body. COMPAT_VERSION binds the v0_0_4 name to the LIBBPF_0.0.4
 * version node (for binaries linked against the old ABI) and
 * DEFAULT_VERSION makes the v0_0_6 name the default that newly linked
 * binaries resolve to.
 *
 * NOTE(review): why two versions of identical implementations are exported
 * is not visible here — presumably the declared prototypes (e.g. const
 * qualifiers) changed between libbpf 0.0.4 and 0.0.6; confirm against the
 * libbpf.map version script.
 */
extern const char *bpf_map__get_pin_path_v0_0_6(const struct bpf_map *map)
	__attribute__((alias("bpf_map__get_pin_path_v0_0_4")));

extern bool bpf_map__is_pinned_v0_0_6(const struct bpf_map *map)
	__attribute__((alias("bpf_map__is_pinned_v0_0_4")));

extern int bpf_map__set_pin_path_v0_0_6(struct bpf_map *map, const char *path)
	__attribute__((alias("bpf_map__set_pin_path_v0_0_4")));

extern struct bpf_object *
bpf_object__open_file_v0_0_6(const char *path,
			     const struct bpf_object_open_opts *opts)
	__attribute__((alias("bpf_object__open_file_v0_0_4")));
extern struct bpf_object *
bpf_object__open_mem_v0_0_6(const void *obj_buf, size_t obj_buf_sz,
			    const struct bpf_object_open_opts *opts)
	__attribute__((alias("bpf_object__open_mem_v0_0_4")));
extern enum bpf_attach_type
bpf_program__get_expected_attach_type_v0_0_6(struct bpf_program *prog)
	__attribute__((alias("bpf_program__get_expected_attach_type_v0_0_4")));

extern enum bpf_prog_type
bpf_program__get_type_v0_0_6(struct bpf_program *prog)
	__attribute__((alias("bpf_program__get_type_v0_0_4")));

extern size_t bpf_program__size_v0_0_6(const struct bpf_program *prog)
	__attribute__((alias("bpf_program__size_v0_0_4")));

/* Attach symbol-version nodes: old symbol -> LIBBPF_0.0.4,
 * default symbol -> LIBBPF_0.0.6.
 */
COMPAT_VERSION(bpf_map__get_pin_path_v0_0_4,
	       bpf_map__get_pin_path, LIBBPF_0.0.4)
DEFAULT_VERSION(bpf_map__get_pin_path_v0_0_6,
		bpf_map__get_pin_path, LIBBPF_0.0.6)

COMPAT_VERSION(bpf_map__is_pinned_v0_0_4,
	       bpf_map__is_pinned, LIBBPF_0.0.4)
DEFAULT_VERSION(bpf_map__is_pinned_v0_0_6,
		bpf_map__is_pinned, LIBBPF_0.0.6)

COMPAT_VERSION(bpf_map__set_pin_path_v0_0_4,
	       bpf_map__set_pin_path, LIBBPF_0.0.4)
DEFAULT_VERSION(bpf_map__set_pin_path_v0_0_6,
		bpf_map__set_pin_path, LIBBPF_0.0.6)

COMPAT_VERSION(bpf_object__open_file_v0_0_4,
	       bpf_object__open_file, LIBBPF_0.0.4)
DEFAULT_VERSION(bpf_object__open_file_v0_0_6,
		bpf_object__open_file, LIBBPF_0.0.6)

COMPAT_VERSION(bpf_object__open_mem_v0_0_4,
	       bpf_object__open_mem, LIBBPF_0.0.4)
DEFAULT_VERSION(bpf_object__open_mem_v0_0_6,
		bpf_object__open_mem, LIBBPF_0.0.6)

COMPAT_VERSION(bpf_program__get_expected_attach_type_v0_0_4,
	       bpf_program__get_expected_attach_type, LIBBPF_0.0.4)
DEFAULT_VERSION(bpf_program__get_expected_attach_type_v0_0_6,
		bpf_program__get_expected_attach_type, LIBBPF_0.0.6)

COMPAT_VERSION(bpf_program__get_type_v0_0_4,
	       bpf_program__get_type, LIBBPF_0.0.4)
DEFAULT_VERSION(bpf_program__get_type_v0_0_6,
		bpf_program__get_type, LIBBPF_0.0.6)

COMPAT_VERSION(bpf_program__size_v0_0_4,
	       bpf_program__size, LIBBPF_0.0.4)
DEFAULT_VERSION(bpf_program__size_v0_0_6,
		bpf_program__size, LIBBPF_0.0.6)