// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * Common eBPF ELF object loading operations.
 */
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <libgen.h>
#include <inttypes.h>
#include <limits.h>
#include <string.h>
#include <unistd.h>
#include <endian.h>
#include <fcntl.h>
#include <errno.h>
#include <ctype.h>
#include <asm/unistd.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/list.h>
#include <linux/limits.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>
#include <linux/version.h>
#include <sys/epoll.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/vfs.h>
#include <sys/utsname.h>
#include <sys/resource.h>
#include <libelf.h>
#include <gelf.h>
#include <zlib.h>

#include "libbpf.h"
#include "bpf.h"
#include "btf.h"
#include "str_error.h"
#include "libbpf_internal.h"
#include "hashmap.h"

#ifndef EM_BPF
#define EM_BPF 247
#endif

#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC 0xcafe4a11
#endif

#define BPF_INSN_SZ (sizeof(struct bpf_insn))

/* vsprintf() in __base_pr() uses nonliteral format string. It may break
 * compilation if the user's printf() format is not a string literal.
 */
#pragma GCC diagnostic ignored "-Wformat-nonliteral"

#define __printf(a, b) __attribute__((format(printf, a, b)))

static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
static const struct btf_type *
skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id);

static int __base_pr(enum libbpf_print_level level, const char *format,
		     va_list args)
{
	if (level == LIBBPF_DEBUG)
		return 0;

	return vfprintf(stderr, format, args);
}

static libbpf_print_fn_t __libbpf_pr = __base_pr;

libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn)
{
	libbpf_print_fn_t old_print_fn = __libbpf_pr;

	__libbpf_pr = fn;
	return old_print_fn;
}
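
/* Usage sketch (illustrative, not part of libbpf itself): redirect libbpf's
 * logs to a custom callback and restore the old one later:
 *
 *	static int my_print(enum libbpf_print_level level,
 *			    const char *fmt, va_list args)
 *	{
 *		return vfprintf(stdout, fmt, args);
 *	}
 *
 *	libbpf_print_fn_t old = libbpf_set_print(my_print);
 *	...
 *	libbpf_set_print(old);
 *
 * Passing NULL disables all libbpf output (see libbpf_print() below).
 */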

__printf(2, 3)
void libbpf_print(enum libbpf_print_level level, const char *format, ...)
{
	va_list args;

	if (!__libbpf_pr)
		return;

	va_start(args, format);
	__libbpf_pr(level, format, args);
	va_end(args);
}

static void pr_perm_msg(int err)
{
	struct rlimit limit;
	char buf[100];

	if (err != -EPERM || geteuid() != 0)
		return;

	err = getrlimit(RLIMIT_MEMLOCK, &limit);
	if (err)
		return;

	if (limit.rlim_cur == RLIM_INFINITY)
		return;

	if (limit.rlim_cur < 1024)
		snprintf(buf, sizeof(buf), "%zu bytes", (size_t)limit.rlim_cur);
	else if (limit.rlim_cur < 1024*1024)
		snprintf(buf, sizeof(buf), "%.1f KiB", (double)limit.rlim_cur / 1024);
	else
		snprintf(buf, sizeof(buf), "%.1f MiB", (double)limit.rlim_cur / (1024*1024));

	pr_warn("permission error while running as root; try raising 'ulimit -l'? current value: %s\n",
		buf);
}
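
/* For example, if map creation fails with -EPERM under a low RLIMIT_MEMLOCK,
 * running "ulimit -l unlimited" (or raising the limit via setrlimit) before
 * loading typically resolves it.
 */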

#define STRERR_BUFSIZE 128

/* free() the pointee and NULL out the pointer */
#ifndef zfree
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

/* close() a valid fd and reset it to -1 */
#ifndef zclose
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

enum kern_feature_id {
	/* v4.14: kernel support for program & map names. */
	FEAT_PROG_NAME,
	/* v5.2: kernel support for global data sections. */
	FEAT_GLOBAL_DATA,
	/* BTF support */
	FEAT_BTF,
	/* BTF_KIND_FUNC and BTF_KIND_FUNC_PROTO support */
	FEAT_BTF_FUNC,
	/* BTF_KIND_VAR and BTF_KIND_DATASEC support */
	FEAT_BTF_DATASEC,
	/* BTF_FUNC_GLOBAL is supported */
	FEAT_BTF_GLOBAL_FUNC,
	/* BPF_F_MMAPABLE is supported for arrays */
	FEAT_ARRAY_MMAP,
	/* kernel support for expected_attach_type in BPF_PROG_LOAD */
	FEAT_EXP_ATTACH_TYPE,
	/* bpf_probe_read_{kernel,user}[_str]() helpers */
	FEAT_PROBE_READ_KERN,
	/* BPF_PROG_BIND_MAP is supported */
	FEAT_PROG_BIND_MAP,
	/* kernel support for module BTFs */
	FEAT_MODULE_BTF,
	__FEAT_CNT,
};
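
/* Feature availability is detected at runtime: kernel_supports() (declared
 * below) probes the running kernel for a given feature instead of relying
 * on kernel version checks.
 */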

static bool kernel_supports(enum kern_feature_id feat_id);

enum reloc_type {
	RELO_LD64,
	RELO_CALL,
	RELO_DATA,
	RELO_EXTERN,
};

struct reloc_desc {
	enum reloc_type type;
	int insn_idx;
	int map_idx;
	int sym_off;
	bool processed;
};

struct bpf_sec_def;

typedef struct bpf_link *(*attach_fn_t)(const struct bpf_sec_def *sec,
					struct bpf_program *prog);

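/* A bpf_sec_def ties an ELF section name prefix (e.g. "kprobe/") to a BPF
 * program type, expected attach type and an optional auto-attach callback;
 * 'len' is the prefix length used when matching section names.
 */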
struct bpf_sec_def {
	const char *sec;
	size_t len;
	enum bpf_prog_type prog_type;
	enum bpf_attach_type expected_attach_type;
	bool is_exp_attach_type_optional;
	bool is_attachable;
	bool is_attach_btf;
	bool is_sleepable;
	attach_fn_t attach_fn;
};

/*
 * bpf_prog would be a better name, but that one has been taken by
 * linux/filter.h already.
 */
struct bpf_program {
	const struct bpf_sec_def *sec_def;
	char *sec_name;
	size_t sec_idx;
	/* this program's instruction offset (in number of instructions)
	 * within its containing ELF section
	 */
	size_t sec_insn_off;
	/* number of original instructions in ELF section belonging to this
	 * program, not taking into account subprog instructions possibly
	 * appended later during relocation
	 */
	size_t sec_insn_cnt;
	/* Offset (in number of instructions) of the start of instructions
	 * belonging to this BPF program within its containing main BPF
	 * program. For the entry-point (main) BPF program, this is always
	 * zero. For a sub-program, this gets reset before each main BPF
	 * program is processed and relocated and is used to determine
	 * whether the sub-program was already appended to the main program,
	 * and if yes, at which instruction offset.
	 */
	size_t sub_insn_off;

	char *name;
	/* sec_name with '/' replaced by '_'; makes recursive pinning in
	 * bpf_object__pin_programs easier
	 */
	char *pin_name;

	/* instructions that belong to this BPF program; insns[0] is located
	 * at sec_insn_off instruction within its ELF section in the ELF
	 * file, so to map an ELF file instruction index to a local
	 * instruction index one needs to subtract sec_insn_off, and vice
	 * versa
	 */
	struct bpf_insn *insns;
	/* actual number of instructions in this BPF program's image; for
	 * entry-point BPF programs this includes the size of the main
	 * program itself plus all used sub-programs, appended at the end
	 */
	size_t insns_cnt;

	struct reloc_desc *reloc_desc;
	int nr_reloc;
	int log_level;

	struct {
		int nr;
		int *fds;
	} instances;
	bpf_program_prep_t preprocessor;

	struct bpf_object *obj;
	void *priv;
	bpf_program_clear_priv_t clear_priv;

	bool load;
	enum bpf_prog_type type;
	enum bpf_attach_type expected_attach_type;
	int prog_ifindex;
	__u32 attach_btf_obj_fd;
	__u32 attach_btf_id;
	__u32 attach_prog_fd;
	void *func_info;
	__u32 func_info_rec_size;
	__u32 func_info_cnt;

	void *line_info;
	__u32 line_info_rec_size;
	__u32 line_info_cnt;
	__u32 prog_flags;
};

struct bpf_struct_ops {
	const char *tname;
	const struct btf_type *type;
	struct bpf_program **progs;
	__u32 *kern_func_off;
	/* e.g. struct tcp_congestion_ops in the BPF program's BTF format */
	void *data;
	/* e.g. struct bpf_struct_ops_tcp_congestion_ops in the
	 * btf_vmlinux's format, i.e.:
	 *
	 *	struct bpf_struct_ops_tcp_congestion_ops {
	 *		[... some other kernel fields ...]
	 *		struct tcp_congestion_ops data;
	 *	}
	 *
	 * kern_vdata is kern_vtype->size bytes big;
	 * bpf_map__init_kern_struct_ops() populates it from "data".
	 */
	void *kern_vdata;
	__u32 type_id;
};

#define DATA_SEC ".data"
#define BSS_SEC ".bss"
#define RODATA_SEC ".rodata"
#define KCONFIG_SEC ".kconfig"
#define KSYMS_SEC ".ksyms"
#define STRUCT_OPS_SEC ".struct_ops"

enum libbpf_map_type {
	LIBBPF_MAP_UNSPEC,
	LIBBPF_MAP_DATA,
	LIBBPF_MAP_BSS,
	LIBBPF_MAP_RODATA,
	LIBBPF_MAP_KCONFIG,
};

static const char * const libbpf_type_to_btf_name[] = {
	[LIBBPF_MAP_DATA]	= DATA_SEC,
	[LIBBPF_MAP_BSS]	= BSS_SEC,
	[LIBBPF_MAP_RODATA]	= RODATA_SEC,
	[LIBBPF_MAP_KCONFIG]	= KCONFIG_SEC,
};

struct bpf_map {
	char *name;
	int fd;
	int sec_idx;
	size_t sec_offset;
	int map_ifindex;
	int inner_map_fd;
	struct bpf_map_def def;
	__u32 numa_node;
	__u32 btf_var_idx;
	__u32 btf_key_type_id;
	__u32 btf_value_type_id;
	__u32 btf_vmlinux_value_type_id;
	void *priv;
	bpf_map_clear_priv_t clear_priv;
	enum libbpf_map_type libbpf_type;
	void *mmaped;
	struct bpf_struct_ops *st_ops;
	struct bpf_map *inner_map;
	void **init_slots;
	int init_slots_sz;
	char *pin_path;
	bool pinned;
	bool reused;
};

enum extern_type {
	EXT_UNKNOWN,
	EXT_KCFG,
	EXT_KSYM,
};

enum kcfg_type {
	KCFG_UNKNOWN,
	KCFG_CHAR,
	KCFG_BOOL,
	KCFG_INT,
	KCFG_TRISTATE,
	KCFG_CHAR_ARR,
};

struct extern_desc {
	enum extern_type type;
	int sym_idx;
	int btf_id;
	int sec_btf_id;
	const char *name;
	bool is_set;
	bool is_weak;
	union {
		struct {
			enum kcfg_type type;
			int sz;
			int align;
			int data_off;
			bool is_signed;
		} kcfg;
		struct {
			unsigned long long addr;

			/* target btf_id of the corresponding kernel var. */
			int kernel_btf_obj_fd;
			int kernel_btf_id;

			/* local btf_id of the ksym extern's type. */
			__u32 type_id;
		} ksym;
	};
};
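
/* In BPF-side C code such externs are declared (illustratively) as:
 *
 *	extern int CONFIG_BPF_SYSCALL __kconfig;
 *	extern const void bpf_prog_active __ksym;
 *
 * with __kconfig/__ksym placing them into the .kconfig and .ksyms sections
 * handled below.
 */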

static LIST_HEAD(bpf_objects_list);

struct module_btf {
	struct btf *btf;
	char *name;
	__u32 id;
	int fd;
};

struct bpf_object {
	char name[BPF_OBJ_NAME_LEN];
	char license[64];
	__u32 kern_version;

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;
	size_t maps_cap;

	char *kconfig;
	struct extern_desc *externs;
	int nr_extern;
	int kconfig_map_idx;
	int rodata_map_idx;

	bool loaded;
	bool has_subcalls;

	/*
	 * Information used when doing ELF-related work. Only valid if
	 * efile.elf is not NULL.
	 */
	struct {
		int fd;
		const void *obj_buf;
		size_t obj_buf_sz;
		Elf *elf;
		GElf_Ehdr ehdr;
		Elf_Data *symbols;
		Elf_Data *data;
		Elf_Data *rodata;
		Elf_Data *bss;
		Elf_Data *st_ops_data;
		size_t shstrndx; /* section index for section name strings */
		size_t strtabidx;
		struct {
			GElf_Shdr shdr;
			Elf_Data *data;
		} *reloc_sects;
		int nr_reloc_sects;
		int maps_shndx;
		int btf_maps_shndx;
		__u32 btf_maps_sec_btf_id;
		int text_shndx;
		int symbols_shndx;
		int data_shndx;
		int rodata_shndx;
		int bss_shndx;
		int st_ops_shndx;
	} efile;

	/*
	 * All loaded bpf_objects are linked into a list, which is hidden
	 * from the caller. bpf_objects__<func> handlers deal with all
	 * objects.
	 */
	struct list_head list;

	struct btf *btf;
	struct btf_ext *btf_ext;

	/* Parsed and loaded BTF vmlinux, if any of the programs in the
	 * object need it at load time.
	 */
	struct btf *btf_vmlinux;
	/* vmlinux BTF override for CO-RE relocations */
	struct btf *btf_vmlinux_override;
	/* Lazily initialized kernel module BTFs */
	struct module_btf *btf_modules;
	bool btf_modules_loaded;
	size_t btf_module_cnt;
	size_t btf_module_cap;

	void *priv;
	bpf_object_clear_priv_t clear_priv;

	char path[];
};
#define obj_elf_valid(o) ((o)->efile.elf)

static const char *elf_sym_str(const struct bpf_object *obj, size_t off);
static const char *elf_sec_str(const struct bpf_object *obj, size_t off);
static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx);
static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name);
static int elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn, GElf_Shdr *hdr);
static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn);
static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn);
static int elf_sym_by_sec_off(const struct bpf_object *obj, size_t sec_idx,
			      size_t off, __u32 sym_type, GElf_Sym *sym);

void bpf_program__unload(struct bpf_program *prog)
{
	int i;

	if (!prog)
		return;

	/*
	 * If the object is opened but the program was never loaded,
	 * it is possible that prog->instances.nr == -1.
	 */
	if (prog->instances.nr > 0) {
		for (i = 0; i < prog->instances.nr; i++)
			zclose(prog->instances.fds[i]);
	} else if (prog->instances.nr != -1) {
		pr_warn("Internal error: instances.nr is %d\n",
			prog->instances.nr);
	}

	prog->instances.nr = -1;
	zfree(&prog->instances.fds);

	zfree(&prog->func_info);
	zfree(&prog->line_info);
}

static void bpf_program__exit(struct bpf_program *prog)
{
	if (!prog)
		return;

	if (prog->clear_priv)
		prog->clear_priv(prog, prog->priv);

	prog->priv = NULL;
	prog->clear_priv = NULL;

	bpf_program__unload(prog);
	zfree(&prog->name);
	zfree(&prog->sec_name);
	zfree(&prog->pin_name);
	zfree(&prog->insns);
	zfree(&prog->reloc_desc);

	prog->nr_reloc = 0;
	prog->insns_cnt = 0;
	prog->sec_idx = -1;
}

static char *__bpf_program__pin_name(struct bpf_program *prog)
{
	char *name, *p;

	name = p = strdup(prog->sec_name);
	while ((p = strchr(p, '/')))
		*p = '_';

	return name;
}

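/* A subprogram call is a BPF_JMP|BPF_CALL instruction whose src_reg is
 * BPF_PSEUDO_CALL; its imm field holds the callee's offset, in
 * instructions, relative to the instruction following the call.
 */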
static bool insn_is_subprog_call(const struct bpf_insn *insn)
{
	return BPF_CLASS(insn->code) == BPF_JMP &&
	       BPF_OP(insn->code) == BPF_CALL &&
	       BPF_SRC(insn->code) == BPF_K &&
	       insn->src_reg == BPF_PSEUDO_CALL &&
	       insn->dst_reg == 0 &&
	       insn->off == 0;
}

static int
bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog,
		      const char *name, size_t sec_idx, const char *sec_name,
		      size_t sec_off, void *insn_data, size_t insn_data_sz)
{
	if (insn_data_sz == 0 || insn_data_sz % BPF_INSN_SZ || sec_off % BPF_INSN_SZ) {
		pr_warn("sec '%s': corrupted program '%s', offset %zu, size %zu\n",
			sec_name, name, sec_off, insn_data_sz);
		return -EINVAL;
	}

	memset(prog, 0, sizeof(*prog));
	prog->obj = obj;

	prog->sec_idx = sec_idx;
	prog->sec_insn_off = sec_off / BPF_INSN_SZ;
	prog->sec_insn_cnt = insn_data_sz / BPF_INSN_SZ;

	prog->insns_cnt = prog->sec_insn_cnt;

	prog->type = BPF_PROG_TYPE_UNSPEC;
	prog->load = true;

	prog->instances.fds = NULL;
	prog->instances.nr = -1;

	prog->sec_name = strdup(sec_name);
	if (!prog->sec_name)
		goto errout;

	prog->name = strdup(name);
	if (!prog->name)
		goto errout;

	prog->pin_name = __bpf_program__pin_name(prog);
	if (!prog->pin_name)
		goto errout;

	prog->insns = malloc(insn_data_sz);
	if (!prog->insns)
		goto errout;
	memcpy(prog->insns, insn_data, insn_data_sz);

	return 0;
errout:
	pr_warn("sec '%s': failed to allocate memory for prog '%s'\n", sec_name, name);
	bpf_program__exit(prog);
	return -ENOMEM;
}

static int
bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
			 const char *sec_name, int sec_idx)
{
	struct bpf_program *prog, *progs;
	void *data = sec_data->d_buf;
	size_t sec_sz = sec_data->d_size, sec_off, prog_sz;
	int nr_progs, err;
	const char *name;
	GElf_Sym sym;

	progs = obj->programs;
	nr_progs = obj->nr_programs;
	sec_off = 0;

	while (sec_off < sec_sz) {
		if (elf_sym_by_sec_off(obj, sec_idx, sec_off, STT_FUNC, &sym)) {
			pr_warn("sec '%s': failed to find program symbol at offset %zu\n",
				sec_name, sec_off);
			return -LIBBPF_ERRNO__FORMAT;
		}

		prog_sz = sym.st_size;

		name = elf_sym_str(obj, sym.st_name);
		if (!name) {
			pr_warn("sec '%s': failed to get symbol name for offset %zu\n",
				sec_name, sec_off);
			return -LIBBPF_ERRNO__FORMAT;
		}

		if (sec_off + prog_sz > sec_sz) {
			pr_warn("sec '%s': program at offset %zu crosses section boundary\n",
				sec_name, sec_off);
			return -LIBBPF_ERRNO__FORMAT;
		}

		pr_debug("sec '%s': found program '%s' at insn offset %zu (%zu bytes), code size %zu insns (%zu bytes)\n",
			 sec_name, name, sec_off / BPF_INSN_SZ, sec_off, prog_sz / BPF_INSN_SZ, prog_sz);

		progs = libbpf_reallocarray(progs, nr_progs + 1, sizeof(*progs));
		if (!progs) {
			/*
			 * In this case the original obj->programs is still
			 * valid, so no special treatment is needed in
			 * bpf_object__close().
			 */
			pr_warn("sec '%s': failed to alloc memory for new program '%s'\n",
				sec_name, name);
			return -ENOMEM;
		}
		obj->programs = progs;

		prog = &progs[nr_progs];

		err = bpf_object__init_prog(obj, prog, name, sec_idx, sec_name,
					    sec_off, data + sec_off, prog_sz);
		if (err)
			return err;

		nr_progs++;
		obj->nr_programs = nr_progs;

		sec_off += prog_sz;
	}

	return 0;
}

static __u32 get_kernel_version(void)
{
	__u32 major, minor, patch;
	struct utsname info;

	uname(&info);
	if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3)
		return 0;
	return KERNEL_VERSION(major, minor, patch);
}
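
/* E.g. a release string of "5.4.0-42-generic" parses as major 5, minor 4,
 * patch 0 and yields KERNEL_VERSION(5, 4, 0) == 0x050400.
 */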

static const struct btf_member *
find_member_by_offset(const struct btf_type *t, __u32 bit_offset)
{
	struct btf_member *m;
	int i;

	for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
		if (btf_member_bit_offset(t, i) == bit_offset)
			return m;
	}

	return NULL;
}

static const struct btf_member *
find_member_by_name(const struct btf *btf, const struct btf_type *t,
		    const char *name)
{
	struct btf_member *m;
	int i;

	for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
		if (!strcmp(btf__name_by_offset(btf, m->name_off), name))
			return m;
	}

	return NULL;
}

#define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_"
static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
				   const char *name, __u32 kind);

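/* For example, for tname "tcp_congestion_ops" this looks up both
 * struct tcp_congestion_ops and its kernel map-value wrapper
 * struct bpf_struct_ops_tcp_congestion_ops in the kernel BTF.
 */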
static int
find_struct_ops_kern_types(const struct btf *btf, const char *tname,
			   const struct btf_type **type, __u32 *type_id,
			   const struct btf_type **vtype, __u32 *vtype_id,
			   const struct btf_member **data_member)
{
	const struct btf_type *kern_type, *kern_vtype;
	const struct btf_member *kern_data_member;
	__s32 kern_vtype_id, kern_type_id;
	__u32 i;

	kern_type_id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT);
	if (kern_type_id < 0) {
		pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n",
			tname);
		return kern_type_id;
	}
	kern_type = btf__type_by_id(btf, kern_type_id);

	/* Find the corresponding "map_value" type that will be used
	 * in map_update(BPF_MAP_TYPE_STRUCT_OPS). For example, find
	 * "struct bpf_struct_ops_tcp_congestion_ops" from the
	 * btf_vmlinux.
	 */
	kern_vtype_id = find_btf_by_prefix_kind(btf, STRUCT_OPS_VALUE_PREFIX,
						tname, BTF_KIND_STRUCT);
	if (kern_vtype_id < 0) {
		pr_warn("struct_ops init_kern: struct %s%s is not found in kernel BTF\n",
			STRUCT_OPS_VALUE_PREFIX, tname);
		return kern_vtype_id;
	}
	kern_vtype = btf__type_by_id(btf, kern_vtype_id);

	/* Find "struct tcp_congestion_ops" from
	 * struct bpf_struct_ops_tcp_congestion_ops {
	 *	[ ... ]
	 *	struct tcp_congestion_ops data;
	 * }
	 */
	kern_data_member = btf_members(kern_vtype);
	for (i = 0; i < btf_vlen(kern_vtype); i++, kern_data_member++) {
		if (kern_data_member->type == kern_type_id)
			break;
	}
	if (i == btf_vlen(kern_vtype)) {
		pr_warn("struct_ops init_kern: struct %s data is not found in struct %s%s\n",
			tname, STRUCT_OPS_VALUE_PREFIX, tname);
		return -EINVAL;
	}

	*type = kern_type;
	*type_id = kern_type_id;
	*vtype = kern_vtype;
	*vtype_id = kern_vtype_id;
	*data_member = kern_data_member;

	return 0;
}

static bool bpf_map__is_struct_ops(const struct bpf_map *map)
{
	return map->def.type == BPF_MAP_TYPE_STRUCT_OPS;
}

/* Init the map's fields that depend on kern_btf */
static int bpf_map__init_kern_struct_ops(struct bpf_map *map,
					 const struct btf *btf,
					 const struct btf *kern_btf)
{
	const struct btf_member *member, *kern_member, *kern_data_member;
	const struct btf_type *type, *kern_type, *kern_vtype;
	__u32 i, kern_type_id, kern_vtype_id, kern_data_off;
	struct bpf_struct_ops *st_ops;
	void *data, *kern_data;
	const char *tname;
	int err;

	st_ops = map->st_ops;
	type = st_ops->type;
	tname = st_ops->tname;
	err = find_struct_ops_kern_types(kern_btf, tname,
					 &kern_type, &kern_type_id,
					 &kern_vtype, &kern_vtype_id,
					 &kern_data_member);
	if (err)
		return err;

	pr_debug("struct_ops init_kern %s: type_id:%u kern_type_id:%u kern_vtype_id:%u\n",
		 map->name, st_ops->type_id, kern_type_id, kern_vtype_id);

	map->def.value_size = kern_vtype->size;
	map->btf_vmlinux_value_type_id = kern_vtype_id;

	st_ops->kern_vdata = calloc(1, kern_vtype->size);
	if (!st_ops->kern_vdata)
		return -ENOMEM;

	data = st_ops->data;
	kern_data_off = kern_data_member->offset / 8;
	kern_data = st_ops->kern_vdata + kern_data_off;

	member = btf_members(type);
	for (i = 0; i < btf_vlen(type); i++, member++) {
		const struct btf_type *mtype, *kern_mtype;
		__u32 mtype_id, kern_mtype_id;
		void *mdata, *kern_mdata;
		__s64 msize, kern_msize;
		__u32 moff, kern_moff;
		__u32 kern_member_idx;
		const char *mname;

		mname = btf__name_by_offset(btf, member->name_off);
		kern_member = find_member_by_name(kern_btf, kern_type, mname);
		if (!kern_member) {
			pr_warn("struct_ops init_kern %s: Cannot find member %s in kernel BTF\n",
				map->name, mname);
			return -ENOTSUP;
		}

		kern_member_idx = kern_member - btf_members(kern_type);
		if (btf_member_bitfield_size(type, i) ||
		    btf_member_bitfield_size(kern_type, kern_member_idx)) {
			pr_warn("struct_ops init_kern %s: bitfield %s is not supported\n",
				map->name, mname);
			return -ENOTSUP;
		}

		moff = member->offset / 8;
		kern_moff = kern_member->offset / 8;

		mdata = data + moff;
		kern_mdata = kern_data + kern_moff;

		mtype = skip_mods_and_typedefs(btf, member->type, &mtype_id);
		kern_mtype = skip_mods_and_typedefs(kern_btf, kern_member->type,
						    &kern_mtype_id);
		if (BTF_INFO_KIND(mtype->info) !=
		    BTF_INFO_KIND(kern_mtype->info)) {
			pr_warn("struct_ops init_kern %s: Unmatched member type %s %u != %u(kernel)\n",
				map->name, mname, BTF_INFO_KIND(mtype->info),
				BTF_INFO_KIND(kern_mtype->info));
			return -ENOTSUP;
		}

		if (btf_is_ptr(mtype)) {
			struct bpf_program *prog;

			prog = st_ops->progs[i];
			if (!prog)
				continue;

			kern_mtype = skip_mods_and_typedefs(kern_btf,
							    kern_mtype->type,
							    &kern_mtype_id);

			/* mtype->type must be a func_proto, which was
			 * guaranteed in bpf_object__collect_st_ops_relos(),
			 * so only check kern_mtype for func_proto here.
			 */
			if (!btf_is_func_proto(kern_mtype)) {
				pr_warn("struct_ops init_kern %s: kernel member %s is not a func ptr\n",
					map->name, mname);
				return -ENOTSUP;
			}

			prog->attach_btf_id = kern_type_id;
			prog->expected_attach_type = kern_member_idx;

			st_ops->kern_func_off[i] = kern_data_off + kern_moff;

			pr_debug("struct_ops init_kern %s: func ptr %s is set to prog %s from data(+%u) to kern_data(+%u)\n",
				 map->name, mname, prog->name, moff,
				 kern_moff);

			continue;
		}

		msize = btf__resolve_size(btf, mtype_id);
		kern_msize = btf__resolve_size(kern_btf, kern_mtype_id);
		if (msize < 0 || kern_msize < 0 || msize != kern_msize) {
			pr_warn("struct_ops init_kern %s: Error in size of member %s: %zd != %zd(kernel)\n",
				map->name, mname, (ssize_t)msize,
				(ssize_t)kern_msize);
			return -ENOTSUP;
		}

		pr_debug("struct_ops init_kern %s: copy %s %u bytes from data(+%u) to kern_data(+%u)\n",
			 map->name, mname, (unsigned int)msize,
			 moff, kern_moff);
		memcpy(kern_mdata, mdata, msize);
	}

	return 0;
}

static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
{
	struct bpf_map *map;
	size_t i;
	int err;

	for (i = 0; i < obj->nr_maps; i++) {
		map = &obj->maps[i];

		if (!bpf_map__is_struct_ops(map))
			continue;

		err = bpf_map__init_kern_struct_ops(map, obj->btf,
						    obj->btf_vmlinux);
		if (err)
			return err;
	}

	return 0;
}

static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
{
	const struct btf_type *type, *datasec;
	const struct btf_var_secinfo *vsi;
	struct bpf_struct_ops *st_ops;
	const char *tname, *var_name;
	__s32 type_id, datasec_id;
	const struct btf *btf;
	struct bpf_map *map;
	__u32 i;

	if (obj->efile.st_ops_shndx == -1)
		return 0;

	btf = obj->btf;
	datasec_id = btf__find_by_name_kind(btf, STRUCT_OPS_SEC,
					    BTF_KIND_DATASEC);
	if (datasec_id < 0) {
		pr_warn("struct_ops init: DATASEC %s not found\n",
			STRUCT_OPS_SEC);
		return -EINVAL;
	}

	datasec = btf__type_by_id(btf, datasec_id);
	vsi = btf_var_secinfos(datasec);
	for (i = 0; i < btf_vlen(datasec); i++, vsi++) {
		type = btf__type_by_id(obj->btf, vsi->type);
		var_name = btf__name_by_offset(obj->btf, type->name_off);

		type_id = btf__resolve_type(obj->btf, vsi->type);
		if (type_id < 0) {
			pr_warn("struct_ops init: Cannot resolve var type_id %u in DATASEC %s\n",
				vsi->type, STRUCT_OPS_SEC);
			return -EINVAL;
		}

		type = btf__type_by_id(obj->btf, type_id);
		tname = btf__name_by_offset(obj->btf, type->name_off);
		if (!tname[0]) {
			pr_warn("struct_ops init: anonymous type is not supported\n");
			return -ENOTSUP;
		}
		if (!btf_is_struct(type)) {
			pr_warn("struct_ops init: %s is not a struct\n", tname);
			return -EINVAL;
		}

		map = bpf_object__add_map(obj);
		if (IS_ERR(map))
			return PTR_ERR(map);

		map->sec_idx = obj->efile.st_ops_shndx;
		map->sec_offset = vsi->offset;
		map->name = strdup(var_name);
		if (!map->name)
			return -ENOMEM;

		map->def.type = BPF_MAP_TYPE_STRUCT_OPS;
		map->def.key_size = sizeof(int);
		map->def.value_size = type->size;
		map->def.max_entries = 1;

		map->st_ops = calloc(1, sizeof(*map->st_ops));
		if (!map->st_ops)
			return -ENOMEM;
		st_ops = map->st_ops;
		st_ops->data = malloc(type->size);
		st_ops->progs = calloc(btf_vlen(type), sizeof(*st_ops->progs));
		st_ops->kern_func_off = malloc(btf_vlen(type) *
					       sizeof(*st_ops->kern_func_off));
		if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off)
			return -ENOMEM;

		if (vsi->offset + type->size > obj->efile.st_ops_data->d_size) {
			pr_warn("struct_ops init: var %s is beyond the end of DATASEC %s\n",
				var_name, STRUCT_OPS_SEC);
			return -EINVAL;
		}

		memcpy(st_ops->data,
		       obj->efile.st_ops_data->d_buf + vsi->offset,
		       type->size);
		st_ops->tname = tname;
		st_ops->type = type;
		st_ops->type_id = type_id;

		pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n",
			 tname, type_id, var_name, vsi->offset);
	}

	return 0;
}

static struct bpf_object *bpf_object__new(const char *path,
					  const void *obj_buf,
					  size_t obj_buf_sz,
					  const char *obj_name)
{
	struct bpf_object *obj;
	char *end;

	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
	if (!obj) {
		pr_warn("alloc memory failed for %s\n", path);
		return ERR_PTR(-ENOMEM);
	}

	strcpy(obj->path, path);
	if (obj_name) {
		strncpy(obj->name, obj_name, sizeof(obj->name) - 1);
		obj->name[sizeof(obj->name) - 1] = 0;
	} else {
		/* Using basename() GNU version which doesn't modify arg. */
		strncpy(obj->name, basename((void *)path),
			sizeof(obj->name) - 1);
		end = strchr(obj->name, '.');
		if (end)
			*end = 0;
	}

	obj->efile.fd = -1;
	/*
	 * The caller of this function should also call
	 * bpf_object__elf_finish() after data collection to return
	 * obj_buf to the user. If not, we should duplicate the buffer to
	 * avoid the user freeing it before elf_finish.
	 */
	obj->efile.obj_buf = obj_buf;
	obj->efile.obj_buf_sz = obj_buf_sz;
	obj->efile.maps_shndx = -1;
	obj->efile.btf_maps_shndx = -1;
	obj->efile.data_shndx = -1;
	obj->efile.rodata_shndx = -1;
	obj->efile.bss_shndx = -1;
	obj->efile.st_ops_shndx = -1;
	obj->kconfig_map_idx = -1;
	obj->rodata_map_idx = -1;

	obj->kern_version = get_kernel_version();
	obj->loaded = false;

	INIT_LIST_HEAD(&obj->list);
	list_add(&obj->list, &bpf_objects_list);
	return obj;
}

static void bpf_object__elf_finish(struct bpf_object *obj)
{
	if (!obj_elf_valid(obj))
		return;

	if (obj->efile.elf) {
		elf_end(obj->efile.elf);
		obj->efile.elf = NULL;
	}
	obj->efile.symbols = NULL;
	obj->efile.data = NULL;
	obj->efile.rodata = NULL;
	obj->efile.bss = NULL;
	obj->efile.st_ops_data = NULL;

	zfree(&obj->efile.reloc_sects);
	obj->efile.nr_reloc_sects = 0;
	zclose(obj->efile.fd);
	obj->efile.obj_buf = NULL;
	obj->efile.obj_buf_sz = 0;
}

/* if libelf is old and doesn't support mmap(), fall back to read() */
#ifndef ELF_C_READ_MMAP
#define ELF_C_READ_MMAP ELF_C_READ
#endif

static int bpf_object__elf_init(struct bpf_object *obj)
{
	int err = 0;
	GElf_Ehdr *ep;

	if (obj_elf_valid(obj)) {
		pr_warn("elf: init internal error\n");
		return -LIBBPF_ERRNO__LIBELF;
	}

	if (obj->efile.obj_buf_sz > 0) {
		/*
		 * obj_buf should have been validated by
		 * bpf_object__open_buffer().
		 */
		obj->efile.elf = elf_memory((char *)obj->efile.obj_buf,
					    obj->efile.obj_buf_sz);
	} else {
		obj->efile.fd = open(obj->path, O_RDONLY);
		if (obj->efile.fd < 0) {
			char errmsg[STRERR_BUFSIZE], *cp;

			err = -errno;
			cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
			pr_warn("elf: failed to open %s: %s\n", obj->path, cp);
			return err;
		}

		obj->efile.elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL);
	}

	if (!obj->efile.elf) {
		pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__LIBELF;
		goto errout;
	}

	if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
		pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}
	ep = &obj->efile.ehdr;

	if (elf_getshdrstrndx(obj->efile.elf, &obj->efile.shstrndx)) {
		pr_warn("elf: failed to get section names section index for %s: %s\n",
			obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
	if (!elf_rawdata(elf_getscn(obj->efile.elf, obj->efile.shstrndx), NULL)) {
		pr_warn("elf: failed to get section names strings from %s: %s\n",
			obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	/* Old LLVM set e_machine to EM_NONE */
	if (ep->e_type != ET_REL ||
	    (ep->e_machine && ep->e_machine != EM_BPF)) {
		pr_warn("elf: %s is not a valid eBPF object file\n", obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	return 0;
errout:
	bpf_object__elf_finish(obj);
	return err;
}

static int bpf_object__check_endianness(struct bpf_object *obj)
{
#if __BYTE_ORDER == __LITTLE_ENDIAN
	if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
		return 0;
#elif __BYTE_ORDER == __BIG_ENDIAN
	if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2MSB)
		return 0;
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
	pr_warn("elf: endianness mismatch in %s.\n", obj->path);
	return -LIBBPF_ERRNO__ENDIAN;
}

static int
bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
{
	memcpy(obj->license, data, min(size, sizeof(obj->license) - 1));
	pr_debug("license of %s is %s\n", obj->path, obj->license);
	return 0;
}

static int
bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
{
	__u32 kver;

	if (size != sizeof(kver)) {
		pr_warn("invalid kver section in %s\n", obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}
	memcpy(&kver, data, sizeof(kver));
	obj->kern_version = kver;
	pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
	return 0;
}

static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
{
	if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
	    type == BPF_MAP_TYPE_HASH_OF_MAPS)
		return true;
	return false;
}

int bpf_object__section_size(const struct bpf_object *obj, const char *name,
			     __u32 *size)
{
	int ret = -ENOENT;

	*size = 0;
	if (!name) {
		return -EINVAL;
	} else if (!strcmp(name, DATA_SEC)) {
		if (obj->efile.data)
			*size = obj->efile.data->d_size;
	} else if (!strcmp(name, BSS_SEC)) {
		if (obj->efile.bss)
			*size = obj->efile.bss->d_size;
	} else if (!strcmp(name, RODATA_SEC)) {
		if (obj->efile.rodata)
			*size = obj->efile.rodata->d_size;
	} else if (!strcmp(name, STRUCT_OPS_SEC)) {
		if (obj->efile.st_ops_data)
			*size = obj->efile.st_ops_data->d_size;
	} else {
		Elf_Scn *scn = elf_sec_by_name(obj, name);
		Elf_Data *data = elf_sec_data(obj, scn);

		if (data) {
			ret = 0;
			*size = data->d_size;
		}
	}

	return *size ? 0 : ret;
}

int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
				__u32 *off)
{
	Elf_Data *symbols = obj->efile.symbols;
	const char *sname;
	size_t si;

	if (!name || !off)
		return -EINVAL;

	for (si = 0; si < symbols->d_size / sizeof(GElf_Sym); si++) {
		GElf_Sym sym;

		if (!gelf_getsym(symbols, si, &sym))
			continue;
		if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
		    GELF_ST_TYPE(sym.st_info) != STT_OBJECT)
			continue;

		sname = elf_sym_str(obj, sym.st_name);
		if (!sname) {
			pr_warn("failed to get sym name string for var %s\n",
				name);
			return -EIO;
		}
		if (strcmp(name, sname) == 0) {
			*off = sym.st_value;
			return 0;
		}
	}

	return -ENOENT;
}

static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
{
	struct bpf_map *new_maps;
	size_t new_cap;
	int i;

	if (obj->nr_maps < obj->maps_cap)
		return &obj->maps[obj->nr_maps++];

	new_cap = max((size_t)4, obj->maps_cap * 3 / 2);
	new_maps = libbpf_reallocarray(obj->maps, new_cap, sizeof(*obj->maps));
	if (!new_maps) {
		pr_warn("alloc maps for object failed\n");
		return ERR_PTR(-ENOMEM);
	}

	obj->maps_cap = new_cap;
	obj->maps = new_maps;

	/* zero out new maps */
	memset(obj->maps + obj->nr_maps, 0,
	       (obj->maps_cap - obj->nr_maps) * sizeof(*obj->maps));
	/*
	 * fill all fds with -1 so a zeroed-out fd field won't accidentally
	 * close some unrelated fd 0 later
	 */
	for (i = obj->nr_maps; i < obj->maps_cap; i++) {
		obj->maps[i].fd = -1;
		obj->maps[i].inner_map_fd = -1;
	}

	return &obj->maps[obj->nr_maps++];
}

static size_t bpf_map_mmap_sz(const struct bpf_map *map)
{
	long page_sz = sysconf(_SC_PAGE_SIZE);
	size_t map_sz;

	map_sz = (size_t)roundup(map->def.value_size, 8) * map->def.max_entries;
	map_sz = roundup(map_sz, page_sz);
	return map_sz;
}
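
/* E.g. with 4KiB pages, 512 entries of 6-byte values mmap as
 * roundup(6, 8) * 512 = 4096 bytes, i.e. exactly one page.
 */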

static char *internal_map_name(struct bpf_object *obj,
			       enum libbpf_map_type type)
{
	char map_name[BPF_OBJ_NAME_LEN], *p;
	const char *sfx = libbpf_type_to_btf_name[type];
	int sfx_len = max((size_t)7, strlen(sfx));
	int pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1,
			  strlen(obj->name));

	snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name,
		 sfx_len, libbpf_type_to_btf_name[type]);

	/* sanitise map name to characters allowed by kernel */
	for (p = map_name; *p && p < map_name + sizeof(map_name); p++)
		if (!isalnum(*p) && *p != '_' && *p != '.')
			*p = '_';

	return strdup(map_name);
}

static int
bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
			      int sec_idx, void *data, size_t data_sz)
{
	struct bpf_map_def *def;
	struct bpf_map *map;
	int err;

	map = bpf_object__add_map(obj);
	if (IS_ERR(map))
		return PTR_ERR(map);

	map->libbpf_type = type;
	map->sec_idx = sec_idx;
	map->sec_offset = 0;
	map->name = internal_map_name(obj, type);
	if (!map->name) {
		pr_warn("failed to alloc map name\n");
		return -ENOMEM;
	}

	def = &map->def;
	def->type = BPF_MAP_TYPE_ARRAY;
	def->key_size = sizeof(int);
	def->value_size = data_sz;
	def->max_entries = 1;
	def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG
			 ? BPF_F_RDONLY_PROG : 0;
	def->map_flags |= BPF_F_MMAPABLE;

	pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
		 map->name, map->sec_idx, map->sec_offset, def->map_flags);

	map->mmaped = mmap(NULL, bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (map->mmaped == MAP_FAILED) {
		err = -errno;
		map->mmaped = NULL;
		pr_warn("failed to alloc map '%s' content buffer: %d\n",
			map->name, err);
		zfree(&map->name);
		return err;
	}

	if (data)
		memcpy(map->mmaped, data, data_sz);

	pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
	return 0;
}

static int bpf_object__init_global_data_maps(struct bpf_object *obj)
{
	int err;

	/*
	 * Populate obj->maps with libbpf internal maps.
	 */
	if (obj->efile.data_shndx >= 0) {
		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
						    obj->efile.data_shndx,
						    obj->efile.data->d_buf,
						    obj->efile.data->d_size);
		if (err)
			return err;
	}
	if (obj->efile.rodata_shndx >= 0) {
		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
						    obj->efile.rodata_shndx,
						    obj->efile.rodata->d_buf,
						    obj->efile.rodata->d_size);
		if (err)
			return err;

		obj->rodata_map_idx = obj->nr_maps - 1;
	}
	if (obj->efile.bss_shndx >= 0) {
		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
						    obj->efile.bss_shndx,
						    NULL,
						    obj->efile.bss->d_size);
		if (err)
			return err;
	}
	return 0;
}

static struct extern_desc *find_extern_by_name(const struct bpf_object *obj,
					       const void *name)
{
	int i;

	for (i = 0; i < obj->nr_extern; i++) {
		if (strcmp(obj->externs[i].name, name) == 0)
			return &obj->externs[i];
	}
	return NULL;
}

static int set_kcfg_value_tri(struct extern_desc *ext, void *ext_val,
			      char value)
{
	switch (ext->kcfg.type) {
	case KCFG_BOOL:
		if (value == 'm') {
			pr_warn("extern (kcfg) %s=%c should be tristate or char\n",
				ext->name, value);
			return -EINVAL;
		}
		*(bool *)ext_val = value == 'y' ? true : false;
		break;
	case KCFG_TRISTATE:
		if (value == 'y')
			*(enum libbpf_tristate *)ext_val = TRI_YES;
		else if (value == 'm')
			*(enum libbpf_tristate *)ext_val = TRI_MODULE;
		else
			*(enum libbpf_tristate *)ext_val = TRI_NO;
		break;
	case KCFG_CHAR:
		*(char *)ext_val = value;
		break;
	case KCFG_UNKNOWN:
	case KCFG_INT:
	case KCFG_CHAR_ARR:
	default:
		pr_warn("extern (kcfg) %s=%c should be bool, tristate, or char\n",
			ext->name, value);
		return -EINVAL;
	}
	ext->is_set = true;
	return 0;
}

static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val,
			      const char *value)
{
	size_t len;

	if (ext->kcfg.type != KCFG_CHAR_ARR) {
		pr_warn("extern (kcfg) %s=%s should be char array\n", ext->name, value);
		return -EINVAL;
	}

	len = strlen(value);
	if (len < 2 || value[len - 1] != '"') {
		pr_warn("extern (kcfg) '%s': invalid string config '%s'\n",
			ext->name, value);
		return -EINVAL;
	}

	/* strip quotes */
	len -= 2;
	if (len >= ext->kcfg.sz) {
		pr_warn("extern (kcfg) '%s': long string config %s of (%zu bytes) truncated to %d bytes\n",
			ext->name, value, len, ext->kcfg.sz - 1);
		len = ext->kcfg.sz - 1;
	}
	memcpy(ext_val, value + 1, len);
	ext_val[len] = '\0';
	ext->is_set = true;
	return 0;
}

static int parse_u64(const char *value, __u64 *res)
{
	char *value_end;
	int err;

	errno = 0;
	*res = strtoull(value, &value_end, 0);
	if (errno) {
		err = -errno;
		pr_warn("failed to parse '%s' as integer: %d\n", value, err);
		return err;
	}
	if (*value_end) {
		pr_warn("failed to parse '%s' as integer completely\n", value);
		return -EINVAL;
	}
	return 0;
}

static bool is_kcfg_value_in_range(const struct extern_desc *ext, __u64 v)
{
	int bit_sz = ext->kcfg.sz * 8;

	if (ext->kcfg.sz == 8)
		return true;

	/* Validate that the value stored in u64 fits in an integer of
	 * `ext->kcfg.sz` bytes without any loss of information. Limits of
	 * the supported integer sizes:
	 *
	 *     sz    bits     min          max
	 *     1     8        -128         127
	 *     2     16       -32768       32767
	 *     4     32       -2147483648  2147483647
	 *
	 * For a signed target, v + 2^(bit_sz - 1) < 2^bit_sz accepts exactly
	 * the two's-complement range above; for an unsigned target, all
	 * bits above bit_sz must be zero.
	 */
	if (ext->kcfg.is_signed)
		return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz);
	else
		return (v >> bit_sz) == 0;
}
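
/* E.g. for a signed 1-byte target: v = -128 (0xffffffffffffff80 as __u64)
 * passes since v + 128 wraps to 0 < 256, while v = 128 fails since
 * 128 + 128 == 256.
 */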

static int set_kcfg_value_num(struct extern_desc *ext, void *ext_val,
			      __u64 value)
{
	if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) {
		pr_warn("extern (kcfg) %s=%llu should be integer\n",
			ext->name, (unsigned long long)value);
		return -EINVAL;
	}
	if (!is_kcfg_value_in_range(ext, value)) {
		pr_warn("extern (kcfg) %s=%llu value doesn't fit in %d bytes\n",
			ext->name, (unsigned long long)value, ext->kcfg.sz);
		return -ERANGE;
	}
	switch (ext->kcfg.sz) {
	case 1: *(__u8 *)ext_val = value; break;
	case 2: *(__u16 *)ext_val = value; break;
	case 4: *(__u32 *)ext_val = value; break;
	case 8: *(__u64 *)ext_val = value; break;
	default:
		return -EINVAL;
	}
	ext->is_set = true;
	return 0;
}

static int bpf_object__process_kconfig_line(struct bpf_object *obj,
					    char *buf, void *data)
{
	struct extern_desc *ext;
	char *sep, *value;
	int len, err = 0;
	void *ext_val;
	__u64 num;

	if (strncmp(buf, "CONFIG_", 7))
		return 0;

	sep = strchr(buf, '=');
	if (!sep) {
		pr_warn("failed to parse '%s': no separator\n", buf);
		return -EINVAL;
	}

	/* Trim ending '\n' */
	len = strlen(buf);
	if (buf[len - 1] == '\n')
		buf[len - 1] = '\0';

	/* Split on '=' and make sure a value is present. */
	*sep = '\0';
	if (!sep[1]) {
		*sep = '=';
		pr_warn("failed to parse '%s': no value\n", buf);
		return -EINVAL;
	}

	ext = find_extern_by_name(obj, buf);
	if (!ext || ext->is_set)
		return 0;

	ext_val = data + ext->kcfg.data_off;
	value = sep + 1;

	switch (*value) {
	case 'y': case 'n': case 'm':
		err = set_kcfg_value_tri(ext, ext_val, *value);
		break;
	case '"':
		err = set_kcfg_value_str(ext, ext_val, value);
		break;
	default:
		/* assume integer */
		err = parse_u64(value, &num);
		if (err) {
			pr_warn("extern (kcfg) %s=%s should be integer\n",
				ext->name, value);
			return err;
		}
		err = set_kcfg_value_num(ext, ext_val, num);
		break;
	}
	if (err)
		return err;
	pr_debug("extern (kcfg) %s=%s\n", ext->name, value);
	return 0;
}

static int bpf_object__read_kconfig_file(struct bpf_object *obj, void *data)
{
	char buf[PATH_MAX];
	struct utsname uts;
	int len, err = 0;
	gzFile file;

	uname(&uts);
	len = snprintf(buf, PATH_MAX, "/boot/config-%s", uts.release);
	if (len < 0)
		return -EINVAL;
	else if (len >= PATH_MAX)
		return -ENAMETOOLONG;

	/* gzopen also accepts uncompressed files. */
	file = gzopen(buf, "r");
	if (!file)
		file = gzopen("/proc/config.gz", "r");

	if (!file) {
		pr_warn("failed to open system Kconfig\n");
		return -ENOENT;
	}

	while (gzgets(file, buf, sizeof(buf))) {
		err = bpf_object__process_kconfig_line(obj, buf, data);
		if (err) {
			pr_warn("error parsing system Kconfig line '%s': %d\n",
				buf, err);
			goto out;
		}
	}

out:
	gzclose(file);
	return err;
}

static int bpf_object__read_kconfig_mem(struct bpf_object *obj,
					const char *config, void *data)
{
	char buf[PATH_MAX];
	int err = 0;
	FILE *file;

	file = fmemopen((void *)config, strlen(config), "r");
	if (!file) {
		err = -errno;
		pr_warn("failed to open in-memory Kconfig: %d\n", err);
		return err;
	}

	while (fgets(buf, sizeof(buf), file)) {
		err = bpf_object__process_kconfig_line(obj, buf, data);
		if (err) {
			pr_warn("error parsing in-memory Kconfig line '%s': %d\n",
				buf, err);
			break;
		}
	}

	fclose(file);
	return err;
}

static int bpf_object__init_kconfig_map(struct bpf_object *obj)
{
	struct extern_desc *last_ext = NULL, *ext;
	size_t map_sz;
	int i, err;

	for (i = 0; i < obj->nr_extern; i++) {
		ext = &obj->externs[i];
		if (ext->type == EXT_KCFG)
			last_ext = ext;
	}

	if (!last_ext)
		return 0;

	map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz;
	err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG,
					    obj->efile.symbols_shndx,
					    NULL, map_sz);
	if (err)
		return err;

	obj->kconfig_map_idx = obj->nr_maps - 1;

	return 0;
}

static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
{
	Elf_Data *symbols = obj->efile.symbols;
	int i, map_def_sz = 0, nr_maps = 0, nr_syms;
	Elf_Data *data = NULL;
	Elf_Scn *scn;

	if (obj->efile.maps_shndx < 0)
		return 0;

	if (!symbols)
		return -EINVAL;

	scn = elf_sec_by_idx(obj, obj->efile.maps_shndx);
	data = elf_sec_data(obj, scn);
	if (!scn || !data) {
		pr_warn("elf: failed to get legacy map definitions for %s\n",
			obj->path);
		return -EINVAL;
	}

	/*
	 * Count number of maps. Each map has a name.
	 * Array of maps is not supported: only the first element is
	 * considered.
	 *
	 * TODO: Detect array of map and report error.
	 */
	nr_syms = symbols->d_size / sizeof(GElf_Sym);
	for (i = 0; i < nr_syms; i++) {
		GElf_Sym sym;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;
		nr_maps++;
	}

	pr_debug("elf: found %d legacy map definitions (%zd bytes) in %s\n",
		 nr_maps, data->d_size, obj->path);

	if (!data->d_size || nr_maps == 0 || (data->d_size % nr_maps) != 0) {
		pr_warn("elf: unable to determine legacy map definition size in %s\n",
			obj->path);
		return -EINVAL;
	}
	map_def_sz = data->d_size / nr_maps;

	/* Fill obj->maps using data in "maps" section. */
	for (i = 0; i < nr_syms; i++) {
		GElf_Sym sym;
		const char *map_name;
		struct bpf_map_def *def;
		struct bpf_map *map;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;

		map = bpf_object__add_map(obj);
		if (IS_ERR(map))
			return PTR_ERR(map);

		map_name = elf_sym_str(obj, sym.st_name);
		if (!map_name) {
			pr_warn("failed to get map #%d name sym string for obj %s\n",
				i, obj->path);
			return -LIBBPF_ERRNO__FORMAT;
		}

		map->libbpf_type = LIBBPF_MAP_UNSPEC;
		map->sec_idx = sym.st_shndx;
		map->sec_offset = sym.st_value;
		pr_debug("map '%s' (legacy): at sec_idx %d, offset %zu.\n",
			 map_name, map->sec_idx, map->sec_offset);
		if (sym.st_value + map_def_sz > data->d_size) {
			pr_warn("corrupted maps section in %s: last map \"%s\" too small\n",
				obj->path, map_name);
			return -EINVAL;
		}

		map->name = strdup(map_name);
		if (!map->name) {
			pr_warn("failed to alloc map name\n");
			return -ENOMEM;
		}
		pr_debug("map %d is \"%s\"\n", i, map->name);
		def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
		/*
		 * If the definition of the map in the object file fits in
		 * bpf_map_def, copy it. Any extra fields in our version
		 * of bpf_map_def will default to zero, since new maps are
		 * zero-initialized by bpf_object__add_map().
		 */
		if (map_def_sz <= sizeof(struct bpf_map_def)) {
			memcpy(&map->def, def, map_def_sz);
		} else {
			/*
			 * Here the map structure being read is bigger than
			 * what we expect; truncate if the excess bits are
			 * all zero. If they are not zero, reject the map as
			 * incompatible.
			 */
			char *b;

			for (b = ((char *)def) + sizeof(struct bpf_map_def);
			     b < ((char *)def) + map_def_sz; b++) {
				if (*b != 0) {
					pr_warn("maps section in %s: \"%s\" has unrecognized, non-zero options\n",
						obj->path, map_name);
					if (strict)
						return -EINVAL;
				}
			}
			memcpy(&map->def, def, sizeof(struct bpf_map_def));
		}
	}
	return 0;
}

static const struct btf_type *
skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id)
{
	const struct btf_type *t = btf__type_by_id(btf, id);

	if (res_id)
		*res_id = id;

	while (btf_is_mod(t) || btf_is_typedef(t)) {
		if (res_id)
			*res_id = t->type;
		t = btf__type_by_id(btf, t->type);
	}

	return t;
}
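
/* E.g. for "typedef const struct foo bar", resolving "bar" skips both the
 * typedef and the const modifier and returns "struct foo", updating
 * *res_id along the way.
 */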

static const struct btf_type *
resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id)
{
	const struct btf_type *t;

	t = skip_mods_and_typedefs(btf, id, NULL);
	if (!btf_is_ptr(t))
		return NULL;

	t = skip_mods_and_typedefs(btf, t->type, res_id);

	return btf_is_func_proto(t) ? t : NULL;
}

static const char *btf_kind_str(const struct btf_type *t)
{
	switch (btf_kind(t)) {
	case BTF_KIND_UNKN: return "void";
	case BTF_KIND_INT: return "int";
	case BTF_KIND_PTR: return "ptr";
	case BTF_KIND_ARRAY: return "array";
	case BTF_KIND_STRUCT: return "struct";
	case BTF_KIND_UNION: return "union";
	case BTF_KIND_ENUM: return "enum";
	case BTF_KIND_FWD: return "fwd";
	case BTF_KIND_TYPEDEF: return "typedef";
	case BTF_KIND_VOLATILE: return "volatile";
	case BTF_KIND_CONST: return "const";
	case BTF_KIND_RESTRICT: return "restrict";
	case BTF_KIND_FUNC: return "func";
	case BTF_KIND_FUNC_PROTO: return "func_proto";
	case BTF_KIND_VAR: return "var";
	case BTF_KIND_DATASEC: return "datasec";
	default: return "unknown";
	}
}

/*
 * Fetch an integer attribute of a BTF map definition. Such attributes are
 * represented using a pointer to an array, in which the dimensionality of
 * the array encodes the specified integer value. E.g.,
 * int (*type)[BPF_MAP_TYPE_ARRAY]; encodes the `type => BPF_MAP_TYPE_ARRAY`
 * key/value pair completely using BTF type definitions, while using only
 * sizeof(void *) space in the ELF data section.
 */
static bool get_map_field_int(const char *map_name, const struct btf *btf,
			      const struct btf_member *m, __u32 *res)
{
	const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
	const char *name = btf__name_by_offset(btf, m->name_off);
	const struct btf_array *arr_info;
	const struct btf_type *arr_t;

	if (!btf_is_ptr(t)) {
		pr_warn("map '%s': attr '%s': expected PTR, got %s.\n",
			map_name, name, btf_kind_str(t));
		return false;
	}

	arr_t = btf__type_by_id(btf, t->type);
	if (!arr_t) {
		pr_warn("map '%s': attr '%s': type [%u] not found.\n",
			map_name, name, t->type);
		return false;
	}
	if (!btf_is_array(arr_t)) {
		pr_warn("map '%s': attr '%s': expected ARRAY, got %s.\n",
			map_name, name, btf_kind_str(arr_t));
		return false;
	}
	arr_info = btf_array(arr_t);
	*res = arr_info->nelems;
	return true;
}
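
/* With the helper macros from bpf_helpers.h, __uint(max_entries, 64)
 * expands to "int (*max_entries)[64]", so the value 64 is recovered here
 * as the array's nelems.
 */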

static int build_map_pin_path(struct bpf_map *map, const char *path)
{
	char buf[PATH_MAX];
	int len;

	if (!path)
		path = "/sys/fs/bpf";

	len = snprintf(buf, PATH_MAX, "%s/%s", path, bpf_map__name(map));
	if (len < 0)
		return -EINVAL;
	else if (len >= PATH_MAX)
		return -ENAMETOOLONG;

	return bpf_map__set_pin_path(map, buf);
}

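/* An example (illustrative) of the BTF-defined map layout this parser
 * accepts, using the conventional macros from bpf_helpers.h:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_HASH);
 *		__uint(max_entries, 1024);
 *		__type(key, __u32);
 *		__type(value, struct my_value);
 *	} my_map SEC(".maps");
 *
 * The field names handled below ("type", "max_entries", "key", ...)
 * correspond to the member names of such a struct.
 */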
static int parse_btf_map_def(struct bpf_object *obj,
			     struct bpf_map *map,
			     const struct btf_type *def,
			     bool strict, bool is_inner,
			     const char *pin_root_path)
{
	const struct btf_type *t;
	const struct btf_member *m;
	int vlen, i;

	vlen = btf_vlen(def);
	m = btf_members(def);
	for (i = 0; i < vlen; i++, m++) {
		const char *name = btf__name_by_offset(obj->btf, m->name_off);

		if (!name) {
			pr_warn("map '%s': invalid field #%d.\n", map->name, i);
			return -EINVAL;
		}
		if (strcmp(name, "type") == 0) {
			if (!get_map_field_int(map->name, obj->btf, m,
					       &map->def.type))
				return -EINVAL;
			pr_debug("map '%s': found type = %u.\n",
				 map->name, map->def.type);
		} else if (strcmp(name, "max_entries") == 0) {
			if (!get_map_field_int(map->name, obj->btf, m,
					       &map->def.max_entries))
				return -EINVAL;
			pr_debug("map '%s': found max_entries = %u.\n",
				 map->name, map->def.max_entries);
		} else if (strcmp(name, "map_flags") == 0) {
			if (!get_map_field_int(map->name, obj->btf, m,
					       &map->def.map_flags))
				return -EINVAL;
			pr_debug("map '%s': found map_flags = %u.\n",
				 map->name, map->def.map_flags);
		} else if (strcmp(name, "numa_node") == 0) {
			if (!get_map_field_int(map->name, obj->btf, m, &map->numa_node))
				return -EINVAL;
			pr_debug("map '%s': found numa_node = %u.\n", map->name, map->numa_node);
		} else if (strcmp(name, "key_size") == 0) {
			__u32 sz;

			if (!get_map_field_int(map->name, obj->btf, m, &sz))
				return -EINVAL;
			pr_debug("map '%s': found key_size = %u.\n",
				 map->name, sz);
			if (map->def.key_size && map->def.key_size != sz) {
				pr_warn("map '%s': conflicting key size %u != %u.\n",
					map->name, map->def.key_size, sz);
				return -EINVAL;
			}
			map->def.key_size = sz;
		} else if (strcmp(name, "key") == 0) {
			__s64 sz;

			t = btf__type_by_id(obj->btf, m->type);
			if (!t) {
				pr_warn("map '%s': key type [%d] not found.\n",
					map->name, m->type);
				return -EINVAL;
			}
			if (!btf_is_ptr(t)) {
				pr_warn("map '%s': key spec is not PTR: %s.\n",
					map->name, btf_kind_str(t));
				return -EINVAL;
			}
			sz = btf__resolve_size(obj->btf, t->type);
			if (sz < 0) {
				pr_warn("map '%s': can't determine key size for type [%u]: %zd.\n",
					map->name, t->type, (ssize_t)sz);
				return sz;
			}
			pr_debug("map '%s': found key [%u], sz = %zd.\n",
				 map->name, t->type, (ssize_t)sz);
			if (map->def.key_size && map->def.key_size != sz) {
				pr_warn("map '%s': conflicting key size %u != %zd.\n",
					map->name, map->def.key_size, (ssize_t)sz);
				return -EINVAL;
			}
			map->def.key_size = sz;
			map->btf_key_type_id = t->type;
		} else if (strcmp(name, "value_size") == 0) {
			__u32 sz;

			if (!get_map_field_int(map->name, obj->btf, m, &sz))
				return -EINVAL;
			pr_debug("map '%s': found value_size = %u.\n",
				 map->name, sz);
			if (map->def.value_size && map->def.value_size != sz) {
				pr_warn("map '%s': conflicting value size %u != %u.\n",
					map->name, map->def.value_size, sz);
				return -EINVAL;
			}
			map->def.value_size = sz;
		} else if (strcmp(name, "value") == 0) {
			__s64 sz;

			t = btf__type_by_id(obj->btf, m->type);
			if (!t) {
				pr_warn("map '%s': value type [%d] not found.\n",
					map->name, m->type);
				return -EINVAL;
			}
			if (!btf_is_ptr(t)) {
				pr_warn("map '%s': value spec is not PTR: %s.\n",
					map->name, btf_kind_str(t));
				return -EINVAL;
			}
			sz = btf__resolve_size(obj->btf, t->type);
			if (sz < 0) {
				pr_warn("map '%s': can't determine value size for type [%u]: %zd.\n",
					map->name, t->type, (ssize_t)sz);
				return sz;
			}
			pr_debug("map '%s': found value [%u], sz = %zd.\n",
				 map->name, t->type, (ssize_t)sz);
			if (map->def.value_size && map->def.value_size != sz) {
				pr_warn("map '%s': conflicting value size %u != %zd.\n",
					map->name, map->def.value_size, (ssize_t)sz);
				return -EINVAL;
			}
			map->def.value_size = sz;
			map->btf_value_type_id = t->type;
		} else if (strcmp(name, "values") == 0) {
			int err;

			if (is_inner) {
				pr_warn("map '%s': multi-level inner maps not supported.\n",
					map->name);
				return -ENOTSUP;
			}
			if (i != vlen - 1) {
				pr_warn("map '%s': '%s' member should be last.\n",
					map->name, name);
				return -EINVAL;
			}
			if (!bpf_map_type__is_map_in_map(map->def.type)) {
				pr_warn("map '%s': should be map-in-map.\n",
					map->name);
				return -ENOTSUP;
			}
			if (map->def.value_size && map->def.value_size != 4) {
				pr_warn("map '%s': conflicting value size %u != 4.\n",
					map->name, map->def.value_size);
				return -EINVAL;
			}
			map->def.value_size = 4;
			t = btf__type_by_id(obj->btf, m->type);
			if (!t) {
				pr_warn("map '%s': map-in-map inner type [%d] not found.\n",
					map->name, m->type);
				return -EINVAL;
			}
			if (!btf_is_array(t) || btf_array(t)->nelems) {
				pr_warn("map '%s': map-in-map inner spec is not a zero-sized array.\n",
					map->name);
				return -EINVAL;
			}
			t = skip_mods_and_typedefs(obj->btf, btf_array(t)->type,
						   NULL);
			if (!btf_is_ptr(t)) {
				pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
					map->name, btf_kind_str(t));
				return -EINVAL;
			}
			t = skip_mods_and_typedefs(obj->btf, t->type, NULL);
			if (!btf_is_struct(t)) {
				pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
					map->name, btf_kind_str(t));
				return -EINVAL;
			}

			map->inner_map = calloc(1, sizeof(*map->inner_map));
			if (!map->inner_map)
				return -ENOMEM;
			map->inner_map->sec_idx = obj->efile.btf_maps_shndx;
			map->inner_map->name = malloc(strlen(map->name) +
						      sizeof(".inner") + 1);
			if (!map->inner_map->name)
				return -ENOMEM;
			sprintf(map->inner_map->name, "%s.inner", map->name);

			err = parse_btf_map_def(obj, map->inner_map, t, strict,
						true /* is_inner */, NULL);
			if (err)
				return err;
		} else if (strcmp(name, "pinning") == 0) {
			__u32 val;
			int err;

			if (is_inner) {
				pr_debug("map '%s': inner def can't be pinned.\n",
					 map->name);
				return -EINVAL;
			}
			if (!get_map_field_int(map->name, obj->btf, m, &val))
				return -EINVAL;
			pr_debug("map '%s': found pinning = %u.\n",
				 map->name, val);

			if (val != LIBBPF_PIN_NONE &&
			    val != LIBBPF_PIN_BY_NAME) {
				pr_warn("map '%s': invalid pinning value %u.\n",
					map->name, val);
				return -EINVAL;
			}
			if (val == LIBBPF_PIN_BY_NAME) {
				err = build_map_pin_path(map, pin_root_path);
				if (err) {
					pr_warn("map '%s': couldn't build pin path.\n",
						map->name);
					return err;
				}
			}
		} else {
			if (strict) {
				pr_warn("map '%s': unknown field '%s'.\n",
					map->name, name);
				return -ENOTSUP;
			}
			pr_debug("map '%s': ignoring unknown field '%s'.\n",
				 map->name, name);
		}
	}

	if (map->def.type == BPF_MAP_TYPE_UNSPEC) {
		pr_warn("map '%s': map type isn't specified.\n", map->name);
		return -EINVAL;
	}

	return 0;
}

static int bpf_object__init_user_btf_map(struct bpf_object *obj,
					 const struct btf_type *sec,
					 int var_idx, int sec_idx,
					 const Elf_Data *data, bool strict,
					 const char *pin_root_path)
{
	const struct btf_type *var, *def;
	const struct btf_var_secinfo *vi;
	const struct btf_var *var_extra;
	const char *map_name;
	struct bpf_map *map;

	vi = btf_var_secinfos(sec) + var_idx;
	var = btf__type_by_id(obj->btf, vi->type);
	var_extra = btf_var(var);
	map_name = btf__name_by_offset(obj->btf, var->name_off);

	if (map_name == NULL || map_name[0] == '\0') {
		pr_warn("map #%d: empty name.\n", var_idx);
		return -EINVAL;
	}
	if ((__u64)vi->offset + vi->size > data->d_size) {
		pr_warn("map '%s' BTF data is corrupted.\n", map_name);
		return -EINVAL;
	}
	if (!btf_is_var(var)) {
		pr_warn("map '%s': unexpected var kind %s.\n",
			map_name, btf_kind_str(var));
		return -EINVAL;
	}
	if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED &&
	    var_extra->linkage != BTF_VAR_STATIC) {
		pr_warn("map '%s': unsupported var linkage %u.\n",
			map_name, var_extra->linkage);
		return -EOPNOTSUPP;
	}

	def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
	if (!btf_is_struct(def)) {
		pr_warn("map '%s': unexpected def kind %s.\n",
			map_name, btf_kind_str(def));
		return -EINVAL;
	}
	if (def->size > vi->size) {
		pr_warn("map '%s': invalid def size.\n", map_name);
		return -EINVAL;
	}

	map = bpf_object__add_map(obj);
	if (IS_ERR(map))
		return PTR_ERR(map);
	map->name = strdup(map_name);
	if (!map->name) {
		pr_warn("map '%s': failed to alloc map name.\n", map_name);
		return -ENOMEM;
	}
	map->libbpf_type = LIBBPF_MAP_UNSPEC;
	map->def.type = BPF_MAP_TYPE_UNSPEC;
	map->sec_idx = sec_idx;
	map->sec_offset = vi->offset;
	map->btf_var_idx = var_idx;
	pr_debug("map '%s': at sec_idx %d, offset %zu.\n",
		 map_name, map->sec_idx, map->sec_offset);

	return parse_btf_map_def(obj, map, def, strict, false, pin_root_path);
}
2300
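/*
 * Find the .maps DATASEC in the object's BTF and create a bpf_map for every
 * variable declared in it.
 */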
static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
					  const char *pin_root_path)
{
	const struct btf_type *sec = NULL;
	int nr_types, i, vlen, err;
	const struct btf_type *t;
	const char *name;
	Elf_Data *data;
	Elf_Scn *scn;

	if (obj->efile.btf_maps_shndx < 0)
		return 0;

	scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx);
	data = elf_sec_data(obj, scn);
	if (!scn || !data) {
		pr_warn("elf: failed to get %s map definitions for %s\n",
			MAPS_ELF_SEC, obj->path);
		return -EINVAL;
	}

	nr_types = btf__get_nr_types(obj->btf);
	for (i = 1; i <= nr_types; i++) {
		t = btf__type_by_id(obj->btf, i);
		if (!btf_is_datasec(t))
			continue;
		name = btf__name_by_offset(obj->btf, t->name_off);
		if (strcmp(name, MAPS_ELF_SEC) == 0) {
			sec = t;
			obj->efile.btf_maps_sec_btf_id = i;
			break;
		}
	}

	if (!sec) {
		pr_warn("DATASEC '%s' not found.\n", MAPS_ELF_SEC);
		return -ENOENT;
	}

	vlen = btf_vlen(sec);
	for (i = 0; i < vlen; i++) {
		err = bpf_object__init_user_btf_map(obj, sec, i,
						    obj->efile.btf_maps_shndx,
						    data, strict,
						    pin_root_path);
		if (err)
			return err;
	}

	return 0;
}

static int bpf_object__init_maps(struct bpf_object *obj,
				 const struct bpf_object_open_opts *opts)
{
	const char *pin_root_path;
	bool strict;
	int err;

	strict = !OPTS_GET(opts, relaxed_maps, false);
	pin_root_path = OPTS_GET(opts, pin_root_path, NULL);

	err = bpf_object__init_user_maps(obj, strict);
	err = err ?: bpf_object__init_user_btf_maps(obj, strict, pin_root_path);
	err = err ?: bpf_object__init_global_data_maps(obj);
	err = err ?: bpf_object__init_kconfig_map(obj);
	err = err ?: bpf_object__init_struct_ops_maps(obj);
	if (err)
		return err;

	return 0;
}

static bool section_have_execinstr(struct bpf_object *obj, int idx)
{
	GElf_Shdr sh;

	if (elf_sec_hdr(obj, elf_sec_by_idx(obj, idx), &sh))
		return false;

	return sh.sh_flags & SHF_EXECINSTR;
}

static bool btf_needs_sanitization(struct bpf_object *obj)
{
	bool has_func_global = kernel_supports(FEAT_BTF_GLOBAL_FUNC);
	bool has_datasec = kernel_supports(FEAT_BTF_DATASEC);
	bool has_func = kernel_supports(FEAT_BTF_FUNC);

	return !has_func || !has_datasec || !has_func_global;
}

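/*
 * Rewrite BTF kinds that the running kernel doesn't understand into older,
 * universally supported ones, so that uploading BTF doesn't fail on kernels
 * predating VAR/DATASEC/FUNC support.
 */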
static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
{
	bool has_func_global = kernel_supports(FEAT_BTF_GLOBAL_FUNC);
	bool has_datasec = kernel_supports(FEAT_BTF_DATASEC);
	bool has_func = kernel_supports(FEAT_BTF_FUNC);
	struct btf_type *t;
	int i, j, vlen;

	for (i = 1; i <= btf__get_nr_types(btf); i++) {
		t = (struct btf_type *)btf__type_by_id(btf, i);

		if (!has_datasec && btf_is_var(t)) {
			/* replace VAR with INT */
			t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
			/*
			 * using size = 1 is the safest choice, 4 will be too
			 * big and cause kernel BTF validation failure if
			 * original variable took less than 4 bytes
			 */
			t->size = 1;
			*(int *)(t + 1) = BTF_INT_ENC(0, 0, 8);
		} else if (!has_datasec && btf_is_datasec(t)) {
			/* replace DATASEC with STRUCT */
			const struct btf_var_secinfo *v = btf_var_secinfos(t);
			struct btf_member *m = btf_members(t);
			struct btf_type *vt;
			char *name;

			name = (char *)btf__name_by_offset(btf, t->name_off);
			while (*name) {
				if (*name == '.')
					*name = '_';
				name++;
			}

			vlen = btf_vlen(t);
			t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
			for (j = 0; j < vlen; j++, v++, m++) {
				/* order of field assignments is important */
				m->offset = v->offset * 8;
				m->type = v->type;
				/* preserve variable name as member name */
				vt = (void *)btf__type_by_id(btf, v->type);
				m->name_off = vt->name_off;
			}
		} else if (!has_func && btf_is_func_proto(t)) {
			/* replace FUNC_PROTO with ENUM */
			vlen = btf_vlen(t);
			t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
			t->size = sizeof(__u32); /* kernel enforced */
		} else if (!has_func && btf_is_func(t)) {
			/* replace FUNC with TYPEDEF */
			t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
		} else if (!has_func_global && btf_is_func(t)) {
			/* downgrade FUNC's global linkage to static */
			t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0);
		}
	}
}

static bool libbpf_needs_btf(const struct bpf_object *obj)
{
	return obj->efile.btf_maps_shndx >= 0 ||
	       obj->efile.st_ops_shndx >= 0 ||
	       obj->nr_extern > 0;
}

static bool kernel_needs_btf(const struct bpf_object *obj)
{
	return obj->efile.st_ops_shndx >= 0;
}

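/* Parse the object's .BTF and .BTF.ext ELF sections, if present. */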
static int bpf_object__init_btf(struct bpf_object *obj,
				Elf_Data *btf_data,
				Elf_Data *btf_ext_data)
{
	int err = -ENOENT;

	if (btf_data) {
		obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
		if (IS_ERR(obj->btf)) {
			err = PTR_ERR(obj->btf);
			obj->btf = NULL;
			pr_warn("Error loading ELF section %s: %d.\n",
				BTF_ELF_SEC, err);
			goto out;
		}
		/* enforce 8-byte pointers for BPF-targeted BTFs */
		btf__set_pointer_size(obj->btf, 8);
		err = 0;
	}
	if (btf_ext_data) {
		if (!obj->btf) {
			pr_debug("Ignoring ELF section %s because the ELF section %s it depends on is missing.\n",
				 BTF_EXT_ELF_SEC, BTF_ELF_SEC);
			goto out;
		}
		obj->btf_ext = btf_ext__new(btf_ext_data->d_buf,
					    btf_ext_data->d_size);
		if (IS_ERR(obj->btf_ext)) {
			pr_warn("Error loading ELF section %s: %ld. Ignoring it and continuing.\n",
				BTF_EXT_ELF_SEC, PTR_ERR(obj->btf_ext));
			obj->btf_ext = NULL;
			goto out;
		}
	}
out:
	if (err && libbpf_needs_btf(obj)) {
		pr_warn("BTF is required, but is missing or corrupted.\n");
		return err;
	}
	return 0;
}

static int bpf_object__finalize_btf(struct bpf_object *obj)
{
	int err;

	if (!obj->btf)
		return 0;

	err = btf__finalize_data(obj, obj->btf);
	if (err) {
		pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err);
		return err;
	}

	return 0;
}

static bool prog_needs_vmlinux_btf(struct bpf_program *prog)
{
	if (prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
	    prog->type == BPF_PROG_TYPE_LSM)
		return true;

	/* BPF_PROG_TYPE_TRACING programs which do not attach to other
	 * programs also need vmlinux BTF
	 */
	if (prog->type == BPF_PROG_TYPE_TRACING && !prog->attach_prog_fd)
		return true;

	return false;
}

static bool obj_needs_vmlinux_btf(const struct bpf_object *obj)
{
	struct bpf_program *prog;
	int i;

	/* CO-RE relocations need kernel BTF */
	if (obj->btf_ext && obj->btf_ext->core_relo_info.len)
		return true;

	/* Support for typed ksyms needs kernel BTF */
	for (i = 0; i < obj->nr_extern; i++) {
		const struct extern_desc *ext;

		ext = &obj->externs[i];
		if (ext->type == EXT_KSYM && ext->ksym.type_id)
			return true;
	}

	bpf_object__for_each_program(prog, obj) {
		if (!prog->load)
			continue;
		if (prog_needs_vmlinux_btf(prog))
			return true;
	}

	return false;
}

static int bpf_object__load_vmlinux_btf(struct bpf_object *obj, bool force)
{
	int err;

	/* btf_vmlinux could be loaded earlier */
	if (obj->btf_vmlinux)
		return 0;

	if (!force && !obj_needs_vmlinux_btf(obj))
		return 0;

	obj->btf_vmlinux = libbpf_find_kernel_btf();
	if (IS_ERR(obj->btf_vmlinux)) {
		err = PTR_ERR(obj->btf_vmlinux);
		pr_warn("Error loading vmlinux BTF: %d\n", err);
		obj->btf_vmlinux = NULL;
		return err;
	}
	return 0;
}

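/*
 * Upload the object's BTF into the kernel; if the kernel lacks support for
 * some of the BTF kinds used, sanitize a temporary copy first and load that
 * instead, leaving the original BTF intact.
 */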
static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
{
	struct btf *kern_btf = obj->btf;
	bool btf_mandatory, sanitize;
	int err = 0;

	if (!obj->btf)
		return 0;

	if (!kernel_supports(FEAT_BTF)) {
		if (kernel_needs_btf(obj)) {
			err = -EOPNOTSUPP;
			goto report;
		}
		pr_debug("Kernel doesn't support BTF, skipping uploading it.\n");
		return 0;
	}

	sanitize = btf_needs_sanitization(obj);
	if (sanitize) {
		const void *raw_data;
		__u32 sz;

		/* clone BTF to sanitize a copy and leave the original intact */
		raw_data = btf__get_raw_data(obj->btf, &sz);
		kern_btf = btf__new(raw_data, sz);
		if (IS_ERR(kern_btf))
			return PTR_ERR(kern_btf);

		/* enforce 8-byte pointers for BPF-targeted BTFs */
		btf__set_pointer_size(obj->btf, 8);
		bpf_object__sanitize_btf(obj, kern_btf);
	}

	err = btf__load(kern_btf);
	if (sanitize) {
		if (!err) {
			/* move fd to libbpf's BTF */
			btf__set_fd(obj->btf, btf__fd(kern_btf));
			btf__set_fd(kern_btf, -1);
		}
		btf__free(kern_btf);
	}
report:
	if (err) {
		btf_mandatory = kernel_needs_btf(obj);
		pr_warn("Error loading .BTF into kernel: %d. %s\n", err,
			btf_mandatory ? "BTF is mandatory, can't proceed."
				      : "BTF is optional, ignoring.");
		if (!btf_mandatory)
			err = 0;
	}
	return err;
}

static const char *elf_sym_str(const struct bpf_object *obj, size_t off)
{
	const char *name;

	name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, off);
	if (!name) {
		pr_warn("elf: failed to get symbol name string at offset %zu from %s: %s\n",
			off, obj->path, elf_errmsg(-1));
		return NULL;
	}

	return name;
}

static const char *elf_sec_str(const struct bpf_object *obj, size_t off)
{
	const char *name;

	name = elf_strptr(obj->efile.elf, obj->efile.shstrndx, off);
	if (!name) {
		pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
			off, obj->path, elf_errmsg(-1));
		return NULL;
	}

	return name;
}

static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx)
{
	Elf_Scn *scn;

	scn = elf_getscn(obj->efile.elf, idx);
	if (!scn) {
		pr_warn("elf: failed to get section(%zu) from %s: %s\n",
			idx, obj->path, elf_errmsg(-1));
		return NULL;
	}
	return scn;
}

static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name)
{
	Elf_Scn *scn = NULL;
	Elf *elf = obj->efile.elf;
	const char *sec_name;

	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		sec_name = elf_sec_name(obj, scn);
		if (!sec_name)
			return NULL;

		if (strcmp(sec_name, name) != 0)
			continue;

		return scn;
	}
	return NULL;
}

static int elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn, GElf_Shdr *hdr)
{
	if (!scn)
		return -EINVAL;

	if (gelf_getshdr(scn, hdr) != hdr) {
		pr_warn("elf: failed to get section(%zu) header from %s: %s\n",
			elf_ndxscn(scn), obj->path, elf_errmsg(-1));
		return -EINVAL;
	}

	return 0;
}

static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn)
{
	const char *name;
	GElf_Shdr sh;

	if (!scn)
		return NULL;

	if (elf_sec_hdr(obj, scn, &sh))
		return NULL;

	name = elf_sec_str(obj, sh.sh_name);
	if (!name) {
		pr_warn("elf: failed to get section(%zu) name from %s: %s\n",
			elf_ndxscn(scn), obj->path, elf_errmsg(-1));
		return NULL;
	}

	return name;
}

static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn)
{
	Elf_Data *data;

	if (!scn)
		return NULL;

	data = elf_getdata(scn, 0);
	if (!data) {
		pr_warn("elf: failed to get section(%zu) %s data from %s: %s\n",
			elf_ndxscn(scn), elf_sec_name(obj, scn) ?: "<?>",
			obj->path, elf_errmsg(-1));
		return NULL;
	}

	return data;
}

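/* Find a symbol of the given type that lives at an exact section offset. */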
static int elf_sym_by_sec_off(const struct bpf_object *obj, size_t sec_idx,
			      size_t off, __u32 sym_type, GElf_Sym *sym)
{
	Elf_Data *symbols = obj->efile.symbols;
	size_t n = symbols->d_size / sizeof(GElf_Sym);
	int i;

	for (i = 0; i < n; i++) {
		if (!gelf_getsym(symbols, i, sym))
			continue;
		if (sym->st_shndx != sec_idx || sym->st_value != off)
			continue;
		if (GELF_ST_TYPE(sym->st_info) != sym_type)
			continue;
		return 0;
	}

	return -ENOENT;
}

static bool is_sec_name_dwarf(const char *name)
{
	/* approximation, but the actual list is too long */
	return strncmp(name, ".debug_", sizeof(".debug_") - 1) == 0;
}

static bool ignore_elf_section(GElf_Shdr *hdr, const char *name)
{
	/* no special handling of .strtab */
	if (hdr->sh_type == SHT_STRTAB)
		return true;

	/* ignore .llvm_addrsig section as well */
	if (hdr->sh_type == 0x6FFF4C03 /* SHT_LLVM_ADDRSIG */)
		return true;

	/* no subprograms will lead to an empty .text section, ignore it */
	if (hdr->sh_type == SHT_PROGBITS && hdr->sh_size == 0 &&
	    strcmp(name, ".text") == 0)
		return true;

	/* DWARF sections */
	if (is_sec_name_dwarf(name))
		return true;

	if (strncmp(name, ".rel", sizeof(".rel") - 1) == 0) {
		name += sizeof(".rel") - 1;
		/* DWARF section relocations */
		if (is_sec_name_dwarf(name))
			return true;

		/* .BTF and .BTF.ext don't need relocations */
		if (strcmp(name, BTF_ELF_SEC) == 0 ||
		    strcmp(name, BTF_EXT_ELF_SEC) == 0)
			return true;
	}

	return false;
}

static int cmp_progs(const void *_a, const void *_b)
{
	const struct bpf_program *a = _a;
	const struct bpf_program *b = _b;

	if (a->sec_idx != b->sec_idx)
		return a->sec_idx < b->sec_idx ? -1 : 1;

	/* sec_insn_off can't be the same within the same section */
	return a->sec_insn_off < b->sec_insn_off ? -1 : 1;
}

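/*
 * Walk all ELF sections of the object, remembering the interesting ones
 * (symbol table, license/version, maps, data sections, programs, and their
 * relocation sections) for the later parsing stages.
 */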
static int bpf_object__elf_collect(struct bpf_object *obj)
{
	Elf *elf = obj->efile.elf;
	Elf_Data *btf_ext_data = NULL;
	Elf_Data *btf_data = NULL;
	int idx = 0, err = 0;
	const char *name;
	Elf_Data *data;
	Elf_Scn *scn;
	GElf_Shdr sh;

	/* a bunch of ELF parsing functionality depends on processing symbols,
	 * so do the first pass and find the symbol table
	 */
	scn = NULL;
	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		if (elf_sec_hdr(obj, scn, &sh))
			return -LIBBPF_ERRNO__FORMAT;

		if (sh.sh_type == SHT_SYMTAB) {
			if (obj->efile.symbols) {
				pr_warn("elf: multiple symbol tables in %s\n", obj->path);
				return -LIBBPF_ERRNO__FORMAT;
			}

			data = elf_sec_data(obj, scn);
			if (!data)
				return -LIBBPF_ERRNO__FORMAT;

			obj->efile.symbols = data;
			obj->efile.symbols_shndx = elf_ndxscn(scn);
			obj->efile.strtabidx = sh.sh_link;
		}
	}

	scn = NULL;
	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		idx++;

		if (elf_sec_hdr(obj, scn, &sh))
			return -LIBBPF_ERRNO__FORMAT;

		name = elf_sec_str(obj, sh.sh_name);
		if (!name)
			return -LIBBPF_ERRNO__FORMAT;

		if (ignore_elf_section(&sh, name))
			continue;

		data = elf_sec_data(obj, scn);
		if (!data)
			return -LIBBPF_ERRNO__FORMAT;

		pr_debug("elf: section(%d) %s, size %lu, link %d, flags %lx, type=%d\n",
			 idx, name, (unsigned long)data->d_size,
			 (int)sh.sh_link, (unsigned long)sh.sh_flags,
			 (int)sh.sh_type);

		if (strcmp(name, "license") == 0) {
			err = bpf_object__init_license(obj, data->d_buf, data->d_size);
			if (err)
				return err;
		} else if (strcmp(name, "version") == 0) {
			err = bpf_object__init_kversion(obj, data->d_buf, data->d_size);
			if (err)
				return err;
		} else if (strcmp(name, "maps") == 0) {
			obj->efile.maps_shndx = idx;
		} else if (strcmp(name, MAPS_ELF_SEC) == 0) {
			obj->efile.btf_maps_shndx = idx;
		} else if (strcmp(name, BTF_ELF_SEC) == 0) {
			btf_data = data;
		} else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
			btf_ext_data = data;
		} else if (sh.sh_type == SHT_SYMTAB) {
			/* already processed during the first pass above */
		} else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) {
			if (sh.sh_flags & SHF_EXECINSTR) {
				if (strcmp(name, ".text") == 0)
					obj->efile.text_shndx = idx;
				err = bpf_object__add_programs(obj, data, name, idx);
				if (err)
					return err;
			} else if (strcmp(name, DATA_SEC) == 0) {
				obj->efile.data = data;
				obj->efile.data_shndx = idx;
			} else if (strcmp(name, RODATA_SEC) == 0) {
				obj->efile.rodata = data;
				obj->efile.rodata_shndx = idx;
			} else if (strcmp(name, STRUCT_OPS_SEC) == 0) {
				obj->efile.st_ops_data = data;
				obj->efile.st_ops_shndx = idx;
			} else {
				pr_info("elf: skipping unrecognized data section(%d) %s\n",
					idx, name);
			}
		} else if (sh.sh_type == SHT_REL) {
			int nr_sects = obj->efile.nr_reloc_sects;
			void *sects = obj->efile.reloc_sects;
			int sec = sh.sh_info; /* points to other section */

			/* Only do relo for section with exec instructions */
			if (!section_have_execinstr(obj, sec) &&
			    strcmp(name, ".rel" STRUCT_OPS_SEC) &&
			    strcmp(name, ".rel" MAPS_ELF_SEC)) {
				pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n",
					idx, name, sec,
					elf_sec_name(obj, elf_sec_by_idx(obj, sec)) ?: "<?>");
				continue;
			}

			sects = libbpf_reallocarray(sects, nr_sects + 1,
						    sizeof(*obj->efile.reloc_sects));
			if (!sects)
				return -ENOMEM;

			obj->efile.reloc_sects = sects;
			obj->efile.nr_reloc_sects++;

			obj->efile.reloc_sects[nr_sects].shdr = sh;
			obj->efile.reloc_sects[nr_sects].data = data;
		} else if (sh.sh_type == SHT_NOBITS && strcmp(name, BSS_SEC) == 0) {
			obj->efile.bss = data;
			obj->efile.bss_shndx = idx;
		} else {
			pr_info("elf: skipping section(%d) %s (size %zu)\n", idx, name,
				(size_t)sh.sh_size);
		}
	}

	if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) {
		pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}

	/* sort BPF programs by section name and in-section instruction offset
	 * for faster search
	 */
	qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs);

	return bpf_object__init_btf(obj, btf_data, btf_ext_data);
}

static bool sym_is_extern(const GElf_Sym *sym)
{
	int bind = GELF_ST_BIND(sym->st_info);
	/* externs are symbols w/ type=NOTYPE, bind=GLOBAL|WEAK, section=UND */
	return sym->st_shndx == SHN_UNDEF &&
	       (bind == STB_GLOBAL || bind == STB_WEAK) &&
	       GELF_ST_TYPE(sym->st_info) == STT_NOTYPE;
}

static int find_extern_btf_id(const struct btf *btf, const char *ext_name)
{
	const struct btf_type *t;
	const char *var_name;
	int i, n;

	if (!btf)
		return -ESRCH;

	n = btf__get_nr_types(btf);
	for (i = 1; i <= n; i++) {
		t = btf__type_by_id(btf, i);

		if (!btf_is_var(t))
			continue;

		var_name = btf__name_by_offset(btf, t->name_off);
		if (strcmp(var_name, ext_name))
			continue;

		if (btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN)
			return -EINVAL;

		return i;
	}

	return -ENOENT;
}

static int find_extern_sec_btf_id(struct btf *btf, int ext_btf_id)
{
	const struct btf_var_secinfo *vs;
	const struct btf_type *t;
	int i, j, n;

	if (!btf)
		return -ESRCH;

	n = btf__get_nr_types(btf);
	for (i = 1; i <= n; i++) {
		t = btf__type_by_id(btf, i);

		if (!btf_is_datasec(t))
			continue;

		vs = btf_var_secinfos(t);
		for (j = 0; j < btf_vlen(t); j++, vs++) {
			if (vs->type == ext_btf_id)
				return i;
		}
	}

	return -ENOENT;
}

static enum kcfg_type find_kcfg_type(const struct btf *btf, int id,
				     bool *is_signed)
{
	const struct btf_type *t;
	const char *name;

	t = skip_mods_and_typedefs(btf, id, NULL);
	name = btf__name_by_offset(btf, t->name_off);

	if (is_signed)
		*is_signed = false;
	switch (btf_kind(t)) {
	case BTF_KIND_INT: {
		int enc = btf_int_encoding(t);

		if (enc & BTF_INT_BOOL)
			return t->size == 1 ? KCFG_BOOL : KCFG_UNKNOWN;
		if (is_signed)
			*is_signed = enc & BTF_INT_SIGNED;
		if (t->size == 1)
			return KCFG_CHAR;
		if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1)))
			return KCFG_UNKNOWN;
		return KCFG_INT;
	}
	case BTF_KIND_ENUM:
		if (t->size != 4)
			return KCFG_UNKNOWN;
		if (strcmp(name, "libbpf_tristate"))
			return KCFG_UNKNOWN;
		return KCFG_TRISTATE;
	case BTF_KIND_ARRAY:
		if (btf_array(t)->nelems == 0)
			return KCFG_UNKNOWN;
		if (find_kcfg_type(btf, btf_array(t)->type, NULL) != KCFG_CHAR)
			return KCFG_UNKNOWN;
		return KCFG_CHAR_ARR;
	default:
		return KCFG_UNKNOWN;
	}
}

static int cmp_externs(const void *_a, const void *_b)
{
	const struct extern_desc *a = _a;
	const struct extern_desc *b = _b;

	if (a->type != b->type)
		return a->type < b->type ? -1 : 1;

	if (a->type == EXT_KCFG) {
		/* descending order by alignment requirements */
		if (a->kcfg.align != b->kcfg.align)
			return a->kcfg.align > b->kcfg.align ? -1 : 1;
		/* ascending order by size, within same alignment class */
		if (a->kcfg.sz != b->kcfg.sz)
			return a->kcfg.sz < b->kcfg.sz ? -1 : 1;
	}

	/* resolve ties by name */
	return strcmp(a->name, b->name);
}

static int find_int_btf_id(const struct btf *btf)
{
	const struct btf_type *t;
	int i, n;

	n = btf__get_nr_types(btf);
	for (i = 1; i <= n; i++) {
		t = btf__type_by_id(btf, i);

		if (btf_is_int(t) && btf_int_bits(t) == 32)
			return i;
	}

	return 0;
}

static int bpf_object__collect_externs(struct bpf_object *obj)
{
	struct btf_type *sec, *kcfg_sec = NULL, *ksym_sec = NULL;
	const struct btf_type *t;
	struct extern_desc *ext;
	int i, n, off;
	const char *ext_name, *sec_name;
	Elf_Scn *scn;
	GElf_Shdr sh;

	if (!obj->efile.symbols)
		return 0;

	scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx);
	if (elf_sec_hdr(obj, scn, &sh))
		return -LIBBPF_ERRNO__FORMAT;

	n = sh.sh_size / sh.sh_entsize;
	pr_debug("looking for externs among %d symbols...\n", n);

	for (i = 0; i < n; i++) {
		GElf_Sym sym;

		if (!gelf_getsym(obj->efile.symbols, i, &sym))
			return -LIBBPF_ERRNO__FORMAT;
		if (!sym_is_extern(&sym))
			continue;
		ext_name = elf_sym_str(obj, sym.st_name);
		if (!ext_name || !ext_name[0])
			continue;

		ext = obj->externs;
		ext = libbpf_reallocarray(ext, obj->nr_extern + 1, sizeof(*ext));
		if (!ext)
			return -ENOMEM;
		obj->externs = ext;
		ext = &ext[obj->nr_extern];
		memset(ext, 0, sizeof(*ext));
		obj->nr_extern++;

		ext->btf_id = find_extern_btf_id(obj->btf, ext_name);
		if (ext->btf_id <= 0) {
			pr_warn("failed to find BTF for extern '%s': %d\n",
				ext_name, ext->btf_id);
			return ext->btf_id;
		}
		t = btf__type_by_id(obj->btf, ext->btf_id);
		ext->name = btf__name_by_offset(obj->btf, t->name_off);
		ext->sym_idx = i;
		ext->is_weak = GELF_ST_BIND(sym.st_info) == STB_WEAK;

		ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id);
		if (ext->sec_btf_id <= 0) {
			pr_warn("failed to find BTF for extern '%s' [%d] section: %d\n",
				ext_name, ext->btf_id, ext->sec_btf_id);
			return ext->sec_btf_id;
		}
		sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id);
		sec_name = btf__name_by_offset(obj->btf, sec->name_off);

		if (strcmp(sec_name, KCONFIG_SEC) == 0) {
			kcfg_sec = sec;
			ext->type = EXT_KCFG;
			ext->kcfg.sz = btf__resolve_size(obj->btf, t->type);
			if (ext->kcfg.sz <= 0) {
				pr_warn("failed to resolve size of extern (kcfg) '%s': %d\n",
					ext_name, ext->kcfg.sz);
				return ext->kcfg.sz;
			}
			ext->kcfg.align = btf__align_of(obj->btf, t->type);
			if (ext->kcfg.align <= 0) {
				pr_warn("failed to determine alignment of extern (kcfg) '%s': %d\n",
					ext_name, ext->kcfg.align);
				return -EINVAL;
			}
			ext->kcfg.type = find_kcfg_type(obj->btf, t->type,
							&ext->kcfg.is_signed);
			if (ext->kcfg.type == KCFG_UNKNOWN) {
				pr_warn("extern (kcfg) '%s' type is unsupported\n", ext_name);
				return -ENOTSUP;
			}
		} else if (strcmp(sec_name, KSYMS_SEC) == 0) {
			ksym_sec = sec;
			ext->type = EXT_KSYM;
			skip_mods_and_typedefs(obj->btf, t->type,
					       &ext->ksym.type_id);
		} else {
			pr_warn("unrecognized extern section '%s'\n", sec_name);
			return -ENOTSUP;
		}
	}
	pr_debug("collected %d externs total\n", obj->nr_extern);

	if (!obj->nr_extern)
		return 0;

	/* sort externs by type, for kcfg ones also by (align, size, name) */
	qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs);

	/* for .ksyms section, we need to turn all externs into allocated
	 * variables in BTF to pass kernel verification; we do this by
	 * pretending that each extern is an int variable
	 */
	if (ksym_sec) {
		/* find existing 4-byte integer type in BTF to use for fake
		 * extern variables in DATASEC
		 */
		int int_btf_id = find_int_btf_id(obj->btf);

		for (i = 0; i < obj->nr_extern; i++) {
			ext = &obj->externs[i];
			if (ext->type != EXT_KSYM)
				continue;
			pr_debug("extern (ksym) #%d: symbol %d, name %s\n",
				 i, ext->sym_idx, ext->name);
		}

		sec = ksym_sec;
		n = btf_vlen(sec);
		for (i = 0, off = 0; i < n; i++, off += sizeof(int)) {
			struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
			struct btf_type *vt;

			vt = (void *)btf__type_by_id(obj->btf, vs->type);
			ext_name = btf__name_by_offset(obj->btf, vt->name_off);
			ext = find_extern_by_name(obj, ext_name);
			if (!ext) {
				pr_warn("failed to find extern definition for BTF var '%s'\n",
					ext_name);
				return -ESRCH;
			}
			btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
			vt->type = int_btf_id;
			vs->offset = off;
			vs->size = sizeof(int);
		}
		sec->size = off;
	}

	if (kcfg_sec) {
		sec = kcfg_sec;
		/* for kcfg externs calculate their offsets within a .kconfig map */
		off = 0;
		for (i = 0; i < obj->nr_extern; i++) {
			ext = &obj->externs[i];
			if (ext->type != EXT_KCFG)
				continue;

			ext->kcfg.data_off = roundup(off, ext->kcfg.align);
			off = ext->kcfg.data_off + ext->kcfg.sz;
			pr_debug("extern (kcfg) #%d: symbol %d, off %u, name %s\n",
				 i, ext->sym_idx, ext->kcfg.data_off, ext->name);
		}
		sec->size = off;
		n = btf_vlen(sec);
		for (i = 0; i < n; i++) {
			struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;

			t = btf__type_by_id(obj->btf, vs->type);
			ext_name = btf__name_by_offset(obj->btf, t->name_off);
			ext = find_extern_by_name(obj, ext_name);
			if (!ext) {
				pr_warn("failed to find extern definition for BTF var '%s'\n",
					ext_name);
				return -ESRCH;
			}
			btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
			vs->offset = ext->kcfg.data_off;
		}
	}
	return 0;
}


struct bpf_program *
bpf_object__find_program_by_title(const struct bpf_object *obj,
				  const char *title)
{
	struct bpf_program *pos;

	bpf_object__for_each_program(pos, obj) {
		if (pos->sec_name && !strcmp(pos->sec_name, title))
			return pos;
	}
	return NULL;
}

static bool prog_is_subprog(const struct bpf_object *obj,
			    const struct bpf_program *prog)
{
	/* For legacy reasons, libbpf supports an entry-point BPF program
	 * without a SEC() attribute, i.e., one placed in the .text section.
	 * But as soon as the object contains more than one program, every
	 * function in .text is treated as a subprogram instead.
	 */
	return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1;
}

struct bpf_program *
bpf_object__find_program_by_name(const struct bpf_object *obj,
				 const char *name)
{
	struct bpf_program *prog;

	bpf_object__for_each_program(prog, obj) {
		if (prog_is_subprog(obj, prog))
			continue;
		if (!strcmp(prog->name, name))
			return prog;
	}
	return NULL;
}

static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
				      int shndx)
{
	return shndx == obj->efile.data_shndx ||
	       shndx == obj->efile.bss_shndx ||
	       shndx == obj->efile.rodata_shndx;
}

static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
				      int shndx)
{
	return shndx == obj->efile.maps_shndx ||
	       shndx == obj->efile.btf_maps_shndx;
}

static enum libbpf_map_type
bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
{
	if (shndx == obj->efile.data_shndx)
		return LIBBPF_MAP_DATA;
	else if (shndx == obj->efile.bss_shndx)
		return LIBBPF_MAP_BSS;
	else if (shndx == obj->efile.rodata_shndx)
		return LIBBPF_MAP_RODATA;
	else if (shndx == obj->efile.symbols_shndx)
		return LIBBPF_MAP_KCONFIG;
	else
		return LIBBPF_MAP_UNSPEC;
}

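/*
 * Classify a single ELF relocation against a program instruction and fill
 * in reloc_desc with its type (subprog call, extern, map reference, or
 * global data), the instruction index, and the target map/extern/offset.
 */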
static int bpf_program__record_reloc(struct bpf_program *prog,
				     struct reloc_desc *reloc_desc,
				     __u32 insn_idx, const char *sym_name,
				     const GElf_Sym *sym, const GElf_Rel *rel)
{
	struct bpf_insn *insn = &prog->insns[insn_idx];
	size_t map_idx, nr_maps = prog->obj->nr_maps;
	struct bpf_object *obj = prog->obj;
	__u32 shdr_idx = sym->st_shndx;
	enum libbpf_map_type type;
	const char *sym_sec_name;
	struct bpf_map *map;

	reloc_desc->processed = false;

	/* sub-program call relocation */
	if (insn->code == (BPF_JMP | BPF_CALL)) {
		if (insn->src_reg != BPF_PSEUDO_CALL) {
			pr_warn("prog '%s': incorrect bpf_call opcode\n", prog->name);
			return -LIBBPF_ERRNO__RELOC;
		}
		/* text_shndx can be 0, if no default "main" program exists */
		if (!shdr_idx || shdr_idx != obj->efile.text_shndx) {
			sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
			pr_warn("prog '%s': bad call relo against '%s' in section '%s'\n",
				prog->name, sym_name, sym_sec_name);
			return -LIBBPF_ERRNO__RELOC;
		}
		if (sym->st_value % BPF_INSN_SZ) {
			pr_warn("prog '%s': bad call relo against '%s' at offset %zu\n",
				prog->name, sym_name, (size_t)sym->st_value);
			return -LIBBPF_ERRNO__RELOC;
		}
		reloc_desc->type = RELO_CALL;
		reloc_desc->insn_idx = insn_idx;
		reloc_desc->sym_off = sym->st_value;
		return 0;
	}

	if (insn->code != (BPF_LD | BPF_IMM | BPF_DW)) {
		pr_warn("prog '%s': invalid relo against '%s' for insns[%d].code 0x%x\n",
			prog->name, sym_name, insn_idx, insn->code);
		return -LIBBPF_ERRNO__RELOC;
	}

	if (sym_is_extern(sym)) {
		int sym_idx = GELF_R_SYM(rel->r_info);
		int i, n = obj->nr_extern;
		struct extern_desc *ext;

		for (i = 0; i < n; i++) {
			ext = &obj->externs[i];
			if (ext->sym_idx == sym_idx)
				break;
		}
		if (i >= n) {
			pr_warn("prog '%s': extern relo failed to find extern for '%s' (%d)\n",
				prog->name, sym_name, sym_idx);
			return -LIBBPF_ERRNO__RELOC;
		}
		pr_debug("prog '%s': found extern #%d '%s' (sym %d) for insn #%u\n",
			 prog->name, i, ext->name, ext->sym_idx, insn_idx);
		reloc_desc->type = RELO_EXTERN;
		reloc_desc->insn_idx = insn_idx;
		reloc_desc->sym_off = i; /* sym_off stores extern index */
		return 0;
	}

	if (!shdr_idx || shdr_idx >= SHN_LORESERVE) {
		pr_warn("prog '%s': invalid relo against '%s' in special section 0x%x; forgot to initialize global var?..\n",
			prog->name, sym_name, shdr_idx);
		return -LIBBPF_ERRNO__RELOC;
	}

	type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
	sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));

	/* generic map reference relocation */
	if (type == LIBBPF_MAP_UNSPEC) {
		if (!bpf_object__shndx_is_maps(obj, shdr_idx)) {
			pr_warn("prog '%s': bad map relo against '%s' in section '%s'\n",
				prog->name, sym_name, sym_sec_name);
			return -LIBBPF_ERRNO__RELOC;
		}
		for (map_idx = 0; map_idx < nr_maps; map_idx++) {
			map = &obj->maps[map_idx];
			if (map->libbpf_type != type ||
			    map->sec_idx != sym->st_shndx ||
			    map->sec_offset != sym->st_value)
				continue;
			pr_debug("prog '%s': found map %zd (%s, sec %d, off %zu) for insn #%u\n",
				 prog->name, map_idx, map->name, map->sec_idx,
				 map->sec_offset, insn_idx);
			break;
		}
		if (map_idx >= nr_maps) {
			pr_warn("prog '%s': map relo failed to find map for section '%s', off %zu\n",
				prog->name, sym_sec_name, (size_t)sym->st_value);
			return -LIBBPF_ERRNO__RELOC;
		}
		reloc_desc->type = RELO_LD64;
		reloc_desc->insn_idx = insn_idx;
		reloc_desc->map_idx = map_idx;
		reloc_desc->sym_off = 0;
		return 0;
	}

	/* global data map relocation */
	if (!bpf_object__shndx_is_data(obj, shdr_idx)) {
		pr_warn("prog '%s': bad data relo against section '%s'\n",
			prog->name, sym_sec_name);
		return -LIBBPF_ERRNO__RELOC;
	}
	for (map_idx = 0; map_idx < nr_maps; map_idx++) {
		map = &obj->maps[map_idx];
		if (map->libbpf_type != type)
			continue;
		pr_debug("prog '%s': found data map %zd (%s, sec %d, off %zu) for insn %u\n",
			 prog->name, map_idx, map->name, map->sec_idx,
			 map->sec_offset, insn_idx);
		break;
	}
	if (map_idx >= nr_maps) {
		pr_warn("prog '%s': data relo failed to find map for section '%s'\n",
			prog->name, sym_sec_name);
		return -LIBBPF_ERRNO__RELOC;
	}

	reloc_desc->type = RELO_DATA;
	reloc_desc->insn_idx = insn_idx;
	reloc_desc->map_idx = map_idx;
	reloc_desc->sym_off = sym->st_value;
	return 0;
}

static bool prog_contains_insn(const struct bpf_program *prog, size_t insn_idx)
{
	return insn_idx >= prog->sec_insn_off &&
	       insn_idx < prog->sec_insn_off + prog->sec_insn_cnt;
}

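/*
 * Binary-search the (sorted) program array for the program that covers a
 * given instruction offset within a given ELF section.
 */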
static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj,
						 size_t sec_idx, size_t insn_idx)
{
	int l = 0, r = obj->nr_programs - 1, m;
	struct bpf_program *prog;

	while (l < r) {
		m = l + (r - l + 1) / 2;
		prog = &obj->programs[m];

		if (prog->sec_idx < sec_idx ||
		    (prog->sec_idx == sec_idx && prog->sec_insn_off <= insn_idx))
			l = m;
		else
			r = m - 1;
	}
	/* matching program could be at index l, but it still might be the
	 * wrong one, so we need to double-check conditions one last time
	 */
	prog = &obj->programs[l];
	if (prog->sec_idx == sec_idx && prog_contains_insn(prog, insn_idx))
		return prog;
	return NULL;
}

static int
bpf_object__collect_prog_relos(struct bpf_object *obj, GElf_Shdr *shdr, Elf_Data *data)
{
	Elf_Data *symbols = obj->efile.symbols;
	const char *relo_sec_name, *sec_name;
	size_t sec_idx = shdr->sh_info;
	struct bpf_program *prog;
	struct reloc_desc *relos;
	int err, i, nrels;
	const char *sym_name;
	__u32 insn_idx;
	GElf_Sym sym;
	GElf_Rel rel;

	relo_sec_name = elf_sec_str(obj, shdr->sh_name);
	sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
	if (!relo_sec_name || !sec_name)
		return -EINVAL;

	pr_debug("sec '%s': collecting relocation for section(%zu) '%s'\n",
		 relo_sec_name, sec_idx, sec_name);
	nrels = shdr->sh_size / shdr->sh_entsize;

	for (i = 0; i < nrels; i++) {
		if (!gelf_getrel(data, i, &rel)) {
			pr_warn("sec '%s': failed to get relo #%d\n", relo_sec_name, i);
			return -LIBBPF_ERRNO__FORMAT;
		}
		if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
			pr_warn("sec '%s': symbol 0x%zx not found for relo #%d\n",
				relo_sec_name, (size_t)GELF_R_SYM(rel.r_info), i);
			return -LIBBPF_ERRNO__FORMAT;
		}
		if (rel.r_offset % BPF_INSN_SZ) {
			pr_warn("sec '%s': invalid offset 0x%zx for relo #%d\n",
				relo_sec_name, (size_t)rel.r_offset, i);
			return -LIBBPF_ERRNO__FORMAT;
		}

		insn_idx = rel.r_offset / BPF_INSN_SZ;
		/* relocations against static functions are recorded as
		 * relocations against the section that contains a function;
		 * in such case, symbol will be STT_SECTION and sym.st_name
		 * will point to empty string (0), so fetch section name
		 * instead
		 */
		if (GELF_ST_TYPE(sym.st_info) == STT_SECTION && sym.st_name == 0)
			sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym.st_shndx));
		else
			sym_name = elf_sym_str(obj, sym.st_name);
		sym_name = sym_name ?: "<?";

		pr_debug("sec '%s': relo #%d: insn #%u against '%s'\n",
			 relo_sec_name, i, insn_idx, sym_name);

		prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
		if (!prog) {
			pr_warn("sec '%s': relo #%d: program not found in section '%s' for insn #%u\n",
				relo_sec_name, i, sec_name, insn_idx);
			return -LIBBPF_ERRNO__RELOC;
		}

		relos = libbpf_reallocarray(prog->reloc_desc,
					    prog->nr_reloc + 1, sizeof(*relos));
		if (!relos)
			return -ENOMEM;
		prog->reloc_desc = relos;

		/* adjust insn_idx to local BPF program frame of reference */
		insn_idx -= prog->sec_insn_off;
		err = bpf_program__record_reloc(prog, &relos[prog->nr_reloc],
						insn_idx, sym_name, &sym, &rel);
		if (err)
			return err;

		prog->nr_reloc++;
	}
	return 0;
}

static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
{
	struct bpf_map_def *def = &map->def;
	__u32 key_type_id = 0, value_type_id = 0;
	int ret;

	/* if it's BTF-defined map, we don't need to search for type IDs.
	 * For struct_ops map, it does not need btf_key_type_id and
	 * btf_value_type_id.
	 */
	if (map->sec_idx == obj->efile.btf_maps_shndx ||
	    bpf_map__is_struct_ops(map))
		return 0;

	if (!bpf_map__is_internal(map)) {
		ret = btf__get_map_kv_tids(obj->btf, map->name, def->key_size,
					   def->value_size, &key_type_id,
					   &value_type_id);
	} else {
		/*
		 * LLVM annotates global data differently in BTF, that is,
		 * only as '.data', '.bss' or '.rodata'.
		 */
		ret = btf__find_by_name(obj->btf,
					libbpf_type_to_btf_name[map->libbpf_type]);
	}
	if (ret < 0)
		return ret;

	map->btf_key_type_id = key_type_id;
	map->btf_value_type_id = bpf_map__is_internal(map) ?
				 ret : value_type_id;
	return 0;
}

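/*
 * Make the map reuse an already created map's FD (e.g., a pinned one),
 * taking over its name and definition. The open("/") reserves an FD number
 * for dup3() to target, and dup3() with O_CLOEXEC makes sure the duplicate
 * won't leak across exec().
 */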
int bpf_map__reuse_fd(struct bpf_map *map, int fd)
{
	struct bpf_map_info info = {};
	__u32 len = sizeof(info);
	int new_fd, err;
	char *new_name;

	err = bpf_obj_get_info_by_fd(fd, &info, &len);
	if (err)
		return err;

	new_name = strdup(info.name);
	if (!new_name)
		return -errno;

	new_fd = open("/", O_RDONLY | O_CLOEXEC);
	if (new_fd < 0) {
		err = -errno;
		goto err_free_new_name;
	}

	new_fd = dup3(fd, new_fd, O_CLOEXEC);
	if (new_fd < 0) {
		err = -errno;
		goto err_close_new_fd;
	}

	err = zclose(map->fd);
	if (err) {
		err = -errno;
		goto err_close_new_fd;
	}
	free(map->name);

	map->fd = new_fd;
	map->name = new_name;
	map->def.type = info.type;
	map->def.key_size = info.key_size;
	map->def.value_size = info.value_size;
	map->def.max_entries = info.max_entries;
	map->def.map_flags = info.map_flags;
	map->btf_key_type_id = info.btf_key_type_id;
	map->btf_value_type_id = info.btf_value_type_id;
	map->reused = true;

	return 0;

err_close_new_fd:
	close(new_fd);
err_free_new_name:
	free(new_name);
	return err;
}

__u32 bpf_map__max_entries(const struct bpf_map *map)
{
	return map->def.max_entries;
}

int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
{
	if (map->fd >= 0)
		return -EBUSY;
	map->def.max_entries = max_entries;
	return 0;
}

int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
{
	if (!map || !max_entries)
		return -EINVAL;

	return bpf_map__set_max_entries(map, max_entries);
}

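/*
 * Sanity-check that BPF program loading works at all by loading a trivial
 * "return 0" socket filter program.
 */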
static int
bpf_object__probe_loading(struct bpf_object *obj)
{
	struct bpf_load_program_attr attr;
	char *cp, errmsg[STRERR_BUFSIZE];
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	int ret;

	/* make sure basic loading works */
	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns = insns;
	attr.insns_cnt = ARRAY_SIZE(insns);
	attr.license = "GPL";

	ret = bpf_load_program_xattr(&attr, NULL, 0);
	if (ret < 0) {
		ret = errno;
		cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
		pr_warn("Error in %s():%s(%d). Couldn't load trivial BPF "
			"program. Make sure your kernel supports BPF "
			"(CONFIG_BPF_SYSCALL=y) and/or that RLIMIT_MEMLOCK is "
			"set to a big enough value.\n", __func__, cp, ret);
		return -ret;
	}
	close(ret);

	return 0;
}

static int probe_fd(int fd)
{
	if (fd >= 0)
		close(fd);
	return fd >= 0;
}

static int probe_kern_prog_name(void)
{
	struct bpf_load_program_attr attr;
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	int ret;

	/* make sure loading with name works */
	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns = insns;
	attr.insns_cnt = ARRAY_SIZE(insns);
	attr.license = "GPL";
	attr.name = "test";
	ret = bpf_load_program_xattr(&attr, NULL, 0);
	return probe_fd(ret);
}

static int probe_kern_global_data(void)
{
	struct bpf_load_program_attr prg_attr;
	struct bpf_create_map_attr map_attr;
	char *cp, errmsg[STRERR_BUFSIZE];
	struct bpf_insn insns[] = {
		BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
		BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	int ret, map;

	memset(&map_attr, 0, sizeof(map_attr));
	map_attr.map_type = BPF_MAP_TYPE_ARRAY;
	map_attr.key_size = sizeof(int);
	map_attr.value_size = 32;
	map_attr.max_entries = 1;

	map = bpf_create_map_xattr(&map_attr);
	if (map < 0) {
		ret = -errno;
		cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
		pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
			__func__, cp, -ret);
		return ret;
	}

	insns[0].imm = map;

	memset(&prg_attr, 0, sizeof(prg_attr));
	prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	prg_attr.insns = insns;
	prg_attr.insns_cnt = ARRAY_SIZE(insns);
	prg_attr.license = "GPL";

	ret = bpf_load_program_xattr(&prg_attr, NULL, 0);
	close(map);
	return probe_fd(ret);
}

static int probe_kern_btf(void)
{
	static const char strs[] = "\0int";
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs)));
}

static int probe_kern_btf_func(void)
{
	static const char strs[] = "\0int\0x\0a";
	/* void x(int a) {} */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
		/* FUNC_PROTO */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
		BTF_PARAM_ENC(7, 1),
		/* FUNC x */
		BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs)));
}

static int probe_kern_btf_func_global(void)
{
	static const char strs[] = "\0int\0x\0a";
	/* static void x(int a) {} */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
		/* FUNC_PROTO */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
		BTF_PARAM_ENC(7, 1),
		/* FUNC x BTF_FUNC_GLOBAL */
		BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs)));
}

static int probe_kern_btf_datasec(void)
{
	static const char strs[] = "\0x\0.data";
	/* static int a; */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
		/* VAR x */
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
		BTF_VAR_STATIC,
		/* DATASEC val */
		BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
		BTF_VAR_SECINFO_ENC(2, 0, 4),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs)));
}

static int probe_kern_array_mmap(void)
{
	struct bpf_create_map_attr attr = {
		.map_type = BPF_MAP_TYPE_ARRAY,
		.map_flags = BPF_F_MMAPABLE,
		.key_size = sizeof(int),
		.value_size = sizeof(int),
		.max_entries = 1,
	};

	return probe_fd(bpf_create_map_xattr(&attr));
}

static int probe_kern_exp_attach_type(void)
{
	struct bpf_load_program_attr attr;
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};

	memset(&attr, 0, sizeof(attr));
	/* use any valid combination of program type and (optional)
	 * non-zero expected attach type (i.e., not a BPF_CGROUP_INET_INGRESS)
	 * to see if kernel supports expected_attach_type field for
	 * BPF_PROG_LOAD command
	 */
	attr.prog_type = BPF_PROG_TYPE_CGROUP_SOCK;
	attr.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE;
	attr.insns = insns;
	attr.insns_cnt = ARRAY_SIZE(insns);
	attr.license = "GPL";

	return probe_fd(bpf_load_program_xattr(&attr, NULL, 0));
}

static int probe_kern_probe_read_kernel(void)
{
	struct bpf_load_program_attr attr;
	struct bpf_insn insns[] = {
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),	/* r1 = r10 (fp) */
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),	/* r1 += -8 */
		BPF_MOV64_IMM(BPF_REG_2, 8),		/* r2 = 8 */
		BPF_MOV64_IMM(BPF_REG_3, 0),		/* r3 = 0 */
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel),
		BPF_EXIT_INSN(),
	};

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_KPROBE;
	attr.insns = insns;
	attr.insns_cnt = ARRAY_SIZE(insns);
	attr.license = "GPL";

	return probe_fd(bpf_load_program_xattr(&attr, NULL, 0));
}

static int probe_prog_bind_map(void)
{
	struct bpf_load_program_attr prg_attr;
	struct bpf_create_map_attr map_attr;
	char *cp, errmsg[STRERR_BUFSIZE];
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	int ret, map, prog;

	memset(&map_attr, 0, sizeof(map_attr));
	map_attr.map_type = BPF_MAP_TYPE_ARRAY;
	map_attr.key_size = sizeof(int);
	map_attr.value_size = 32;
	map_attr.max_entries = 1;

	map = bpf_create_map_xattr(&map_attr);
	if (map < 0) {
		ret = -errno;
		cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
		pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
			__func__, cp, -ret);
		return ret;
	}

	memset(&prg_attr, 0, sizeof(prg_attr));
	prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	prg_attr.insns = insns;
	prg_attr.insns_cnt = ARRAY_SIZE(insns);
	prg_attr.license = "GPL";

	prog = bpf_load_program_xattr(&prg_attr, NULL, 0);
	if (prog < 0) {
		close(map);
		return 0;
	}

	ret = bpf_prog_bind_map(prog, map, NULL);

	close(map);
	close(prog);

	return ret >= 0;
}

static int probe_module_btf(void)
{
	static const char strs[] = "\0int";
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
	};
	struct bpf_btf_info info;
	__u32 len = sizeof(info);
	char name[16];
	int fd, err;

	fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs));
	if (fd < 0)
		return 0; /* BTF not supported at all */

	memset(&info, 0, sizeof(info));
	info.name = ptr_to_u64(name);
	info.name_len = sizeof(name);

	/* Check that the kernel accepts a name pointer in bpf_btf_info;
	 * support for the name/name_len fields coincides with the kernel's
	 * module BTF support.
	 */
	err = bpf_obj_get_info_by_fd(fd, &info, &len);
	close(fd);
	return !err;
}

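/*
 * Feature probe results are cached in feature_probes[] below, so each
 * kernel feature is detected at most once per process.
 */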
enum kern_feature_result {
	FEAT_UNKNOWN = 0,
	FEAT_SUPPORTED = 1,
	FEAT_MISSING = 2,
};

typedef int (*feature_probe_fn)(void);

static struct kern_feature_desc {
	const char *desc;
	feature_probe_fn probe;
	enum kern_feature_result res;
} feature_probes[__FEAT_CNT] = {
	[FEAT_PROG_NAME] = {
		"BPF program name", probe_kern_prog_name,
	},
	[FEAT_GLOBAL_DATA] = {
		"global variables", probe_kern_global_data,
	},
	[FEAT_BTF] = {
		"minimal BTF", probe_kern_btf,
	},
	[FEAT_BTF_FUNC] = {
		"BTF functions", probe_kern_btf_func,
	},
	[FEAT_BTF_GLOBAL_FUNC] = {
		"BTF global function", probe_kern_btf_func_global,
	},
	[FEAT_BTF_DATASEC] = {
		"BTF data section and variable", probe_kern_btf_datasec,
	},
	[FEAT_ARRAY_MMAP] = {
		"ARRAY map mmap()", probe_kern_array_mmap,
	},
	[FEAT_EXP_ATTACH_TYPE] = {
		"BPF_PROG_LOAD expected_attach_type attribute",
		probe_kern_exp_attach_type,
	},
	[FEAT_PROBE_READ_KERN] = {
		"bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel,
	},
	[FEAT_PROG_BIND_MAP] = {
		"BPF_PROG_BIND_MAP support", probe_prog_bind_map,
	},
	[FEAT_MODULE_BTF] = {
		"module BTF support", probe_module_btf,
	},
};

static bool kernel_supports(enum kern_feature_id feat_id)
{
	struct kern_feature_desc *feat = &feature_probes[feat_id];
	int ret;

	if (READ_ONCE(feat->res) == FEAT_UNKNOWN) {
		ret = feat->probe();
		if (ret > 0) {
			WRITE_ONCE(feat->res, FEAT_SUPPORTED);
		} else if (ret == 0) {
			WRITE_ONCE(feat->res, FEAT_MISSING);
		} else {
			pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret);
			WRITE_ONCE(feat->res, FEAT_MISSING);
		}
	}

	return READ_ONCE(feat->res) == FEAT_SUPPORTED;
}

static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
{
	struct bpf_map_info map_info = {};
	char msg[STRERR_BUFSIZE];
	__u32 map_info_len;

	map_info_len = sizeof(map_info);

	if (bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len)) {
		pr_warn("failed to get map info for map FD %d: %s\n",
			map_fd, libbpf_strerror_r(errno, msg, sizeof(msg)));
		return false;
	}

	return (map_info.type == map->def.type &&
		map_info.key_size == map->def.key_size &&
		map_info.value_size == map->def.value_size &&
		map_info.max_entries == map->def.max_entries &&
		map_info.map_flags == map->def.map_flags);
}

static int
bpf_object__reuse_map(struct bpf_map *map)
{
	char *cp, errmsg[STRERR_BUFSIZE];
	int err, pin_fd;

	pin_fd = bpf_obj_get(map->pin_path);
	if (pin_fd < 0) {
		err = -errno;
		if (err == -ENOENT) {
			pr_debug("found no pinned map to reuse at '%s'\n",
				 map->pin_path);
			return 0;
		}

		cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
		pr_warn("couldn't retrieve pinned map '%s': %s\n",
			map->pin_path, cp);
		return err;
	}

	if (!map_is_reuse_compat(map, pin_fd)) {
		pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n",
			map->pin_path);
		close(pin_fd);
		return -EINVAL;
	}

	err = bpf_map__reuse_fd(map, pin_fd);
	if (err) {
		close(pin_fd);
		return err;
	}
	map->pinned = true;
	pr_debug("reused pinned map at '%s'\n", map->pin_path);

	return 0;
}

static int
bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
{
	enum libbpf_map_type map_type = map->libbpf_type;
	char *cp, errmsg[STRERR_BUFSIZE];
	int err, zero = 0;

	err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0);
	if (err) {
		err = -errno;
		cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
		pr_warn("Error setting initial map(%s) contents: %s\n",
			map->name, cp);
		return err;
	}

	/* Freeze .rodata and .kconfig map as read-only from syscall side. */
	if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG) {
		err = bpf_map_freeze(map->fd);
		if (err) {
			err = -errno;
			cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
			pr_warn("Error freezing map(%s) as read-only: %s\n",
				map->name, cp);
			return err;
		}
	}
	return 0;
}

static void bpf_map__destroy(struct bpf_map *map);

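/*
 * Create one BPF map in the kernel from its parsed definition. If the map
 * carries BTF type info that the kernel rejects, retry the creation once
 * with BTF stripped.
 */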
static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map)
{
	struct bpf_create_map_attr create_attr;
	struct bpf_map_def *def = &map->def;

	memset(&create_attr, 0, sizeof(create_attr));

	if (kernel_supports(FEAT_PROG_NAME))
		create_attr.name = map->name;
	create_attr.map_ifindex = map->map_ifindex;
	create_attr.map_type = def->type;
	create_attr.map_flags = def->map_flags;
	create_attr.key_size = def->key_size;
	create_attr.value_size = def->value_size;
	create_attr.numa_node = map->numa_node;

	if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !def->max_entries) {
		int nr_cpus;

		nr_cpus = libbpf_num_possible_cpus();
		if (nr_cpus < 0) {
			pr_warn("map '%s': failed to determine number of system CPUs: %d\n",
				map->name, nr_cpus);
			return nr_cpus;
		}
		pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus);
		create_attr.max_entries = nr_cpus;
	} else {
		create_attr.max_entries = def->max_entries;
	}

	if (bpf_map__is_struct_ops(map))
		create_attr.btf_vmlinux_value_type_id =
			map->btf_vmlinux_value_type_id;

	create_attr.btf_fd = 0;
	create_attr.btf_key_type_id = 0;
	create_attr.btf_value_type_id = 0;
	if (obj->btf && btf__fd(obj->btf) >= 0 && !bpf_map_find_btf_info(obj, map)) {
		create_attr.btf_fd = btf__fd(obj->btf);
		create_attr.btf_key_type_id = map->btf_key_type_id;
		create_attr.btf_value_type_id = map->btf_value_type_id;
	}

	if (bpf_map_type__is_map_in_map(def->type)) {
		if (map->inner_map) {
			int err;

			err = bpf_object__create_map(obj, map->inner_map);
			if (err) {
				pr_warn("map '%s': failed to create inner map: %d\n",
					map->name, err);
				return err;
			}
			map->inner_map_fd = bpf_map__fd(map->inner_map);
		}
		if (map->inner_map_fd >= 0)
			create_attr.inner_map_fd = map->inner_map_fd;
	}

	map->fd = bpf_create_map_xattr(&create_attr);
	if (map->fd < 0 && (create_attr.btf_key_type_id ||
			    create_attr.btf_value_type_id)) {
		char *cp, errmsg[STRERR_BUFSIZE];
		int err = -errno;

		cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
		pr_warn("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
			map->name, cp, err);
		create_attr.btf_fd = 0;
		create_attr.btf_key_type_id = 0;
		create_attr.btf_value_type_id = 0;
		map->btf_key_type_id = 0;
		map->btf_value_type_id = 0;
		map->fd = bpf_create_map_xattr(&create_attr);
	}

	if (map->fd < 0)
		return -errno;

	if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) {
		bpf_map__destroy(map->inner_map);
		zfree(&map->inner_map);
	}

	return 0;
}

static int init_map_slots(struct bpf_map *map)
{
	const struct bpf_map *targ_map;
	unsigned int i;
	int fd, err;

	for (i = 0; i < map->init_slots_sz; i++) {
		if (!map->init_slots[i])
			continue;

		targ_map = map->init_slots[i];
		fd = bpf_map__fd(targ_map);
		err = bpf_map_update_elem(map->fd, &i, &fd, 0);
		if (err) {
			err = -errno;
			pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n",
				map->name, i, targ_map->name,
				fd, err);
			return err;
		}
		pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n",
			 map->name, i, targ_map->name, fd);
	}

	zfree(&map->init_slots);
	map->init_slots_sz = 0;

	return 0;
}

static int
bpf_object__create_maps(struct bpf_object *obj)
{
	struct bpf_map *map;
	char *cp, errmsg[STRERR_BUFSIZE];
	unsigned int i, j;
	int err;

	for (i = 0; i < obj->nr_maps; i++) {
		map = &obj->maps[i];

		if (map->pin_path) {
			err = bpf_object__reuse_map(map);
			if (err) {
				pr_warn("map '%s': error reusing pinned map\n",
					map->name);
				goto err_out;
			}
		}

		if (map->fd >= 0) {
			pr_debug("map '%s': skipping creation (preset fd=%d)\n",
				 map->name, map->fd);
		} else {
			err = bpf_object__create_map(obj, map);
			if (err)
				goto err_out;

			pr_debug("map '%s': created successfully, fd=%d\n",
				 map->name, map->fd);

			if (bpf_map__is_internal(map)) {
				err = bpf_object__populate_internal_map(obj, map);
				if (err < 0) {
					zclose(map->fd);
					goto err_out;
				}
			}

			if (map->init_slots_sz) {
				err = init_map_slots(map);
				if (err < 0) {
					zclose(map->fd);
					goto err_out;
				}
			}
		}

		if (map->pin_path && !map->pinned) {
			err = bpf_map__pin(map, NULL);
			if (err) {
				pr_warn("map '%s': failed to auto-pin at '%s': %d\n",
					map->name, map->pin_path, err);
				zclose(map->fd);
				goto err_out;
			}
		}
	}

	return 0;

err_out:
	cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
	pr_warn("map '%s': failed to create: %s(%d)\n", map->name, cp, err);
	pr_perm_msg(err);
	for (j = 0; j < i; j++)
		zclose(obj->maps[j].fd);
	return err;
}

4367#define BPF_CORE_SPEC_MAX_LEN 64
4368
4369
4370struct bpf_core_accessor {
4371 __u32 type_id;
4372 __u32 idx;
4373 const char *name;
4374};
4375
struct bpf_core_spec {
	const struct btf *btf;
	/* high-level spec: named fields and array indices only */
	struct bpf_core_accessor spec[BPF_CORE_SPEC_MAX_LEN];
	/* original unresolved (no skip_mods_or_typedefs) root type ID */
	__u32 root_type_id;
	/* CO-RE relocation kind */
	enum bpf_core_relo_kind relo_kind;
	/* high-level spec length */
	int len;
	/* raw, low-level spec: 1-to-1 with each accessor in the spec string */
	int raw_spec[BPF_CORE_SPEC_MAX_LEN];
	/* raw spec length */
	int raw_len;
	/* field bit offset represented by spec */
	__u32 bit_offset;
};
4393
4394static bool str_is_empty(const char *s)
4395{
4396 return !s || !s[0];
4397}
4398
4399static bool is_flex_arr(const struct btf *btf,
4400 const struct bpf_core_accessor *acc,
4401 const struct btf_array *arr)
4402{
4403 const struct btf_type *t;
4404
4405
	/* not a flexible array, if not inside a struct or has non-zero size */
	if (!acc->name || arr->nelems > 0)
		return false;

	/* has to be the last member of enclosing struct */
	t = btf__type_by_id(btf, acc->type_id);
	return acc->idx == btf_vlen(t) - 1;
4412}
4413
4414static const char *core_relo_kind_str(enum bpf_core_relo_kind kind)
4415{
4416 switch (kind) {
4417 case BPF_FIELD_BYTE_OFFSET: return "byte_off";
4418 case BPF_FIELD_BYTE_SIZE: return "byte_sz";
4419 case BPF_FIELD_EXISTS: return "field_exists";
4420 case BPF_FIELD_SIGNED: return "signed";
4421 case BPF_FIELD_LSHIFT_U64: return "lshift_u64";
4422 case BPF_FIELD_RSHIFT_U64: return "rshift_u64";
4423 case BPF_TYPE_ID_LOCAL: return "local_type_id";
4424 case BPF_TYPE_ID_TARGET: return "target_type_id";
4425 case BPF_TYPE_EXISTS: return "type_exists";
4426 case BPF_TYPE_SIZE: return "type_size";
4427 case BPF_ENUMVAL_EXISTS: return "enumval_exists";
4428 case BPF_ENUMVAL_VALUE: return "enumval_value";
4429 default: return "unknown";
4430 }
4431}
4432
4433static bool core_relo_is_field_based(enum bpf_core_relo_kind kind)
4434{
4435 switch (kind) {
4436 case BPF_FIELD_BYTE_OFFSET:
4437 case BPF_FIELD_BYTE_SIZE:
4438 case BPF_FIELD_EXISTS:
4439 case BPF_FIELD_SIGNED:
4440 case BPF_FIELD_LSHIFT_U64:
4441 case BPF_FIELD_RSHIFT_U64:
4442 return true;
4443 default:
4444 return false;
4445 }
4446}
4447
4448static bool core_relo_is_type_based(enum bpf_core_relo_kind kind)
4449{
4450 switch (kind) {
4451 case BPF_TYPE_ID_LOCAL:
4452 case BPF_TYPE_ID_TARGET:
4453 case BPF_TYPE_EXISTS:
4454 case BPF_TYPE_SIZE:
4455 return true;
4456 default:
4457 return false;
4458 }
4459}
4460
4461static bool core_relo_is_enumval_based(enum bpf_core_relo_kind kind)
4462{
4463 switch (kind) {
4464 case BPF_ENUMVAL_EXISTS:
4465 case BPF_ENUMVAL_VALUE:
4466 return true;
4467 default:
4468 return false;
4469 }
4470}
4508
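/*
 * Turn bpf_core_relo into a low- and high-level spec representation,
 * validating correctness along the way, and calculate the resulting field
 * bit offset specified by the accessor string. Low-level spec captures every
 * single level of nestedness, including traversing anonymous struct/union
 * members. High-level one only captures semantically meaningful "turning
 * points": named fields and array indices.
 * E.g., for this case:
 *
 *   struct sample {
 *       int __unimportant;
 *       struct {
 *           int __1;
 *           int __2;
 *           int a[7];
 *       };
 *   };
 *
 *   struct sample *s = ...;
 *
 *   int *x = &s->a[3]; // access string = '0:1:2:3'
 *
 * Low-level spec has 1:1 mapping with each element of the access string
 * (it's just a parsed access string representation): [0, 1, 2, 3].
 *
 * High-level spec will capture only 3 points:
 *   - initial zero-index access by pointer (&s->... is the same as &s[0]...);
 *   - field 'a' access (corresponds to '2' in low-level spec);
 *   - array element #3 access (corresponds to '3' in low-level spec).
 *
 * Type-based relocations (TYPE_EXISTS/TYPE_SIZE,
 * TYPE_ID_LOCAL/TYPE_ID_TARGET) don't capture any field information, so
 * their access string is expected to be just "0". Enum value-based
 * relocations (ENUMVAL_EXISTS/ENUMVAL_VALUE) use the access string to
 * specify the index of the enumerator that needs to be relocated.
 */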
4509static int bpf_core_parse_spec(const struct btf *btf,
4510 __u32 type_id,
4511 const char *spec_str,
4512 enum bpf_core_relo_kind relo_kind,
4513 struct bpf_core_spec *spec)
4514{
4515 int access_idx, parsed_len, i;
4516 struct bpf_core_accessor *acc;
4517 const struct btf_type *t;
4518 const char *name;
4519 __u32 id;
4520 __s64 sz;
4521
4522 if (str_is_empty(spec_str) || *spec_str == ':')
4523 return -EINVAL;
4524
4525 memset(spec, 0, sizeof(*spec));
4526 spec->btf = btf;
4527 spec->root_type_id = type_id;
4528 spec->relo_kind = relo_kind;
4529
4530
	if (core_relo_is_type_based(relo_kind)) {
		/* type-based relos only support the trivial "0" access string */
		if (strcmp(spec_str, "0") != 0)
			return -EINVAL;
		return 0;
	}
4537
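	/* parse spec_str as a ':'-separated list of integer accessors */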
4538 while (*spec_str) {
4539 if (*spec_str == ':')
4540 ++spec_str;
4541 if (sscanf(spec_str, "%d%n", &access_idx, &parsed_len) != 1)
4542 return -EINVAL;
4543 if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
4544 return -E2BIG;
4545 spec_str += parsed_len;
4546 spec->raw_spec[spec->raw_len++] = access_idx;
4547 }
4548
4549 if (spec->raw_len == 0)
4550 return -EINVAL;
4551
4552 t = skip_mods_and_typedefs(btf, type_id, &id);
4553 if (!t)
4554 return -EINVAL;
4555
4556 access_idx = spec->raw_spec[0];
4557 acc = &spec->spec[0];
4558 acc->type_id = id;
4559 acc->idx = access_idx;
4560 spec->len++;
4561
4562 if (core_relo_is_enumval_based(relo_kind)) {
4563 if (!btf_is_enum(t) || spec->raw_len > 1 || access_idx >= btf_vlen(t))
4564 return -EINVAL;
4565
4566
4567 acc->name = btf__name_by_offset(btf, btf_enum(t)[access_idx].name_off);
4568 return 0;
4569 }
4570
4571 if (!core_relo_is_field_based(relo_kind))
4572 return -EINVAL;
4573
4574 sz = btf__resolve_size(btf, id);
4575 if (sz < 0)
4576 return sz;
4577 spec->bit_offset = access_idx * sz * 8;
4578
4579 for (i = 1; i < spec->raw_len; i++) {
4580 t = skip_mods_and_typedefs(btf, id, &id);
4581 if (!t)
4582 return -EINVAL;
4583
4584 access_idx = spec->raw_spec[i];
4585 acc = &spec->spec[spec->len];
4586
4587 if (btf_is_composite(t)) {
4588 const struct btf_member *m;
4589 __u32 bit_offset;
4590
4591 if (access_idx >= btf_vlen(t))
4592 return -EINVAL;
4593
4594 bit_offset = btf_member_bit_offset(t, access_idx);
4595 spec->bit_offset += bit_offset;
4596
4597 m = btf_members(t) + access_idx;
4598 if (m->name_off) {
4599 name = btf__name_by_offset(btf, m->name_off);
4600 if (str_is_empty(name))
4601 return -EINVAL;
4602
4603 acc->type_id = id;
4604 acc->idx = access_idx;
4605 acc->name = name;
4606 spec->len++;
4607 }
4608
4609 id = m->type;
4610 } else if (btf_is_array(t)) {
4611 const struct btf_array *a = btf_array(t);
4612 bool flex;
4613
4614 t = skip_mods_and_typedefs(btf, a->type, &id);
4615 if (!t)
4616 return -EINVAL;
4617
4618 flex = is_flex_arr(btf, acc - 1, a);
4619 if (!flex && access_idx >= a->nelems)
4620 return -EINVAL;
4621
4622 spec->spec[spec->len].type_id = id;
4623 spec->spec[spec->len].idx = access_idx;
4624 spec->len++;
4625
4626 sz = btf__resolve_size(btf, id);
4627 if (sz < 0)
4628 return sz;
4629 spec->bit_offset += access_idx * sz * 8;
4630 } else {
4631 pr_warn("relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %s\n",
4632 type_id, spec_str, i, id, btf_kind_str(t));
4633 return -EINVAL;
4634 }
4635 }
4636
4637 return 0;
4638}
4639
4640static bool bpf_core_is_flavor_sep(const char *s)
4641{
	/* check X___Y name pattern, where X and Y are not underscores */
	return s[0] != '_' &&				      /* X */
	       s[1] == '_' && s[2] == '_' && s[3] == '_' &&   /* ___ */
	       s[4] != '_';				      /* Y */
4646}
4651
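/*
 * Given 'some_struct_name___with_flavor' return the length of a name prefix
 * before last triple underscore. Struct name part after last triple
 * underscore is ignored by BPF CO-RE relocation during relocation matching.
 */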
4652static size_t bpf_core_essential_name_len(const char *name)
4653{
4654 size_t n = strlen(name);
4655 int i;
4656
4657 for (i = n - 5; i >= 0; i--) {
4658 if (bpf_core_is_flavor_sep(name + i))
4659 return i + 1;
4660 }
4661 return n;
4662}
4663
4664struct core_cand
4665{
4666 const struct btf *btf;
4667 const struct btf_type *t;
4668 const char *name;
4669 __u32 id;
4670};
4672
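/* dynamically sized list of candidate types and the BTFs they come from */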
4673struct core_cand_list {
4674 struct core_cand *cands;
4675 int len;
4676};
4677
4678static void bpf_core_free_cands(struct core_cand_list *cands)
4679{
4680 free(cands->cands);
4681 free(cands);
4682}
4683
4684static int bpf_core_add_cands(struct core_cand *local_cand,
4685 size_t local_essent_len,
4686 const struct btf *targ_btf,
4687 const char *targ_btf_name,
4688 int targ_start_id,
4689 struct core_cand_list *cands)
4690{
4691 struct core_cand *new_cands, *cand;
4692 const struct btf_type *t;
4693 const char *targ_name;
4694 size_t targ_essent_len;
4695 int n, i;
4696
4697 n = btf__get_nr_types(targ_btf);
4698 for (i = targ_start_id; i <= n; i++) {
4699 t = btf__type_by_id(targ_btf, i);
4700 if (btf_kind(t) != btf_kind(local_cand->t))
4701 continue;
4702
4703 targ_name = btf__name_by_offset(targ_btf, t->name_off);
4704 if (str_is_empty(targ_name))
4705 continue;
4706
4707 targ_essent_len = bpf_core_essential_name_len(targ_name);
4708 if (targ_essent_len != local_essent_len)
4709 continue;
4710
4711 if (strncmp(local_cand->name, targ_name, local_essent_len) != 0)
4712 continue;
4713
4714 pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s in [%s]\n",
4715 local_cand->id, btf_kind_str(local_cand->t),
4716 local_cand->name, i, btf_kind_str(t), targ_name,
4717 targ_btf_name);
4718 new_cands = libbpf_reallocarray(cands->cands, cands->len + 1,
4719 sizeof(*cands->cands));
4720 if (!new_cands)
4721 return -ENOMEM;
4722
4723 cand = &new_cands[cands->len];
4724 cand->btf = targ_btf;
4725 cand->t = t;
4726 cand->name = targ_name;
4727 cand->id = i;
4728
4729 cands->cands = new_cands;
4730 cands->len++;
4731 }
4732 return 0;
4733}
4734
4735static int load_module_btfs(struct bpf_object *obj)
4736{
4737 struct bpf_btf_info info;
4738 struct module_btf *mod_btf;
4739 struct btf *btf;
4740 char name[64];
4741 __u32 id = 0, len;
4742 int err, fd;
4743
4744 if (obj->btf_modules_loaded)
4745 return 0;
4747
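	/* don't do this again, even if we fail this time */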
4748 obj->btf_modules_loaded = true;
4749
4750
4751 if (!kernel_supports(FEAT_MODULE_BTF))
4752 return 0;
4753
4754 while (true) {
4755 err = bpf_btf_get_next_id(id, &id);
4756 if (err && errno == ENOENT)
4757 return 0;
4758 if (err) {
4759 err = -errno;
4760 pr_warn("failed to iterate BTF objects: %d\n", err);
4761 return err;
4762 }
4763
4764 fd = bpf_btf_get_fd_by_id(id);
4765 if (fd < 0) {
4766 if (errno == ENOENT)
4767 continue;
4768 err = -errno;
4769 pr_warn("failed to get BTF object #%d FD: %d\n", id, err);
4770 return err;
4771 }
4772
4773 len = sizeof(info);
4774 memset(&info, 0, sizeof(info));
4775 info.name = ptr_to_u64(name);
4776 info.name_len = sizeof(name);
4777
4778 err = bpf_obj_get_info_by_fd(fd, &info, &len);
4779 if (err) {
4780 err = -errno;
4781 pr_warn("failed to get BTF object #%d info: %d\n", id, err);
4782 goto err_out;
4783 }
4785
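		/* ignore non-module BTFs and vmlinux BTF itself */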
4786 if (!info.kernel_btf || strcmp(name, "vmlinux") == 0) {
4787 close(fd);
4788 continue;
4789 }
4790
4791 btf = btf_get_from_fd(fd, obj->btf_vmlinux);
4792 if (IS_ERR(btf)) {
4793 pr_warn("failed to load module [%s]'s BTF object #%d: %ld\n",
4794 name, id, PTR_ERR(btf));
4795 err = PTR_ERR(btf);
4796 goto err_out;
4797 }
4798
4799 err = btf_ensure_mem((void **)&obj->btf_modules, &obj->btf_module_cap,
4800 sizeof(*obj->btf_modules), obj->btf_module_cnt + 1);
4801 if (err)
4802 goto err_out;
4803
4804 mod_btf = &obj->btf_modules[obj->btf_module_cnt++];
4805
4806 mod_btf->btf = btf;
4807 mod_btf->id = id;
4808 mod_btf->fd = fd;
4809 mod_btf->name = strdup(name);
4810 if (!mod_btf->name) {
4811 err = -ENOMEM;
4812 goto err_out;
4813 }
4814 continue;
4815
4816err_out:
4817 close(fd);
4818 return err;
4819 }
4820
4821 return 0;
4822}
4823
4824static struct core_cand_list *
4825bpf_core_find_cands(struct bpf_object *obj, const struct btf *local_btf, __u32 local_type_id)
4826{
4827 struct core_cand local_cand = {};
4828 struct core_cand_list *cands;
4829 const struct btf *main_btf;
4830 size_t local_essent_len;
4831 int err, i;
4832
4833 local_cand.btf = local_btf;
4834 local_cand.t = btf__type_by_id(local_btf, local_type_id);
4835 if (!local_cand.t)
4836 return ERR_PTR(-EINVAL);
4837
4838 local_cand.name = btf__name_by_offset(local_btf, local_cand.t->name_off);
4839 if (str_is_empty(local_cand.name))
4840 return ERR_PTR(-EINVAL);
4841 local_essent_len = bpf_core_essential_name_len(local_cand.name);
4842
4843 cands = calloc(1, sizeof(*cands));
4844 if (!cands)
4845 return ERR_PTR(-ENOMEM);
4847
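	/* look for candidates in the main (vmlinux or override) BTF first */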
4848 main_btf = obj->btf_vmlinux_override ?: obj->btf_vmlinux;
4849 err = bpf_core_add_cands(&local_cand, local_essent_len, main_btf, "vmlinux", 1, cands);
4850 if (err)
4851 goto err_out;
4852
4853
4854 if (cands->len)
4855 return cands;
4856
4857
4858 if (obj->btf_vmlinux_override)
4859 return cands;
4860
4861
4862 err = load_module_btfs(obj);
4863 if (err)
4864 goto err_out;
4865
4866 for (i = 0; i < obj->btf_module_cnt; i++) {
4867 err = bpf_core_add_cands(&local_cand, local_essent_len,
4868 obj->btf_modules[i].btf,
4869 obj->btf_modules[i].name,
4870 btf__get_nr_types(obj->btf_vmlinux) + 1,
4871 cands);
4872 if (err)
4873 goto err_out;
4874 }
4875
4876 return cands;
4877err_out:
4878 bpf_core_free_cands(cands);
4879 return ERR_PTR(err);
4880}
4897
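/*
 * Check two types for compatibility for the purpose of field access
 * relocation. const/volatile/restrict and typedefs are skipped to ensure we
 * are relocating semantically compatible entities:
 *   - any two STRUCTs/UNIONs are compatible and can be mixed;
 *   - any two FWDs are compatible, if their names match (modulo flavor suffix);
 *   - any two PTRs are always compatible;
 *   - for ENUMs, names should be the same (ignoring flavor suffix) or at
 *     least one of the enums should be anonymous;
 *   - for INTs, size and signedness are ignored, but bit offset must be zero;
 *   - for ARRAYs, dimensionality is ignored, element types are checked for
 *     compatibility recursively.
 * Everything else shouldn't ever be a target of relocation.
 * Returns 1 if types are compatible, 0 if not, <0 on error.
 */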
4898static int bpf_core_fields_are_compat(const struct btf *local_btf,
4899 __u32 local_id,
4900 const struct btf *targ_btf,
4901 __u32 targ_id)
4902{
4903 const struct btf_type *local_type, *targ_type;
4904
4905recur:
4906 local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
4907 targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
4908 if (!local_type || !targ_type)
4909 return -EINVAL;
4910
4911 if (btf_is_composite(local_type) && btf_is_composite(targ_type))
4912 return 1;
4913 if (btf_kind(local_type) != btf_kind(targ_type))
4914 return 0;
4915
4916 switch (btf_kind(local_type)) {
4917 case BTF_KIND_PTR:
4918 return 1;
4919 case BTF_KIND_FWD:
4920 case BTF_KIND_ENUM: {
4921 const char *local_name, *targ_name;
4922 size_t local_len, targ_len;
4923
4924 local_name = btf__name_by_offset(local_btf,
4925 local_type->name_off);
4926 targ_name = btf__name_by_offset(targ_btf, targ_type->name_off);
4927 local_len = bpf_core_essential_name_len(local_name);
4928 targ_len = bpf_core_essential_name_len(targ_name);
4929
4930 return local_len == 0 || targ_len == 0 ||
4931 (local_len == targ_len &&
4932 strncmp(local_name, targ_name, local_len) == 0);
4933 }
4934 case BTF_KIND_INT:
4935
4936
4937
4938 return btf_int_offset(local_type) == 0 &&
4939 btf_int_offset(targ_type) == 0;
4940 case BTF_KIND_ARRAY:
4941 local_id = btf_array(local_type)->type;
4942 targ_id = btf_array(targ_type)->type;
4943 goto recur;
4944 default:
4945 pr_warn("unexpected kind %d relocated, local [%d], target [%d]\n",
4946 btf_kind(local_type), local_id, targ_id);
4947 return 0;
4948 }
4949}
4966
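/*
 * Given a single high-level named field accessor in local type, find the
 * corresponding high-level accessor for a target type. Along the way,
 * maintain the low-level spec for the target as well as all the bit offset
 * adjustments. Anonymous struct/union members of the target are recursed
 * into. Returns 1 if a compatible match was found, 0 if not, <0 on error.
 */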
4967static int bpf_core_match_member(const struct btf *local_btf,
4968 const struct bpf_core_accessor *local_acc,
4969 const struct btf *targ_btf,
4970 __u32 targ_id,
4971 struct bpf_core_spec *spec,
4972 __u32 *next_targ_id)
4973{
4974 const struct btf_type *local_type, *targ_type;
4975 const struct btf_member *local_member, *m;
4976 const char *local_name, *targ_name;
4977 __u32 local_id;
4978 int i, n, found;
4979
4980 targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
4981 if (!targ_type)
4982 return -EINVAL;
4983 if (!btf_is_composite(targ_type))
4984 return 0;
4985
4986 local_id = local_acc->type_id;
4987 local_type = btf__type_by_id(local_btf, local_id);
4988 local_member = btf_members(local_type) + local_acc->idx;
4989 local_name = btf__name_by_offset(local_btf, local_member->name_off);
4990
4991 n = btf_vlen(targ_type);
4992 m = btf_members(targ_type);
4993 for (i = 0; i < n; i++, m++) {
4994 __u32 bit_offset;
4995
4996 bit_offset = btf_member_bit_offset(targ_type, i);
4997
4998
4999 if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
5000 return -E2BIG;
5001
5002
5003 spec->bit_offset += bit_offset;
5004 spec->raw_spec[spec->raw_len++] = i;
5005
5006 targ_name = btf__name_by_offset(targ_btf, m->name_off);
5007 if (str_is_empty(targ_name)) {
5008
5009 found = bpf_core_match_member(local_btf, local_acc,
5010 targ_btf, m->type,
5011 spec, next_targ_id);
5012 if (found)
5013 return found;
5014 } else if (strcmp(local_name, targ_name) == 0) {
5015
5016 struct bpf_core_accessor *targ_acc;
5017
5018 targ_acc = &spec->spec[spec->len++];
5019 targ_acc->type_id = targ_id;
5020 targ_acc->idx = i;
5021 targ_acc->name = targ_name;
5022
5023 *next_targ_id = m->type;
5024 found = bpf_core_fields_are_compat(local_btf,
5025 local_member->type,
5026 targ_btf, m->type);
5027 if (!found)
5028 spec->len--;
5029 return found;
5030 }
5031
5032 spec->bit_offset -= bit_offset;
5033 spec->raw_len--;
5034 }
5035
5036 return 0;
5037}
5057
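/*
 * Check local and target types for compatibility. This check is used for
 * type-based CO-RE relocations and follows slightly different rules than
 * field-based relocations. The caller is assumed to have checked that root
 * type names match. Mutual recursion is bounded by an explicit depth limit.
 * The rules are:
 *   - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
 *     kind should match for local and target types (i.e., STRUCT is not
 *     compatible with UNION);
 *   - for ENUMs, the size is ignored;
 *   - for INTs, size and signedness are ignored, but bit offset must be zero;
 *   - CONST/VOLATILE/RESTRICT modifiers and TYPEDEFs are skipped;
 *   - PTRs are compatible if the types they point to are compatible;
 *   - for ARRAYs, dimensionality is ignored, element types are checked for
 *     compatibility recursively;
 *   - FUNC_PROTOs are compatible if they have a compatible signature: the
 *     same number of arguments and compatible return and argument types.
 */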
5058static int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
5059 const struct btf *targ_btf, __u32 targ_id)
5060{
5061 const struct btf_type *local_type, *targ_type;
5062 int depth = 32;
5063
5064
5065 local_type = btf__type_by_id(local_btf, local_id);
5066 targ_type = btf__type_by_id(targ_btf, targ_id);
5067 if (btf_kind(local_type) != btf_kind(targ_type))
5068 return 0;
5069
5070recur:
5071 depth--;
5072 if (depth < 0)
5073 return -EINVAL;
5074
5075 local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
5076 targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
5077 if (!local_type || !targ_type)
5078 return -EINVAL;
5079
5080 if (btf_kind(local_type) != btf_kind(targ_type))
5081 return 0;
5082
5083 switch (btf_kind(local_type)) {
5084 case BTF_KIND_UNKN:
5085 case BTF_KIND_STRUCT:
5086 case BTF_KIND_UNION:
5087 case BTF_KIND_ENUM:
5088 case BTF_KIND_FWD:
5089 return 1;
5090 case BTF_KIND_INT:
5091
5092
5093
5094 return btf_int_offset(local_type) == 0 && btf_int_offset(targ_type) == 0;
5095 case BTF_KIND_PTR:
5096 local_id = local_type->type;
5097 targ_id = targ_type->type;
5098 goto recur;
5099 case BTF_KIND_ARRAY:
5100 local_id = btf_array(local_type)->type;
5101 targ_id = btf_array(targ_type)->type;
5102 goto recur;
5103 case BTF_KIND_FUNC_PROTO: {
5104 struct btf_param *local_p = btf_params(local_type);
5105 struct btf_param *targ_p = btf_params(targ_type);
5106 __u16 local_vlen = btf_vlen(local_type);
5107 __u16 targ_vlen = btf_vlen(targ_type);
5108 int i, err;
5109
5110 if (local_vlen != targ_vlen)
5111 return 0;
5112
5113 for (i = 0; i < local_vlen; i++, local_p++, targ_p++) {
5114 skip_mods_and_typedefs(local_btf, local_p->type, &local_id);
5115 skip_mods_and_typedefs(targ_btf, targ_p->type, &targ_id);
5116 err = bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id);
5117 if (err <= 0)
5118 return err;
5119 }
5120
5121
5122 skip_mods_and_typedefs(local_btf, local_type->type, &local_id);
5123 skip_mods_and_typedefs(targ_btf, targ_type->type, &targ_id);
5124 goto recur;
5125 }
5126 default:
5127 pr_warn("unexpected kind %s relocated, local [%d], target [%d]\n",
5128 btf_kind_str(local_type), local_id, targ_id);
5129 return 0;
5130 }
5131}
5136
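/*
 * Try to match local spec to a target type and, if successful, produce full
 * target spec (high-level, low-level + bit offset).
 */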
5137static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
5138 const struct btf *targ_btf, __u32 targ_id,
5139 struct bpf_core_spec *targ_spec)
5140{
5141 const struct btf_type *targ_type;
5142 const struct bpf_core_accessor *local_acc;
5143 struct bpf_core_accessor *targ_acc;
5144 int i, sz, matched;
5145
5146 memset(targ_spec, 0, sizeof(*targ_spec));
5147 targ_spec->btf = targ_btf;
5148 targ_spec->root_type_id = targ_id;
5149 targ_spec->relo_kind = local_spec->relo_kind;
5150
5151 if (core_relo_is_type_based(local_spec->relo_kind)) {
5152 return bpf_core_types_are_compat(local_spec->btf,
5153 local_spec->root_type_id,
5154 targ_btf, targ_id);
5155 }
5156
5157 local_acc = &local_spec->spec[0];
5158 targ_acc = &targ_spec->spec[0];
5159
5160 if (core_relo_is_enumval_based(local_spec->relo_kind)) {
5161 size_t local_essent_len, targ_essent_len;
5162 const struct btf_enum *e;
5163 const char *targ_name;
5164
5165
5166 targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id, &targ_id);
5167 if (!btf_is_enum(targ_type))
5168 return 0;
5169
5170 local_essent_len = bpf_core_essential_name_len(local_acc->name);
5171
5172 for (i = 0, e = btf_enum(targ_type); i < btf_vlen(targ_type); i++, e++) {
5173 targ_name = btf__name_by_offset(targ_spec->btf, e->name_off);
5174 targ_essent_len = bpf_core_essential_name_len(targ_name);
5175 if (targ_essent_len != local_essent_len)
5176 continue;
5177 if (strncmp(local_acc->name, targ_name, local_essent_len) == 0) {
5178 targ_acc->type_id = targ_id;
5179 targ_acc->idx = i;
5180 targ_acc->name = targ_name;
5181 targ_spec->len++;
5182 targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
5183 targ_spec->raw_len++;
5184 return 1;
5185 }
5186 }
5187 return 0;
5188 }
5189
5190 if (!core_relo_is_field_based(local_spec->relo_kind))
5191 return -EINVAL;
5192
5193 for (i = 0; i < local_spec->len; i++, local_acc++, targ_acc++) {
5194 targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id,
5195 &targ_id);
5196 if (!targ_type)
5197 return -EINVAL;
5198
5199 if (local_acc->name) {
5200 matched = bpf_core_match_member(local_spec->btf,
5201 local_acc,
5202 targ_btf, targ_id,
5203 targ_spec, &targ_id);
5204 if (matched <= 0)
5205 return matched;
5206 } else {
5207
5208
5209
5210
5211 if (i > 0) {
5212 const struct btf_array *a;
5213 bool flex;
5214
5215 if (!btf_is_array(targ_type))
5216 return 0;
5217
5218 a = btf_array(targ_type);
5219 flex = is_flex_arr(targ_btf, targ_acc - 1, a);
5220 if (!flex && local_acc->idx >= a->nelems)
5221 return 0;
5222 if (!skip_mods_and_typedefs(targ_btf, a->type,
5223 &targ_id))
5224 return -EINVAL;
5225 }
5226
5227
5228 if (targ_spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
5229 return -E2BIG;
5230
5231 targ_acc->type_id = targ_id;
5232 targ_acc->idx = local_acc->idx;
5233 targ_acc->name = NULL;
5234 targ_spec->len++;
5235 targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
5236 targ_spec->raw_len++;
5237
5238 sz = btf__resolve_size(targ_btf, targ_id);
5239 if (sz < 0)
5240 return sz;
5241 targ_spec->bit_offset += local_acc->idx * sz * 8;
5242 }
5243 }
5244
5245 return 1;
5246}
5247
5248static int bpf_core_calc_field_relo(const struct bpf_program *prog,
5249 const struct bpf_core_relo *relo,
5250 const struct bpf_core_spec *spec,
5251 __u32 *val, __u32 *field_sz, __u32 *type_id,
5252 bool *validate)
5253{
5254 const struct bpf_core_accessor *acc;
5255 const struct btf_type *t;
5256 __u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id;
5257 const struct btf_member *m;
5258 const struct btf_type *mt;
5259 bool bitfield;
5260 __s64 sz;
5261
5262 *field_sz = 0;
5263
5264 if (relo->kind == BPF_FIELD_EXISTS) {
5265 *val = spec ? 1 : 0;
5266 return 0;
5267 }
5268
	if (!spec)
		return -EUCLEAN; /* request instruction poisoning */
5271
5272 acc = &spec->spec[spec->len - 1];
5273 t = btf__type_by_id(spec->btf, acc->type_id);
5274
	/* a[n] accessor needs special handling */
	if (!acc->name) {
		if (relo->kind == BPF_FIELD_BYTE_OFFSET) {
			*val = spec->bit_offset / 8;
			/* remember field size for load/store mem size adjustment */
			sz = btf__resolve_size(spec->btf, acc->type_id);
			if (sz < 0)
				return -EINVAL;
			*field_sz = sz;
			*type_id = acc->type_id;
5285 } else if (relo->kind == BPF_FIELD_BYTE_SIZE) {
5286 sz = btf__resolve_size(spec->btf, acc->type_id);
5287 if (sz < 0)
5288 return -EINVAL;
5289 *val = sz;
5290 } else {
5291 pr_warn("prog '%s': relo %d at insn #%d can't be applied to array access\n",
5292 prog->name, relo->kind, relo->insn_off / 8);
5293 return -EINVAL;
5294 }
5295 if (validate)
5296 *validate = true;
5297 return 0;
5298 }
5299
5300 m = btf_members(t) + acc->idx;
5301 mt = skip_mods_and_typedefs(spec->btf, m->type, &field_type_id);
5302 bit_off = spec->bit_offset;
5303 bit_sz = btf_member_bitfield_size(t, acc->idx);
5304
5305 bitfield = bit_sz > 0;
5306 if (bitfield) {
5307 byte_sz = mt->size;
5308 byte_off = bit_off / 8 / byte_sz * byte_sz;
		/* figure out smallest int size necessary for bitfield load */
		while (bit_off + bit_sz - byte_off * 8 > byte_sz * 8) {
			if (byte_sz >= 8) {
				/* bitfield can't be read with 64-bit read */
5313 pr_warn("prog '%s': relo %d at insn #%d can't be satisfied for bitfield\n",
5314 prog->name, relo->kind, relo->insn_off / 8);
5315 return -E2BIG;
5316 }
5317 byte_sz *= 2;
5318 byte_off = bit_off / 8 / byte_sz * byte_sz;
5319 }
5320 } else {
5321 sz = btf__resolve_size(spec->btf, field_type_id);
5322 if (sz < 0)
5323 return -EINVAL;
5324 byte_sz = sz;
5325 byte_off = spec->bit_offset / 8;
5326 bit_sz = byte_sz * 8;
5327 }
5328
	/* for bitfields, all the relocatable aspects are ambiguous and we
	 * might disagree with compiler, so turn off validation of expected
	 * value, except for signedness
	 */
	if (validate)
		*validate = !bitfield;
5335
5336 switch (relo->kind) {
5337 case BPF_FIELD_BYTE_OFFSET:
5338 *val = byte_off;
5339 if (!bitfield) {
5340 *field_sz = byte_sz;
5341 *type_id = field_type_id;
5342 }
5343 break;
5344 case BPF_FIELD_BYTE_SIZE:
5345 *val = byte_sz;
5346 break;
5347 case BPF_FIELD_SIGNED:
5348
5349 *val = btf_is_enum(mt) ||
5350 (btf_int_encoding(mt) & BTF_INT_SIGNED);
5351 if (validate)
5352 *validate = true;
5353 break;
5354 case BPF_FIELD_LSHIFT_U64:
5355#if __BYTE_ORDER == __LITTLE_ENDIAN
5356 *val = 64 - (bit_off + bit_sz - byte_off * 8);
5357#else
5358 *val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8);
5359#endif
5360 break;
5361 case BPF_FIELD_RSHIFT_U64:
5362 *val = 64 - bit_sz;
5363 if (validate)
5364 *validate = true;
5365 break;
5366 case BPF_FIELD_EXISTS:
5367 default:
5368 return -EOPNOTSUPP;
5369 }
5370
5371 return 0;
5372}
5373
5374static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo,
5375 const struct bpf_core_spec *spec,
5376 __u32 *val)
5377{
5378 __s64 sz;
5379
	/* type-based relos return zero if the target type was not found */
	if (!spec) {
		*val = 0;
		return 0;
	}
5385
5386 switch (relo->kind) {
5387 case BPF_TYPE_ID_TARGET:
5388 *val = spec->root_type_id;
5389 break;
5390 case BPF_TYPE_EXISTS:
5391 *val = 1;
5392 break;
5393 case BPF_TYPE_SIZE:
5394 sz = btf__resolve_size(spec->btf, spec->root_type_id);
5395 if (sz < 0)
5396 return -EINVAL;
5397 *val = sz;
5398 break;
	case BPF_TYPE_ID_LOCAL:
		/* handled specially in bpf_core_apply_relo(), shouldn't get here */
	default:
5402 return -EOPNOTSUPP;
5403 }
5404
5405 return 0;
5406}
5407
5408static int bpf_core_calc_enumval_relo(const struct bpf_core_relo *relo,
5409 const struct bpf_core_spec *spec,
5410 __u32 *val)
5411{
5412 const struct btf_type *t;
5413 const struct btf_enum *e;
5414
5415 switch (relo->kind) {
5416 case BPF_ENUMVAL_EXISTS:
5417 *val = spec ? 1 : 0;
5418 break;
5419 case BPF_ENUMVAL_VALUE:
5420 if (!spec)
5421 return -EUCLEAN;
5422 t = btf__type_by_id(spec->btf, spec->spec[0].type_id);
5423 e = btf_enum(t) + spec->spec[0].idx;
5424 *val = e->val;
5425 break;
5426 default:
5427 return -EOPNOTSUPP;
5428 }
5429
5430 return 0;
5431}
5432
struct bpf_core_relo_res
{
	/* expected value in the instruction, unless validate == false */
	__u32 orig_val;
	/* new value that needs to be patched up to */
	__u32 new_val;
	/* relocation unsuccessful, poison instruction, but don't fail load */
	bool poison;
	/* some relocations can't be validated against orig_val */
	bool validate;
	/* for field byte offset relocations or the forms:
	 *     *(T *)(rX + <off>) = rY
	 *     rX = *(T *)(rY + <off>),
	 * we remember original and resolved field size to adjust direct
	 * memory loads of pointers and integers; this is necessary for 32-bit
	 * host kernel architectures, but also allows to automatically
	 * relocate fields that were resized from, e.g., u32 to u64, etc.
	 */
	bool fail_memsz_adjust;
	__u32 orig_sz;
	__u32 orig_type_id;
	__u32 new_sz;
	__u32 new_type_id;
};
5463
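/*
 * Calculate original and target relocation values, given local and target
 * specs and relocation kind. These values are calculated for each candidate.
 * If there are multiple candidates, resulting values should all be consistent
 * with each other. Otherwise, libbpf will refuse to proceed due to ambiguity.
 * If the instruction has to be poisoned, res->poison will be set to true.
 */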
5464static int bpf_core_calc_relo(const struct bpf_program *prog,
5465 const struct bpf_core_relo *relo,
5466 int relo_idx,
5467 const struct bpf_core_spec *local_spec,
5468 const struct bpf_core_spec *targ_spec,
5469 struct bpf_core_relo_res *res)
5470{
5471 int err = -EOPNOTSUPP;
5472
5473 res->orig_val = 0;
5474 res->new_val = 0;
5475 res->poison = false;
5476 res->validate = true;
5477 res->fail_memsz_adjust = false;
5478 res->orig_sz = res->new_sz = 0;
5479 res->orig_type_id = res->new_type_id = 0;
5480
5481 if (core_relo_is_field_based(relo->kind)) {
5482 err = bpf_core_calc_field_relo(prog, relo, local_spec,
5483 &res->orig_val, &res->orig_sz,
5484 &res->orig_type_id, &res->validate);
5485 err = err ?: bpf_core_calc_field_relo(prog, relo, targ_spec,
5486 &res->new_val, &res->new_sz,
5487 &res->new_type_id, NULL);
5488 if (err)
5489 goto done;
5493
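		/* validate if it's safe to adjust load/store memory size;
		 * adjustments are performed only if original and new memory
		 * sizes differ
		 */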
5494 res->fail_memsz_adjust = false;
5495 if (res->orig_sz != res->new_sz) {
5496 const struct btf_type *orig_t, *new_t;
5497
5498 orig_t = btf__type_by_id(local_spec->btf, res->orig_type_id);
5499 new_t = btf__type_by_id(targ_spec->btf, res->new_type_id);
5514
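			/* There are two use cases in which it's safe to
			 * adjust load/store's mem size:
			 *   - reading a 32-bit kernel pointer, while on BPF
			 *   side it's always 64-bit; in this case it's safe to
			 *   "downsize" instruction size due to pointer being
			 *   treated as non-negative 32-bit value;
			 *   - reading unsigned integers, again due to
			 *   zero-extension preserving the value correctly.
			 *
			 * In all other cases it's incorrect to attempt to
			 * load/store the field because the read value will be
			 * incorrect, so we poison the relocated instruction.
			 */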
5515 if (btf_is_ptr(orig_t) && btf_is_ptr(new_t))
5516 goto done;
5517 if (btf_is_int(orig_t) && btf_is_int(new_t) &&
5518 btf_int_encoding(orig_t) != BTF_INT_SIGNED &&
5519 btf_int_encoding(new_t) != BTF_INT_SIGNED)
5520 goto done;
5524
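		/* mark mem size adjustment as invalid; this makes
		 * bpf_core_patch_insn() poison the load/store instruction
		 */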
5525 res->fail_memsz_adjust = true;
5526 }
5527 } else if (core_relo_is_type_based(relo->kind)) {
5528 err = bpf_core_calc_type_relo(relo, local_spec, &res->orig_val);
5529 err = err ?: bpf_core_calc_type_relo(relo, targ_spec, &res->new_val);
5530 } else if (core_relo_is_enumval_based(relo->kind)) {
5531 err = bpf_core_calc_enumval_relo(relo, local_spec, &res->orig_val);
5532 err = err ?: bpf_core_calc_enumval_relo(relo, targ_spec, &res->new_val);
5533 }
5534
5535done:
	if (err == -EUCLEAN) {
		/* EUCLEAN is used to signal instruction poisoning request */
		res->poison = true;
		err = 0;
	} else if (err == -EOPNOTSUPP) {
		/* EOPNOTSUPP means unknown/unsupported relocation */
5542 pr_warn("prog '%s': relo #%d: unrecognized CO-RE relocation %s (%d) at insn #%d\n",
5543 prog->name, relo_idx, core_relo_kind_str(relo->kind),
5544 relo->kind, relo->insn_off / 8);
5545 }
5546
5547 return err;
5548}
5553
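/*
 * Turn an instruction for which CO-RE relocation failed into an invalid one
 * with a distinct value. This will result in a graceful failure of the
 * program load, unless the instruction is unreachable and gets eliminated by
 * the verifier as dead code.
 */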
5554static void bpf_core_poison_insn(struct bpf_program *prog, int relo_idx,
5555 int insn_idx, struct bpf_insn *insn)
5556{
5557 pr_debug("prog '%s': relo #%d: substituting insn #%d w/ invalid insn\n",
5558 prog->name, relo_idx, insn_idx);
5559 insn->code = BPF_JMP | BPF_CALL;
5560 insn->dst_reg = 0;
5561 insn->src_reg = 0;
5562 insn->off = 0;
5566
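	/* if this instruction is reachable (not dead code), verifier will
	 * complain with something like:
	 * invalid func unknown#195896080
	 */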
5567 insn->imm = 195896080;
5568}
5569
5570static bool is_ldimm64(struct bpf_insn *insn)
5571{
5572 return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
5573}
5574
5575static int insn_bpf_size_to_bytes(struct bpf_insn *insn)
5576{
5577 switch (BPF_SIZE(insn->code)) {
5578 case BPF_DW: return 8;
5579 case BPF_W: return 4;
5580 case BPF_H: return 2;
5581 case BPF_B: return 1;
5582 default: return -1;
5583 }
5584}
5585
5586static int insn_bytes_to_bpf_size(__u32 sz)
5587{
5588 switch (sz) {
5589 case 8: return BPF_DW;
5590 case 4: return BPF_W;
5591 case 2: return BPF_H;
5592 case 1: return BPF_B;
5593 default: return -1;
5594 }
5595}
5613
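/*
 * Patch relocatable BPF instruction.
 *
 * Patched value is determined by the relocation kind and target
 * specification. For existence relocations target spec will be NULL if the
 * field/type is not found. The expected instruction value is determined
 * using the relocation kind and local spec, and is checked before patching
 * the instruction. If the actual value is wrong, bail out with an error.
 *
 * Currently supported classes of BPF instruction are:
 * 1. rX = <imm> (assignment with immediate operand);
 * 2. rX += <imm> (arithmetic operations with immediate operand);
 * 3. rX = <imm64> (load with 64-bit immediate value);
 * 4. rX = *(T *)(rY + <off>), where T is one of {u8, u16, u32, u64};
 * 5. *(T *)(rX + <off>) = rY, where T is one of {u8, u16, u32, u64};
 * 6. *(T *)(rX + <off>) = <imm>, where T is one of {u8, u16, u32, u64}.
 */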
5614static int bpf_core_patch_insn(struct bpf_program *prog,
5615 const struct bpf_core_relo *relo,
5616 int relo_idx,
5617 const struct bpf_core_relo_res *res)
5618{
5619 __u32 orig_val, new_val;
5620 struct bpf_insn *insn;
5621 int insn_idx;
5622 __u8 class;
5623
5624 if (relo->insn_off % BPF_INSN_SZ)
5625 return -EINVAL;
5626 insn_idx = relo->insn_off / BPF_INSN_SZ;
5630
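	/* adjust insn_idx from the section frame of reference to the local
	 * program's frame of reference; (sub-)program code is not yet
	 * relocated, so it's enough to just subtract in-section offset
	 */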
5631 insn_idx = insn_idx - prog->sec_insn_off;
5632 insn = &prog->insns[insn_idx];
5633 class = BPF_CLASS(insn->code);
5634
5635 if (res->poison) {
5636poison:
		/* poison second part of ldimm64 to avoid confusing error from
		 * verifier about "unknown opcode 00"
		 */
		if (is_ldimm64(insn))
			bpf_core_poison_insn(prog, relo_idx, insn_idx + 1, insn + 1);
5642 bpf_core_poison_insn(prog, relo_idx, insn_idx, insn);
5643 return 0;
5644 }
5645
5646 orig_val = res->orig_val;
5647 new_val = res->new_val;
5648
5649 switch (class) {
5650 case BPF_ALU:
5651 case BPF_ALU64:
5652 if (BPF_SRC(insn->code) != BPF_K)
5653 return -EINVAL;
5654 if (res->validate && insn->imm != orig_val) {
5655 pr_warn("prog '%s': relo #%d: unexpected insn #%d (ALU/ALU64) value: got %u, exp %u -> %u\n",
5656 prog->name, relo_idx,
5657 insn_idx, insn->imm, orig_val, new_val);
5658 return -EINVAL;
5659 }
5660 orig_val = insn->imm;
5661 insn->imm = new_val;
5662 pr_debug("prog '%s': relo #%d: patched insn #%d (ALU/ALU64) imm %u -> %u\n",
5663 prog->name, relo_idx, insn_idx,
5664 orig_val, new_val);
5665 break;
5666 case BPF_LDX:
5667 case BPF_ST:
5668 case BPF_STX:
5669 if (res->validate && insn->off != orig_val) {
5670 pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDX/ST/STX) value: got %u, exp %u -> %u\n",
5671 prog->name, relo_idx, insn_idx, insn->off, orig_val, new_val);
5672 return -EINVAL;
5673 }
5674 if (new_val > SHRT_MAX) {
5675 pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) value too big: %u\n",
5676 prog->name, relo_idx, insn_idx, new_val);
5677 return -ERANGE;
5678 }
5679 if (res->fail_memsz_adjust) {
5680 pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) accesses field incorrectly. "
5681 "Make sure you are accessing pointers, unsigned integers, or fields of matching type and size.\n",
5682 prog->name, relo_idx, insn_idx);
5683 goto poison;
5684 }
5685
5686 orig_val = insn->off;
5687 insn->off = new_val;
5688 pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) off %u -> %u\n",
5689 prog->name, relo_idx, insn_idx, orig_val, new_val);
5690
5691 if (res->new_sz != res->orig_sz) {
5692 int insn_bytes_sz, insn_bpf_sz;
5693
5694 insn_bytes_sz = insn_bpf_size_to_bytes(insn);
5695 if (insn_bytes_sz != res->orig_sz) {
5696 pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) unexpected mem size: got %d, exp %u\n",
5697 prog->name, relo_idx, insn_idx, insn_bytes_sz, res->orig_sz);
5698 return -EINVAL;
5699 }
5700
5701 insn_bpf_sz = insn_bytes_to_bpf_size(res->new_sz);
5702 if (insn_bpf_sz < 0) {
5703 pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) invalid new mem size: %u\n",
5704 prog->name, relo_idx, insn_idx, res->new_sz);
5705 return -EINVAL;
5706 }
5707
5708 insn->code = BPF_MODE(insn->code) | insn_bpf_sz | BPF_CLASS(insn->code);
5709 pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) mem_sz %u -> %u\n",
5710 prog->name, relo_idx, insn_idx, res->orig_sz, res->new_sz);
5711 }
5712 break;
5713 case BPF_LD: {
5714 __u64 imm;
5715
5716 if (!is_ldimm64(insn) ||
5717 insn[0].src_reg != 0 || insn[0].off != 0 ||
5718 insn_idx + 1 >= prog->insns_cnt ||
5719 insn[1].code != 0 || insn[1].dst_reg != 0 ||
5720 insn[1].src_reg != 0 || insn[1].off != 0) {
5721 pr_warn("prog '%s': relo #%d: insn #%d (LDIMM64) has unexpected form\n",
5722 prog->name, relo_idx, insn_idx);
5723 return -EINVAL;
5724 }
5725
5726 imm = insn[0].imm + ((__u64)insn[1].imm << 32);
5727 if (res->validate && imm != orig_val) {
5728 pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDIMM64) value: got %llu, exp %u -> %u\n",
5729 prog->name, relo_idx,
5730 insn_idx, (unsigned long long)imm,
5731 orig_val, new_val);
5732 return -EINVAL;
5733 }
5734
5735 insn[0].imm = new_val;
5736 insn[1].imm = 0;
5737 pr_debug("prog '%s': relo #%d: patched insn #%d (LDIMM64) imm64 %llu -> %u\n",
5738 prog->name, relo_idx, insn_idx,
5739 (unsigned long long)imm, new_val);
5740 break;
5741 }
5742 default:
5743 pr_warn("prog '%s': relo #%d: trying to relocate unrecognized insn #%d, code:0x%x, src:0x%x, dst:0x%x, off:0x%x, imm:0x%x\n",
5744 prog->name, relo_idx, insn_idx, insn->code,
5745 insn->src_reg, insn->dst_reg, insn->off, insn->imm);
5746 return -EINVAL;
5747 }
5748
5749 return 0;
5750}
5755
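/*
 * Output CO-RE spec representation in a human-readable form, e.g.:
 * [<type-id>] <kind> <type-name>.x.a[3].b (0:1:2:3 @ offset 16)
 */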
5756static void bpf_core_dump_spec(int level, const struct bpf_core_spec *spec)
5757{
5758 const struct btf_type *t;
5759 const struct btf_enum *e;
5760 const char *s;
5761 __u32 type_id;
5762 int i;
5763
5764 type_id = spec->root_type_id;
5765 t = btf__type_by_id(spec->btf, type_id);
5766 s = btf__name_by_offset(spec->btf, t->name_off);
5767
5768 libbpf_print(level, "[%u] %s %s", type_id, btf_kind_str(t), str_is_empty(s) ? "<anon>" : s);
5769
5770 if (core_relo_is_type_based(spec->relo_kind))
5771 return;
5772
5773 if (core_relo_is_enumval_based(spec->relo_kind)) {
5774 t = skip_mods_and_typedefs(spec->btf, type_id, NULL);
5775 e = btf_enum(t) + spec->raw_spec[0];
5776 s = btf__name_by_offset(spec->btf, e->name_off);
5777
5778 libbpf_print(level, "::%s = %u", s, e->val);
5779 return;
5780 }
5781
5782 if (core_relo_is_field_based(spec->relo_kind)) {
5783 for (i = 0; i < spec->len; i++) {
5784 if (spec->spec[i].name)
5785 libbpf_print(level, ".%s", spec->spec[i].name);
5786 else if (i > 0 || spec->spec[i].idx > 0)
5787 libbpf_print(level, "[%u]", spec->spec[i].idx);
5788 }
5789
5790 libbpf_print(level, " (");
5791 for (i = 0; i < spec->raw_len; i++)
5792 libbpf_print(level, "%s%d", i == 0 ? "" : ":", spec->raw_spec[i]);
5793
5794 if (spec->bit_offset % 8)
5795 libbpf_print(level, " @ offset %u.%u)",
5796 spec->bit_offset / 8, spec->bit_offset % 8);
5797 else
5798 libbpf_print(level, " @ offset %u)", spec->bit_offset / 8);
5799 return;
5800 }
5801}
5802
5803static size_t bpf_core_hash_fn(const void *key, void *ctx)
5804{
5805 return (size_t)key;
5806}
5807
5808static bool bpf_core_equal_fn(const void *k1, const void *k2, void *ctx)
5809{
5810 return k1 == k2;
5811}
5812
5813static void *u32_as_hash_key(__u32 x)
5814{
5815 return (void *)(uintptr_t)x;
5816}
5867
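/*
 * CO-RE relocate single instruction.
 *
 * The outline and important points of the algorithm:
 * 1. For the given local type, find corresponding candidate target types.
 *    Candidate type is a type with the same "essential" name, ignoring
 *    everything after the last triple underscore (___). E.g., `sample`,
 *    `sample___flavor_one`, `sample___flavor_another_one` are all candidates
 *    for each other. Names with triple underscore are referred to as
 *    "flavors" and are useful, among other things, to specify/support
 *    incompatible variations of the same kernel struct, which might differ
 *    between different kernel versions and/or build configurations.
 * 2. For each candidate type, try to match the local specification to this
 *    candidate target type. Matching involves finding corresponding
 *    high-level spec accessors, meaning that all named fields should match
 *    and all array accesses should be within the actual bounds. Also,
 *    types should be compatible (see bpf_core_fields_are_compat for details).
 * 3. It is supported and expected that there might be multiple flavors
 *    matching the spec. As long as all the specs resolve to the same set of
 *    offsets across all candidates, there is no error. If there is any
 *    ambiguity, CO-RE relocation will fail. This is necessary to accommodate
 *    relatively rare cases of multiple flavors, yet crucial for fail-safety.
 * 4. If no matching candidate is found, the outcome depends on the
 *    relocation kind: existence checks evaluate to "not found", while other
 *    kinds result in the instruction being poisoned (see
 *    bpf_core_poison_insn).
 */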
5868static int bpf_core_apply_relo(struct bpf_program *prog,
5869 const struct bpf_core_relo *relo,
5870 int relo_idx,
5871 const struct btf *local_btf,
5872 struct hashmap *cand_cache)
5873{
5874 struct bpf_core_spec local_spec, cand_spec, targ_spec = {};
5875 const void *type_key = u32_as_hash_key(relo->type_id);
5876 struct bpf_core_relo_res cand_res, targ_res;
5877 const struct btf_type *local_type;
5878 const char *local_name;
5879 struct core_cand_list *cands = NULL;
5880 __u32 local_id;
5881 const char *spec_str;
5882 int i, j, err;
5883
5884 local_id = relo->type_id;
5885 local_type = btf__type_by_id(local_btf, local_id);
5886 if (!local_type)
5887 return -EINVAL;
5888
5889 local_name = btf__name_by_offset(local_btf, local_type->name_off);
5890 if (!local_name)
5891 return -EINVAL;
5892
5893 spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
5894 if (str_is_empty(spec_str))
5895 return -EINVAL;
5896
5897 err = bpf_core_parse_spec(local_btf, local_id, spec_str, relo->kind, &local_spec);
5898 if (err) {
5899 pr_warn("prog '%s': relo #%d: parsing [%d] %s %s + %s failed: %d\n",
5900 prog->name, relo_idx, local_id, btf_kind_str(local_type),
5901 str_is_empty(local_name) ? "<anon>" : local_name,
5902 spec_str, err);
5903 return -EINVAL;
5904 }
5905
5906 pr_debug("prog '%s': relo #%d: kind <%s> (%d), spec is ", prog->name,
5907 relo_idx, core_relo_kind_str(relo->kind), relo->kind);
5908 bpf_core_dump_spec(LIBBPF_DEBUG, &local_spec);
5909 libbpf_print(LIBBPF_DEBUG, "\n");
5910
	/* TYPE_ID_LOCAL relo is special and doesn't need candidate search */
	if (relo->kind == BPF_TYPE_ID_LOCAL) {
5913 targ_res.validate = true;
5914 targ_res.poison = false;
5915 targ_res.orig_val = local_spec.root_type_id;
5916 targ_res.new_val = local_spec.root_type_id;
5917 goto patch_insn;
5918 }
5919
	/* libbpf doesn't support candidate search for anonymous types; note
	 * that spec_str emptiness was already rejected above, so the type
	 * name itself is what has to be checked here
	 */
	if (str_is_empty(local_name)) {
5922 pr_warn("prog '%s': relo #%d: <%s> (%d) relocation doesn't support anonymous types\n",
5923 prog->name, relo_idx, core_relo_kind_str(relo->kind), relo->kind);
5924 return -EOPNOTSUPP;
5925 }
5926
5927 if (!hashmap__find(cand_cache, type_key, (void **)&cands)) {
5928 cands = bpf_core_find_cands(prog->obj, local_btf, local_id);
5929 if (IS_ERR(cands)) {
5930 pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld\n",
5931 prog->name, relo_idx, local_id, btf_kind_str(local_type),
5932 local_name, PTR_ERR(cands));
5933 return PTR_ERR(cands);
5934 }
5935 err = hashmap__set(cand_cache, type_key, cands, NULL, NULL);
5936 if (err) {
5937 bpf_core_free_cands(cands);
5938 return err;
5939 }
5940 }
5941
5942 for (i = 0, j = 0; i < cands->len; i++) {
5943 err = bpf_core_spec_match(&local_spec, cands->cands[i].btf,
5944 cands->cands[i].id, &cand_spec);
5945 if (err < 0) {
5946 pr_warn("prog '%s': relo #%d: error matching candidate #%d ",
5947 prog->name, relo_idx, i);
5948 bpf_core_dump_spec(LIBBPF_WARN, &cand_spec);
5949 libbpf_print(LIBBPF_WARN, ": %d\n", err);
5950 return err;
5951 }
5952
5953 pr_debug("prog '%s': relo #%d: %s candidate #%d ", prog->name,
5954 relo_idx, err == 0 ? "non-matching" : "matching", i);
5955 bpf_core_dump_spec(LIBBPF_DEBUG, &cand_spec);
5956 libbpf_print(LIBBPF_DEBUG, "\n");
5957
5958 if (err == 0)
5959 continue;
5960
5961 err = bpf_core_calc_relo(prog, relo, relo_idx, &local_spec, &cand_spec, &cand_res);
5962 if (err)
5963 return err;
5964
5965 if (j == 0) {
5966 targ_res = cand_res;
5967 targ_spec = cand_spec;
5968 } else if (cand_spec.bit_offset != targ_spec.bit_offset) {
			/* if there are many field relo candidates, they
			 * should all resolve to the same bit offset
			 */
5972 pr_warn("prog '%s': relo #%d: field offset ambiguity: %u != %u\n",
5973 prog->name, relo_idx, cand_spec.bit_offset,
5974 targ_spec.bit_offset);
5975 return -EINVAL;
5976 } else if (cand_res.poison != targ_res.poison || cand_res.new_val != targ_res.new_val) {
			/* all candidates should result in the same relocation
			 * decision and value, otherwise it's dangerous to
			 * proceed due to ambiguity
			 */
5981 pr_warn("prog '%s': relo #%d: relocation decision ambiguity: %s %u != %s %u\n",
5982 prog->name, relo_idx,
5983 cand_res.poison ? "failure" : "success", cand_res.new_val,
5984 targ_res.poison ? "failure" : "success", targ_res.new_val);
5985 return -EINVAL;
5986 }
5987
5988 cands->cands[j++] = cands->cands[i];
5989 }
5998
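	/*
	 * For BPF_FIELD_EXISTS relo or when a used BPF program has field
	 * existence checks or kernel version/config checks, it's expected
	 * that we might not find any candidates. In this case, if the field
	 * wasn't found in any candidate, the list of candidates shouldn't
	 * change at all, and we'll just handle relocating appropriately,
	 * depending on relo's kind.
	 */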
5999 if (j > 0)
6000 cands->len = j;
6012
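	/*
	 * If no candidates were found, it might be both a programmer error,
	 * as well as expected case, depending whether instruction w/
	 * relocation is guarded in some way that makes it unreachable (dead
	 * code) if relocation can't be resolved. This is handled in
	 * bpf_core_patch_insn() uniformly by replacing that instruction with
	 * BPF helper call insn (using invalid helper ID). If that instruction
	 * is indeed unreachable, then it will be ignored and eliminated by
	 * verifier. If it was an error, then verifier will complain and point
	 * to a specific instruction number in its log.
	 */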
6013 if (j == 0) {
6014 pr_debug("prog '%s': relo #%d: no matching targets found\n",
6015 prog->name, relo_idx);
		/* calculate single target relo result explicitly */
		err = bpf_core_calc_relo(prog, relo, relo_idx, &local_spec, NULL, &targ_res);
6019 if (err)
6020 return err;
6021 }
6022
6023patch_insn:
6024
6025 err = bpf_core_patch_insn(prog, relo, relo_idx, &targ_res);
6026 if (err) {
6027 pr_warn("prog '%s': relo #%d: failed to patch insn at offset %d: %d\n",
6028 prog->name, relo_idx, relo->insn_off, err);
6029 return -EINVAL;
6030 }
6031
6032 return 0;
6033}
6034
6035static int
6036bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
6037{
6038 const struct btf_ext_info_sec *sec;
6039 const struct bpf_core_relo *rec;
6040 const struct btf_ext_info *seg;
6041 struct hashmap_entry *entry;
6042 struct hashmap *cand_cache = NULL;
6043 struct bpf_program *prog;
6044 const char *sec_name;
6045 int i, err = 0, insn_idx, sec_idx;
6046
6047 if (obj->btf_ext->core_relo_info.len == 0)
6048 return 0;
6049
6050 if (targ_btf_path) {
6051 obj->btf_vmlinux_override = btf__parse(targ_btf_path, NULL);
6052 if (IS_ERR_OR_NULL(obj->btf_vmlinux_override)) {
6053 err = PTR_ERR(obj->btf_vmlinux_override);
6054 pr_warn("failed to parse target BTF: %d\n", err);
6055 return err;
6056 }
6057 }
6058
6059 cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL);
6060 if (IS_ERR(cand_cache)) {
6061 err = PTR_ERR(cand_cache);
6062 goto out;
6063 }
6064
6065 seg = &obj->btf_ext->core_relo_info;
6066 for_each_btf_ext_sec(seg, sec) {
6067 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
6068 if (str_is_empty(sec_name)) {
6069 err = -EINVAL;
6070 goto out;
6071 }
6077
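		/* bpf_object's ELF is gone by now so it's not easy to find
		 * section index by section name, but we can find *any*
		 * bpf_program within desired section name and use its
		 * prog->sec_idx to do a proper search by section index and
		 * instruction offset
		 */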
6078 prog = NULL;
6079 for (i = 0; i < obj->nr_programs; i++) {
6080 prog = &obj->programs[i];
6081 if (strcmp(prog->sec_name, sec_name) == 0)
6082 break;
6083 }
6084 if (!prog) {
6085 pr_warn("sec '%s': failed to find a BPF program\n", sec_name);
6086 return -ENOENT;
6087 }
6088 sec_idx = prog->sec_idx;
6089
6090 pr_debug("sec '%s': found %d CO-RE relocations\n",
6091 sec_name, sec->num_info);
6092
6093 for_each_btf_ext_rec(seg, sec, i, rec) {
6094 insn_idx = rec->insn_off / BPF_INSN_SZ;
6095 prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
6096 if (!prog) {
6097 pr_warn("sec '%s': failed to find program at insn #%d for CO-RE offset relocation #%d\n",
6098 sec_name, insn_idx, i);
6099 err = -EINVAL;
6100 goto out;
6101 }
6102
			/* no need to apply CO-RE relocation if the program is
			 * not going to be loaded
			 */
			if (!prog->load)
				continue;
6107
6108 err = bpf_core_apply_relo(prog, rec, i, obj->btf, cand_cache);
6109 if (err) {
6110 pr_warn("prog '%s': relo #%d: failed to relocate: %d\n",
6111 prog->name, i, err);
6112 goto out;
6113 }
6114 }
6115 }
6116
6117out:
	/* obj->btf_vmlinux and module BTFs are freed after object load */
	btf__free(obj->btf_vmlinux_override);
	obj->btf_vmlinux_override = NULL;
6121
6122 if (!IS_ERR_OR_NULL(cand_cache)) {
6123 hashmap__for_each_entry(cand_cache, entry, i) {
6124 bpf_core_free_cands(entry->value);
6125 }
6126 hashmap__free(cand_cache);
6127 }
6128 return err;
6129}
6135
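/* Relocate data references within program code:
 *  - map references;
 *  - global variable references;
 *  - extern references.
 */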
6136static int
6137bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
6138{
6139 int i;
6140
6141 for (i = 0; i < prog->nr_reloc; i++) {
6142 struct reloc_desc *relo = &prog->reloc_desc[i];
6143 struct bpf_insn *insn = &prog->insns[relo->insn_idx];
6144 struct extern_desc *ext;
6145
6146 switch (relo->type) {
6147 case RELO_LD64:
6148 insn[0].src_reg = BPF_PSEUDO_MAP_FD;
6149 insn[0].imm = obj->maps[relo->map_idx].fd;
6150 relo->processed = true;
6151 break;
6152 case RELO_DATA:
6153 insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
6154 insn[1].imm = insn[0].imm + relo->sym_off;
6155 insn[0].imm = obj->maps[relo->map_idx].fd;
6156 relo->processed = true;
6157 break;
6158 case RELO_EXTERN:
6159 ext = &obj->externs[relo->sym_off];
6160 if (ext->type == EXT_KCFG) {
6161 insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
6162 insn[0].imm = obj->maps[obj->kconfig_map_idx].fd;
6163 insn[1].imm = ext->kcfg.data_off;
6164 } else {
6165 if (ext->ksym.type_id) {
6166 insn[0].src_reg = BPF_PSEUDO_BTF_ID;
6167 insn[0].imm = ext->ksym.kernel_btf_id;
6168 insn[1].imm = ext->ksym.kernel_btf_obj_fd;
6169 } else {
6170 insn[0].imm = (__u32)ext->ksym.addr;
6171 insn[1].imm = ext->ksym.addr >> 32;
6172 }
6173 }
6174 relo->processed = true;
6175 break;
		case RELO_CALL:
			/* handled in a dedicated subprogram-call relocation pass */
			break;
6179 default:
6180 pr_warn("prog '%s': relo #%d: bad relo type %d\n",
6181 prog->name, i, relo->type);
6182 return -EINVAL;
6183 }
6184 }
6185
6186 return 0;
6187}
6188
6189static int adjust_prog_btf_ext_info(const struct bpf_object *obj,
6190 const struct bpf_program *prog,
6191 const struct btf_ext_info *ext_info,
6192 void **prog_info, __u32 *prog_rec_cnt,
6193 __u32 *prog_rec_sz)
6194{
6195 void *copy_start = NULL, *copy_end = NULL;
6196 void *rec, *rec_end, *new_prog_info;
6197 const struct btf_ext_info_sec *sec;
6198 size_t old_sz, new_sz;
6199 const char *sec_name;
6200 int i, off_adj;
6201
6202 for_each_btf_ext_sec(ext_info, sec) {
6203 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
6204 if (!sec_name)
6205 return -EINVAL;
6206 if (strcmp(sec_name, prog->sec_name) != 0)
6207 continue;
6208
6209 for_each_btf_ext_rec(ext_info, sec, i, rec) {
6210 __u32 insn_off = *(__u32 *)rec / BPF_INSN_SZ;
6211
6212 if (insn_off < prog->sec_insn_off)
6213 continue;
6214 if (insn_off >= prog->sec_insn_off + prog->sec_insn_cnt)
6215 break;
6216
6217 if (!copy_start)
6218 copy_start = rec;
6219 copy_end = rec + ext_info->rec_size;
6220 }
6221
6222 if (!copy_start)
6223 return -ENOENT;
6224
		/* append func/line info of a given (sub-)program to the main
		 * program func/line info
		 */
		old_sz = (size_t)(*prog_rec_cnt) * ext_info->rec_size;
6229 new_sz = old_sz + (copy_end - copy_start);
6230 new_prog_info = realloc(*prog_info, new_sz);
6231 if (!new_prog_info)
6232 return -ENOMEM;
6233 *prog_info = new_prog_info;
6234 *prog_rec_cnt = new_sz / ext_info->rec_size;
6235 memcpy(new_prog_info + old_sz, copy_start, copy_end - copy_start);
6242
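		/* Kernel instruction offsets are in units of 8-byte
		 * instructions, while .BTF.ext instruction offsets generated
		 * by Clang are in units of bytes. So convert Clang offsets
		 * into kernel offsets and adjust offset according to program
		 * relocated position.
		 */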
6243 off_adj = prog->sub_insn_off - prog->sec_insn_off;
6244 rec = new_prog_info + old_sz;
6245 rec_end = new_prog_info + new_sz;
6246 for (; rec < rec_end; rec += ext_info->rec_size) {
6247 __u32 *insn_off = rec;
6248
6249 *insn_off = *insn_off / BPF_INSN_SZ + off_adj;
6250 }
6251 *prog_rec_sz = ext_info->rec_size;
6252 return 0;
6253 }
6254
6255 return -ENOENT;
6256}
6257
6258static int
6259reloc_prog_func_and_line_info(const struct bpf_object *obj,
6260 struct bpf_program *main_prog,
6261 const struct bpf_program *prog)
6262{
6263 int err;
6267
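	/* no .BTF.ext relocation if .BTF.ext is missing or kernel doesn't
	 * support func/line info
	 */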
6268 if (!obj->btf_ext || !kernel_supports(FEAT_BTF_FUNC))
6269 return 0;
6273
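	/* only attempt func info relocation if main program's func_info
	 * relocation was successful
	 */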
6274 if (main_prog != prog && !main_prog->func_info)
6275 goto line_info;
6276
6277 err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->func_info,
6278 &main_prog->func_info,
6279 &main_prog->func_info_cnt,
6280 &main_prog->func_info_rec_size);
6281 if (err) {
6282 if (err != -ENOENT) {
6283 pr_warn("prog '%s': error relocating .BTF.ext function info: %d\n",
6284 prog->name, err);
6285 return err;
6286 }
6287 if (main_prog->func_info) {
			/* Some func info has already been found, but this
			 * sub-program has none; error out, as func info must
			 * cover all programs once present.
			 */
6292 pr_warn("prog '%s': missing .BTF.ext function info.\n", prog->name);
6293 return err;
6294 }
		/* main program lacks func info itself; skip func info for all programs */
6296 pr_warn("prog '%s': missing .BTF.ext function info for the main program, skipping all of .BTF.ext func info.\n",
6297 prog->name);
6298 }
6299
6300line_info:
	/* don't relocate line info if main program's relocation failed */
	if (main_prog != prog && !main_prog->line_info)
		return 0;
6304
6305 err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->line_info,
6306 &main_prog->line_info,
6307 &main_prog->line_info_cnt,
6308 &main_prog->line_info_rec_size);
6309 if (err) {
6310 if (err != -ENOENT) {
6311 pr_warn("prog '%s': error relocating .BTF.ext line info: %d\n",
6312 prog->name, err);
6313 return err;
6314 }
6315 if (main_prog->line_info) {
			/* Some line info has already been found, but this
			 * sub-program has none; error out, as line info must
			 * cover all programs once present.
			 */
6320 pr_warn("prog '%s': missing .BTF.ext line info.\n", prog->name);
6321 return err;
6322 }
		/* main program lacks line info itself; skip line info for all programs */
6324 pr_warn("prog '%s': missing .BTF.ext line info for the main program, skipping all of .BTF.ext line info.\n",
6325 prog->name);
6326 }
6327 return 0;
6328}
6329
6330static int cmp_relo_by_insn_idx(const void *key, const void *elem)
6331{
6332 size_t insn_idx = *(const size_t *)key;
6333 const struct reloc_desc *relo = elem;
6334
6335 if (insn_idx == relo->insn_idx)
6336 return 0;
6337 return insn_idx < relo->insn_idx ? -1 : 1;
6338}
6339
6340static struct reloc_desc *find_prog_insn_relo(const struct bpf_program *prog, size_t insn_idx)
6341{
6342 return bsearch(&insn_idx, prog->reloc_desc, prog->nr_reloc,
6343 sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx);
6344}
6345
6346static int
6347bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
6348 struct bpf_program *prog)
6349{
6350 size_t sub_insn_idx, insn_idx, new_cnt;
6351 struct bpf_program *subprog;
6352 struct bpf_insn *insns, *insn;
6353 struct reloc_desc *relo;
6354 int err;
6355
6356 err = reloc_prog_func_and_line_info(obj, main_prog, prog);
6357 if (err)
6358 return err;
6359
6360 for (insn_idx = 0; insn_idx < prog->sec_insn_cnt; insn_idx++) {
6361 insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
6362 if (!insn_is_subprog_call(insn))
6363 continue;
6364
6365 relo = find_prog_insn_relo(prog, insn_idx);
6366 if (relo && relo->type != RELO_CALL) {
6367 pr_warn("prog '%s': unexpected relo for insn #%zu, type %d\n",
6368 prog->name, insn_idx, relo->type);
6369 return -LIBBPF_ERRNO__RELOC;
6370 }
6371 if (relo) {
6378
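			/* sub-program instruction index is a combination of
			 * an offset of a symbol pointed to by relocation and
			 * call instruction's imm field; for global functions,
			 * call always has imm = -1, but for static functions
			 * relocation is against STT_SECTION and insn->imm
			 * points to a start of a static function
			 */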
6379 sub_insn_idx = relo->sym_off / BPF_INSN_SZ + insn->imm + 1;
6380 } else {
6386
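			/* if subprogram call is to a static function within
			 * the same ELF section, there won't be any relocation
			 * emitted, but it also means there is no additional
			 * offset necessary, insns->imm is relative to
			 * section's base load address
			 */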
6387 sub_insn_idx = prog->sec_insn_off + insn_idx + insn->imm + 1;
6388 }
6390
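		/* we enforce that sub-programs should be in .text section */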
6391 subprog = find_prog_by_sec_insn(obj, obj->efile.text_shndx, sub_insn_idx);
6392 if (!subprog) {
6393 pr_warn("prog '%s': no .text section found yet sub-program call exists\n",
6394 prog->name);
6395 return -LIBBPF_ERRNO__RELOC;
6396 }
6407
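		/* if it's the first call instruction calling into this
		 * subprogram (meaning this subprog hasn't been processed
		 * yet) within the context of current main program:
		 *   - append it at the end of main program's instructions blob;
		 *   - store the position in the subprog;
		 *   - relocate the subprogram recursively
		 */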
6408 if (subprog->sub_insn_off == 0) {
6409 subprog->sub_insn_off = main_prog->insns_cnt;
6410
6411 new_cnt = main_prog->insns_cnt + subprog->insns_cnt;
6412 insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns));
6413 if (!insns) {
6414 pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name);
6415 return -ENOMEM;
6416 }
6417 main_prog->insns = insns;
6418 main_prog->insns_cnt = new_cnt;
6419
6420 memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns,
6421 subprog->insns_cnt * sizeof(*insns));
6422
6423 pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n",
6424 main_prog->name, subprog->insns_cnt, subprog->name);
6425
6426 err = bpf_object__reloc_code(obj, main_prog, subprog);
6427 if (err)
6428 return err;
6429 }
6433
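		/* main_prog->insns memory could have been re-allocated, so
		 * calculate pointer again
		 */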
6434 insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
6439
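		/* calculate correct instruction position within current main
		 * prog; each main prog can have a different set of
		 * subprograms appended (potentially in different order as
		 * well), so position of any subprog can be different for
		 * different main programs
		 */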
6440 insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1;
6441
6442 if (relo)
6443 relo->processed = true;
6444
6445 pr_debug("prog '%s': insn #%zu relocated, imm %d points to subprog '%s' (now at %zu offset)\n",
6446 prog->name, insn_idx, insn->imm, subprog->name, subprog->sub_insn_off);
6447 }
6448
6449 return 0;
6450}
6532
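/*
 * Relocate calls to subprograms within a given main (entry-point) program.
 *
 * Each subprog (a non-entry function that can be called from entry progs or
 * other subprogs) gets its sub_insn_off reset to zero first. This serves as
 * an indicator that this subprogram hasn't yet been appended and relocated
 * within the current main prog. Once its sub_insn_off is non-zero, it has
 * been appended and fully relocated within the given main prog.
 *
 * bpf_object__reloc_code() then walks the code: on the first call to a given
 * subprog, the subprog's instructions are appended at the end of the main
 * program's instruction array, its position is recorded, and its own calls
 * are relocated recursively; every call instruction's imm is rewritten to
 * the relative offset of the subprog's copy within this main program. This
 * way each main program gets its own private copy of every subprogram it
 * (transitively) calls.
 */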
6533static int
6534bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog)
6535{
6536 struct bpf_program *subprog;
6537 int i, j, err;
6541
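	/* mark all subprogs as not relocated (yet) within the context of
	 * current main program
	 */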
6542 for (i = 0; i < obj->nr_programs; i++) {
6543 subprog = &obj->programs[i];
6544 if (!prog_is_subprog(obj, subprog))
6545 continue;
6546
6547 subprog->sub_insn_off = 0;
6548 for (j = 0; j < subprog->nr_reloc; j++)
6549 if (subprog->reloc_desc[j].type == RELO_CALL)
6550 subprog->reloc_desc[j].processed = false;
6551 }
6552
6553 err = bpf_object__reloc_code(obj, prog, prog);
6554 if (err)
6555 return err;
6556
6557
6558 return 0;
6559}
6560
6561static int
6562bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
6563{
6564 struct bpf_program *prog;
6565 size_t i;
6566 int err;
6567
6568 if (obj->btf_ext) {
6569 err = bpf_object__relocate_core(obj, targ_btf_path);
6570 if (err) {
6571 pr_warn("failed to perform CO-RE relocations: %d\n",
6572 err);
6573 return err;
6574 }
6575 }
6579
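	/* relocate data references first for all programs and sub-programs,
	 * as they don't change relative to code locations, so subsequent
	 * subprogram processing won't need to re-calculate any of them
	 */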
6580 for (i = 0; i < obj->nr_programs; i++) {
6581 prog = &obj->programs[i];
6582 err = bpf_object__relocate_data(obj, prog);
6583 if (err) {
6584 pr_warn("prog '%s': failed to relocate data references: %d\n",
6585 prog->name, err);
6586 return err;
6587 }
6588 }
6593
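	/* now relocate subprogram calls and append used subprograms to main
	 * programs; each copy of subprogram code needs to be relocated
	 * differently for each main program, because its code location might
	 * have changed
	 */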
6594 for (i = 0; i < obj->nr_programs; i++) {
6595 prog = &obj->programs[i];
6596
6597
6598
6599 if (prog_is_subprog(obj, prog))
6600 continue;
6601
6602 err = bpf_object__relocate_calls(obj, prog);
6603 if (err) {
6604 pr_warn("prog '%s': failed to relocate calls: %d\n",
6605 prog->name, err);
6606 return err;
6607 }
6608 }
6609
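	/* free up relocation descriptors */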
6610 for (i = 0; i < obj->nr_programs; i++) {
6611 prog = &obj->programs[i];
6612 zfree(&prog->reloc_desc);
6613 prog->nr_reloc = 0;
6614 }
6615 return 0;
6616}
6617
6618static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
6619 GElf_Shdr *shdr, Elf_Data *data);
6620
6621static int bpf_object__collect_map_relos(struct bpf_object *obj,
6622 GElf_Shdr *shdr, Elf_Data *data)
6623{
6624 const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *);
6625 int i, j, nrels, new_sz;
6626 const struct btf_var_secinfo *vi = NULL;
6627 const struct btf_type *sec, *var, *def;
6628 struct bpf_map *map = NULL, *targ_map;
6629 const struct btf_member *member;
6630 const char *name, *mname;
6631 Elf_Data *symbols;
6632 unsigned int moff;
6633 GElf_Sym sym;
6634 GElf_Rel rel;
6635 void *tmp;
6636
6637 if (!obj->efile.btf_maps_sec_btf_id || !obj->btf)
6638 return -EINVAL;
6639 sec = btf__type_by_id(obj->btf, obj->efile.btf_maps_sec_btf_id);
6640 if (!sec)
6641 return -EINVAL;
6642
6643 symbols = obj->efile.symbols;
6644 nrels = shdr->sh_size / shdr->sh_entsize;
6645 for (i = 0; i < nrels; i++) {
6646 if (!gelf_getrel(data, i, &rel)) {
6647 pr_warn(".maps relo #%d: failed to get ELF relo\n", i);
6648 return -LIBBPF_ERRNO__FORMAT;
6649 }
6650 if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
6651 pr_warn(".maps relo #%d: symbol %zx not found\n",
6652 i, (size_t)GELF_R_SYM(rel.r_info));
6653 return -LIBBPF_ERRNO__FORMAT;
6654 }
6655 name = elf_sym_str(obj, sym.st_name) ?: "<?>";
6656 if (sym.st_shndx != obj->efile.btf_maps_shndx) {
6657 pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n",
6658 i, name);
6659 return -LIBBPF_ERRNO__RELOC;
6660 }
6661
6662 pr_debug(".maps relo #%d: for %zd value %zd rel.r_offset %zu name %d ('%s')\n",
6663 i, (ssize_t)(rel.r_info >> 32), (size_t)sym.st_value,
6664 (size_t)rel.r_offset, sym.st_name, name);
6665
6666 for (j = 0; j < obj->nr_maps; j++) {
6667 map = &obj->maps[j];
6668 if (map->sec_idx != obj->efile.btf_maps_shndx)
6669 continue;
6670
6671 vi = btf_var_secinfos(sec) + map->btf_var_idx;
6672 if (vi->offset <= rel.r_offset &&
6673 rel.r_offset + bpf_ptr_sz <= vi->offset + vi->size)
6674 break;
6675 }
6676 if (j == obj->nr_maps) {
6677 pr_warn(".maps relo #%d: cannot find map '%s' at rel.r_offset %zu\n",
6678 i, name, (size_t)rel.r_offset);
6679 return -EINVAL;
6680 }
6681
6682 if (!bpf_map_type__is_map_in_map(map->def.type))
6683 return -EINVAL;
6684 if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS &&
6685 map->def.key_size != sizeof(int)) {
6686 pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n",
6687 i, map->name, sizeof(int));
6688 return -EINVAL;
6689 }
6690
6691 targ_map = bpf_object__find_map_by_name(obj, name);
6692 if (!targ_map)
6693 return -ESRCH;
6694
6695 var = btf__type_by_id(obj->btf, vi->type);
6696 def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
6697 if (btf_vlen(def) == 0)
6698 return -EINVAL;
6699 member = btf_members(def) + btf_vlen(def) - 1;
6700 mname = btf__name_by_offset(obj->btf, member->name_off);
6701 if (strcmp(mname, "values"))
6702 return -EINVAL;
6703
6704 moff = btf_member_bit_offset(def, btf_vlen(def) - 1) / 8;
6705 if (rel.r_offset - vi->offset < moff)
6706 return -EINVAL;
6707
6708 moff = rel.r_offset - vi->offset - moff;
6711
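		/* here we use BPF pointer size, which is always 64 bit, as we
		 * are parsing ELF that was built for BPF target
		 */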
6712 if (moff % bpf_ptr_sz)
6713 return -EINVAL;
6714 moff /= bpf_ptr_sz;
6715 if (moff >= map->init_slots_sz) {
6716 new_sz = moff + 1;
6717 tmp = libbpf_reallocarray(map->init_slots, new_sz, host_ptr_sz);
6718 if (!tmp)
6719 return -ENOMEM;
6720 map->init_slots = tmp;
6721 memset(map->init_slots + map->init_slots_sz, 0,
6722 (new_sz - map->init_slots_sz) * host_ptr_sz);
6723 map->init_slots_sz = new_sz;
6724 }
6725 map->init_slots[moff] = targ_map;
6726
6727 pr_debug(".maps relo #%d: map '%s' slot [%d] points to map '%s'\n",
6728 i, map->name, moff, name);
6729 }
6730
6731 return 0;
6732}
6733
6734static int cmp_relocs(const void *_a, const void *_b)
6735{
6736 const struct reloc_desc *a = _a;
6737 const struct reloc_desc *b = _b;
6738
6739 if (a->insn_idx != b->insn_idx)
6740 return a->insn_idx < b->insn_idx ? -1 : 1;
6741
	/* tie-break by type; no two relocations should share an insn_idx */
6743 if (a->type != b->type)
6744 return a->type < b->type ? -1 : 1;
6745
6746 return 0;
6747}
6748
6749static int bpf_object__collect_relos(struct bpf_object *obj)
6750{
6751 int i, err;
6752
6753 for (i = 0; i < obj->efile.nr_reloc_sects; i++) {
6754 GElf_Shdr *shdr = &obj->efile.reloc_sects[i].shdr;
6755 Elf_Data *data = obj->efile.reloc_sects[i].data;
6756 int idx = shdr->sh_info;
6757
6758 if (shdr->sh_type != SHT_REL) {
6759 pr_warn("internal error at %d\n", __LINE__);
6760 return -LIBBPF_ERRNO__INTERNAL;
6761 }
6762
6763 if (idx == obj->efile.st_ops_shndx)
6764 err = bpf_object__collect_st_ops_relos(obj, shdr, data);
6765 else if (idx == obj->efile.btf_maps_shndx)
6766 err = bpf_object__collect_map_relos(obj, shdr, data);
6767 else
6768 err = bpf_object__collect_prog_relos(obj, shdr, data);
6769 if (err)
6770 return err;
6771 }
6772
6773 for (i = 0; i < obj->nr_programs; i++) {
6774 struct bpf_program *p = &obj->programs[i];
6775
6776 if (!p->nr_reloc)
6777 continue;
6778
6779 qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs);
6780 }
6781 return 0;
6782}
6783
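/* A helper call is a BPF_JMP | BPF_CALL instruction with an immediate
 * (BPF_K) operand and zeroed src/dst registers; insn->imm then holds
 * the helper's function ID.
 */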
6784static bool insn_is_helper_call(struct bpf_insn *insn, enum bpf_func_id *func_id)
6785{
6786 if (BPF_CLASS(insn->code) == BPF_JMP &&
6787 BPF_OP(insn->code) == BPF_CALL &&
6788 BPF_SRC(insn->code) == BPF_K &&
6789 insn->src_reg == 0 &&
6790 insn->dst_reg == 0) {
6791 *func_id = insn->imm;
6792 return true;
6793 }
6794 return false;
6795}
6796
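/* Rewrite helper IDs that the running kernel doesn't support into older
 * equivalents, so the same object can load on older kernels.
 */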
6797static int bpf_object__sanitize_prog(struct bpf_object* obj, struct bpf_program *prog)
6798{
6799 struct bpf_insn *insn = prog->insns;
6800 enum bpf_func_id func_id;
6801 int i;
6802
6803 for (i = 0; i < prog->insns_cnt; i++, insn++) {
6804 if (!insn_is_helper_call(insn, &func_id))
6805 continue;
6806
		/* on kernels that don't yet support the
		 * bpf_probe_read_{kernel,user}[_str]() helpers, fall back
		 * to the older bpf_probe_read()/bpf_probe_read_str()
		 */
6811 switch (func_id) {
6812 case BPF_FUNC_probe_read_kernel:
6813 case BPF_FUNC_probe_read_user:
6814 if (!kernel_supports(FEAT_PROBE_READ_KERN))
6815 insn->imm = BPF_FUNC_probe_read;
6816 break;
6817 case BPF_FUNC_probe_read_kernel_str:
6818 case BPF_FUNC_probe_read_user_str:
6819 if (!kernel_supports(FEAT_PROBE_READ_KERN))
6820 insn->imm = BPF_FUNC_probe_read_str;
6821 break;
6822 default:
6823 break;
6824 }
6825 }
6826 return 0;
6827}
6828
6829static int
6830load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
6831 char *license, __u32 kern_version, int *pfd)
6832{
6833 struct bpf_prog_load_params load_attr = {};
6834 char *cp, errmsg[STRERR_BUFSIZE];
6835 size_t log_buf_size = 0;
6836 char *log_buf = NULL;
6837 int btf_fd, ret;
6838
6839 if (prog->type == BPF_PROG_TYPE_UNSPEC) {
		/* the program type was never set and couldn't be guessed
		 * from the ELF section name, so there is nothing sensible
		 * to load
		 */
6844 pr_warn("prog '%s': missing BPF prog type, check ELF section name '%s'\n",
6845 prog->name, prog->sec_name);
6846 return -EINVAL;
6847 }
6848
6849 if (!insns || !insns_cnt)
6850 return -EINVAL;
6851
6852 load_attr.prog_type = prog->type;
6853
6854 if (!kernel_supports(FEAT_EXP_ATTACH_TYPE) && prog->sec_def &&
6855 prog->sec_def->is_exp_attach_type_optional)
6856 load_attr.expected_attach_type = 0;
6857 else
6858 load_attr.expected_attach_type = prog->expected_attach_type;
6859 if (kernel_supports(FEAT_PROG_NAME))
6860 load_attr.name = prog->name;
6861 load_attr.insns = insns;
6862 load_attr.insn_cnt = insns_cnt;
6863 load_attr.license = license;
6864 load_attr.attach_btf_id = prog->attach_btf_id;
6865 if (prog->attach_prog_fd)
6866 load_attr.attach_prog_fd = prog->attach_prog_fd;
6867 else
6868 load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd;
6870 load_attr.kern_version = kern_version;
6871 load_attr.prog_ifindex = prog->prog_ifindex;
6872
	/* pass BTF func_info/line_info only if the kernel supports them */
6874 btf_fd = bpf_object__btf_fd(prog->obj);
6875 if (btf_fd >= 0 && kernel_supports(FEAT_BTF_FUNC)) {
6876 load_attr.prog_btf_fd = btf_fd;
6877 load_attr.func_info = prog->func_info;
6878 load_attr.func_info_rec_size = prog->func_info_rec_size;
6879 load_attr.func_info_cnt = prog->func_info_cnt;
6880 load_attr.line_info = prog->line_info;
6881 load_attr.line_info_rec_size = prog->line_info_rec_size;
6882 load_attr.line_info_cnt = prog->line_info_cnt;
6883 }
6884 load_attr.log_level = prog->log_level;
6885 load_attr.prog_flags = prog->prog_flags;
6886
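	/* if the verifier log overflows, the buffer is doubled (starting
	 * at BPF_LOG_BUF_SIZE) and the load is retried from here
	 */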
6887retry_load:
6888 if (log_buf_size) {
6889 log_buf = malloc(log_buf_size);
6890 if (!log_buf)
6891 return -ENOMEM;
6892
6893 *log_buf = 0;
6894 }
6895
6896 load_attr.log_buf = log_buf;
6897 load_attr.log_buf_sz = log_buf_size;
6898 ret = libbpf__bpf_prog_load(&load_attr);
6899
6900 if (ret >= 0) {
6901 if (log_buf && load_attr.log_level)
6902 pr_debug("verifier log:\n%s", log_buf);
6903
6904 if (prog->obj->rodata_map_idx >= 0 &&
6905 kernel_supports(FEAT_PROG_BIND_MAP)) {
6906 struct bpf_map *rodata_map =
6907 &prog->obj->maps[prog->obj->rodata_map_idx];
6908
6909 if (bpf_prog_bind_map(ret, bpf_map__fd(rodata_map), NULL)) {
6910 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
6911 pr_warn("prog '%s': failed to bind .rodata map: %s\n",
6912 prog->name, cp);
				/* don't treat failure to bind .rodata as fatal */
6914 }
6915 }
6916
6917 *pfd = ret;
6918 ret = 0;
6919 goto out;
6920 }
6921
6922 if (!log_buf || errno == ENOSPC) {
6923 log_buf_size = max((size_t)BPF_LOG_BUF_SIZE,
6924 log_buf_size << 1);
6925
6926 free(log_buf);
6927 goto retry_load;
6928 }
6929 ret = errno ? -errno : -LIBBPF_ERRNO__LOAD;
6930 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
	pr_warn("failed to load BPF program: %s\n", cp);
6932 pr_perm_msg(ret);
6933
6934 if (log_buf && log_buf[0] != '\0') {
6935 ret = -LIBBPF_ERRNO__VERIFY;
		pr_warn("-- BEGIN DUMP LOG --\n");
6937 pr_warn("\n%s\n", log_buf);
6938 pr_warn("-- END LOG --\n");
6939 } else if (load_attr.insn_cnt >= BPF_MAXINSNS) {
6940 pr_warn("Program too large (%zu insns), at most %d insns\n",
6941 load_attr.insn_cnt, BPF_MAXINSNS);
6942 ret = -LIBBPF_ERRNO__PROG2BIG;
6943 } else if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
		/* retry the load as a kprobe program; if that succeeds, the
		 * original failure was caused by a wrong program type
		 */
6945 int fd;
6946
6947 load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
6948 load_attr.expected_attach_type = 0;
6949 load_attr.log_buf = NULL;
6950 load_attr.log_buf_sz = 0;
6951 fd = libbpf__bpf_prog_load(&load_attr);
6952 if (fd >= 0) {
6953 close(fd);
6954 ret = -LIBBPF_ERRNO__PROGTYPE;
6955 goto out;
6956 }
6957 }
6958
6959out:
6960 free(log_buf);
6961 return ret;
6962}
6963
6964static int libbpf_find_attach_btf_id(struct bpf_program *prog, int *btf_obj_fd, int *btf_type_id);
6965
6966int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
6967{
6968 int err = 0, fd, i;
6969
6970 if (prog->obj->loaded) {
6971 pr_warn("prog '%s': can't load after object was loaded\n", prog->name);
6972 return -EINVAL;
6973 }
6974
6975 if ((prog->type == BPF_PROG_TYPE_TRACING ||
6976 prog->type == BPF_PROG_TYPE_LSM ||
6977 prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) {
6978 int btf_obj_fd = 0, btf_type_id = 0;
6979
6980 err = libbpf_find_attach_btf_id(prog, &btf_obj_fd, &btf_type_id);
6981 if (err)
6982 return err;
6983
6984 prog->attach_btf_obj_fd = btf_obj_fd;
6985 prog->attach_btf_id = btf_type_id;
6986 }
6987
6988 if (prog->instances.nr < 0 || !prog->instances.fds) {
6989 if (prog->preprocessor) {
6990 pr_warn("Internal error: can't load program '%s'\n",
6991 prog->name);
6992 return -LIBBPF_ERRNO__INTERNAL;
6993 }
6994
6995 prog->instances.fds = malloc(sizeof(int));
6996 if (!prog->instances.fds) {
6997 pr_warn("Not enough memory for BPF fds\n");
6998 return -ENOMEM;
6999 }
7000 prog->instances.nr = 1;
7001 prog->instances.fds[0] = -1;
7002 }
7003
7004 if (!prog->preprocessor) {
7005 if (prog->instances.nr != 1) {
7006 pr_warn("prog '%s': inconsistent nr(%d) != 1\n",
7007 prog->name, prog->instances.nr);
7008 }
7009 err = load_program(prog, prog->insns, prog->insns_cnt,
7010 license, kern_ver, &fd);
7011 if (!err)
7012 prog->instances.fds[0] = fd;
7013 goto out;
7014 }
7015
7016 for (i = 0; i < prog->instances.nr; i++) {
7017 struct bpf_prog_prep_result result;
7018 bpf_program_prep_t preprocessor = prog->preprocessor;
7019
7020 memset(&result, 0, sizeof(result));
7021 err = preprocessor(prog, i, prog->insns,
7022 prog->insns_cnt, &result);
7023 if (err) {
7024 pr_warn("Preprocessing the %dth instance of program '%s' failed\n",
7025 i, prog->name);
7026 goto out;
7027 }
7028
7029 if (!result.new_insn_ptr || !result.new_insn_cnt) {
7030 pr_debug("Skip loading the %dth instance of program '%s'\n",
7031 i, prog->name);
7032 prog->instances.fds[i] = -1;
7033 if (result.pfd)
7034 *result.pfd = -1;
7035 continue;
7036 }
7037
7038 err = load_program(prog, result.new_insn_ptr,
7039 result.new_insn_cnt, license, kern_ver, &fd);
7040 if (err) {
7041 pr_warn("Loading the %dth instance of program '%s' failed\n",
7042 i, prog->name);
7043 goto out;
7044 }
7045
7046 if (result.pfd)
7047 *result.pfd = fd;
7048 prog->instances.fds[i] = fd;
7049 }
7050out:
7051 if (err)
7052 pr_warn("failed to load program '%s'\n", prog->name);
7053 zfree(&prog->insns);
7054 prog->insns_cnt = 0;
7055 return err;
7056}
7057
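/* Sanitize all programs first, then load every top-level program that
 * is marked for autoload; subprograms are skipped, as they are handled
 * as part of their callers.
 */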
7058static int
7059bpf_object__load_progs(struct bpf_object *obj, int log_level)
7060{
7061 struct bpf_program *prog;
7062 size_t i;
7063 int err;
7064
7065 for (i = 0; i < obj->nr_programs; i++) {
7066 prog = &obj->programs[i];
7067 err = bpf_object__sanitize_prog(obj, prog);
7068 if (err)
7069 return err;
7070 }
7071
7072 for (i = 0; i < obj->nr_programs; i++) {
7073 prog = &obj->programs[i];
7074 if (prog_is_subprog(obj, prog))
7075 continue;
7076 if (!prog->load) {
7077 pr_debug("prog '%s': skipped loading\n", prog->name);
7078 continue;
7079 }
7080 prog->log_level |= log_level;
7081 err = bpf_program__load(prog, obj->license, obj->kern_version);
7082 if (err)
7083 return err;
7084 }
7085 return 0;
7086}
7087
7088static const struct bpf_sec_def *find_sec_def(const char *sec_name);
7089
7090static struct bpf_object *
7091__bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
7092 const struct bpf_object_open_opts *opts)
7093{
7094 const char *obj_name, *kconfig;
7095 struct bpf_program *prog;
7096 struct bpf_object *obj;
7097 char tmp_name[64];
7098 int err;
7099
7100 if (elf_version(EV_CURRENT) == EV_NONE) {
7101 pr_warn("failed to init libelf for %s\n",
7102 path ? : "(mem buf)");
7103 return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
7104 }
7105
7106 if (!OPTS_VALID(opts, bpf_object_open_opts))
7107 return ERR_PTR(-EINVAL);
7108
7109 obj_name = OPTS_GET(opts, object_name, NULL);
7110 if (obj_buf) {
7111 if (!obj_name) {
7112 snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
7113 (unsigned long)obj_buf,
7114 (unsigned long)obj_buf_sz);
7115 obj_name = tmp_name;
7116 }
7117 path = obj_name;
7118 pr_debug("loading object '%s' from buffer\n", obj_name);
7119 }
7120
7121 obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
7122 if (IS_ERR(obj))
7123 return obj;
7124
7125 kconfig = OPTS_GET(opts, kconfig, NULL);
7126 if (kconfig) {
7127 obj->kconfig = strdup(kconfig);
7128 if (!obj->kconfig)
7129 return ERR_PTR(-ENOMEM);
7130 }
7131
7132 err = bpf_object__elf_init(obj);
7133 err = err ? : bpf_object__check_endianness(obj);
7134 err = err ? : bpf_object__elf_collect(obj);
7135 err = err ? : bpf_object__collect_externs(obj);
7136 err = err ? : bpf_object__finalize_btf(obj);
7137 err = err ? : bpf_object__init_maps(obj, opts);
7138 err = err ? : bpf_object__collect_relos(obj);
7139 if (err)
7140 goto out;
7141 bpf_object__elf_finish(obj);
7142
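	/* infer program type and expected attach type from section names */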
7143 bpf_object__for_each_program(prog, obj) {
7144 prog->sec_def = find_sec_def(prog->sec_name);
7145 if (!prog->sec_def) {
			/* couldn't guess, but user might manually specify */
7147 pr_debug("prog '%s': unrecognized ELF section name '%s'\n",
7148 prog->name, prog->sec_name);
7149 continue;
7150 }
7151
7152 if (prog->sec_def->is_sleepable)
7153 prog->prog_flags |= BPF_F_SLEEPABLE;
7154 bpf_program__set_type(prog, prog->sec_def->prog_type);
7155 bpf_program__set_expected_attach_type(prog,
7156 prog->sec_def->expected_attach_type);
7157
7158 if (prog->sec_def->prog_type == BPF_PROG_TYPE_TRACING ||
7159 prog->sec_def->prog_type == BPF_PROG_TYPE_EXT)
7160 prog->attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
7161 }
7162
7163 return obj;
7164out:
7165 bpf_object__close(obj);
7166 return ERR_PTR(err);
7167}
7168
7169static struct bpf_object *
7170__bpf_object__open_xattr(struct bpf_object_open_attr *attr, int flags)
7171{
7172 DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
7173 .relaxed_maps = flags & MAPS_RELAX_COMPAT,
7174 );
7175
	/* returning NULL is wrong, but backwards-compatible */
7177 if (!attr->file)
7178 return NULL;
7179
7180 pr_debug("loading %s\n", attr->file);
7181 return __bpf_object__open(attr->file, NULL, 0, &opts);
7182}
7183
7184struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
7185{
7186 return __bpf_object__open_xattr(attr, 0);
7187}
7188
7189struct bpf_object *bpf_object__open(const char *path)
7190{
7191 struct bpf_object_open_attr attr = {
7192 .file = path,
7193 .prog_type = BPF_PROG_TYPE_UNSPEC,
7194 };
7195
7196 return bpf_object__open_xattr(&attr);
7197}
7198
7199struct bpf_object *
7200bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts)
7201{
7202 if (!path)
7203 return ERR_PTR(-EINVAL);
7204
7205 pr_debug("loading %s\n", path);
7206
7207 return __bpf_object__open(path, NULL, 0, opts);
7208}
7209
7210struct bpf_object *
7211bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
7212 const struct bpf_object_open_opts *opts)
7213{
7214 if (!obj_buf || obj_buf_sz == 0)
7215 return ERR_PTR(-EINVAL);
7216
7217 return __bpf_object__open(NULL, obj_buf, obj_buf_sz, opts);
7218}
7219
7220struct bpf_object *
7221bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
7222 const char *name)
7223{
7224 DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
7225 .object_name = name,
		/* wrong default, but backwards-compatible */
7227 .relaxed_maps = true,
7228 );
7229

	/* returning NULL is wrong, but backwards-compatible */
7231 if (!obj_buf || obj_buf_sz == 0)
7232 return NULL;
7233
7234 return bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);
7235}
7236
7237int bpf_object__unload(struct bpf_object *obj)
7238{
7239 size_t i;
7240
7241 if (!obj)
7242 return -EINVAL;
7243
7244 for (i = 0; i < obj->nr_maps; i++) {
7245 zclose(obj->maps[i].fd);
7246 if (obj->maps[i].st_ops)
7247 zfree(&obj->maps[i].st_ops->kern_vdata);
7248 }
7249
7250 for (i = 0; i < obj->nr_programs; i++)
7251 bpf_program__unload(&obj->programs[i]);
7252
7253 return 0;
7254}
7255
7256static int bpf_object__sanitize_maps(struct bpf_object *obj)
7257{
7258 struct bpf_map *m;
7259
7260 bpf_object__for_each_map(m, obj) {
7261 if (!bpf_map__is_internal(m))
7262 continue;
7263 if (!kernel_supports(FEAT_GLOBAL_DATA)) {
7264 pr_warn("kernel doesn't support global data\n");
7265 return -ENOTSUP;
7266 }
7267 if (!kernel_supports(FEAT_ARRAY_MMAP))
7268 m->def.map_flags ^= BPF_F_MMAPABLE;
7269 }
7270
7271 return 0;
7272}
7273
7274static int bpf_object__read_kallsyms_file(struct bpf_object *obj)
7275{
7276 char sym_type, sym_name[500];
7277 unsigned long long sym_addr;
7278 struct extern_desc *ext;
7279 int ret, err = 0;
7280 FILE *f;
7281
7282 f = fopen("/proc/kallsyms", "r");
7283 if (!f) {
7284 err = -errno;
7285 pr_warn("failed to open /proc/kallsyms: %d\n", err);
7286 return err;
7287 }
7288
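	/* each kallsyms line has the form "<addr> <type> <symbol>[ <module>]" */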
7289 while (true) {
7290 ret = fscanf(f, "%llx %c %499s%*[^\n]\n",
7291 &sym_addr, &sym_type, sym_name);
7292 if (ret == EOF && feof(f))
7293 break;
7294 if (ret != 3) {
7295 pr_warn("failed to read kallsyms entry: %d\n", ret);
7296 err = -EINVAL;
7297 goto out;
7298 }
7299
7300 ext = find_extern_by_name(obj, sym_name);
7301 if (!ext || ext->type != EXT_KSYM)
7302 continue;
7303
7304 if (ext->is_set && ext->ksym.addr != sym_addr) {
7305 pr_warn("extern (ksym) '%s' resolution is ambiguous: 0x%llx or 0x%llx\n",
7306 sym_name, ext->ksym.addr, sym_addr);
7307 err = -EINVAL;
7308 goto out;
7309 }
7310 if (!ext->is_set) {
7311 ext->is_set = true;
7312 ext->ksym.addr = sym_addr;
7313 pr_debug("extern (ksym) %s=0x%llx\n", sym_name, sym_addr);
7314 }
7315 }
7316
7317out:
7318 fclose(f);
7319 return err;
7320}
7321
7322static int bpf_object__resolve_ksyms_btf_id(struct bpf_object *obj)
7323{
7324 struct extern_desc *ext;
7325 struct btf *btf;
7326 int i, j, id, btf_fd, err;
7327
7328 for (i = 0; i < obj->nr_extern; i++) {
7329 const struct btf_type *targ_var, *targ_type;
7330 __u32 targ_type_id, local_type_id;
7331 const char *targ_var_name;
7332 int ret;
7333
7334 ext = &obj->externs[i];
7335 if (ext->type != EXT_KSYM || !ext->ksym.type_id)
7336 continue;
7337
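		/* search vmlinux BTF first, falling back to module BTFs below */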
7338 btf = obj->btf_vmlinux;
7339 btf_fd = 0;
7340 id = btf__find_by_name_kind(btf, ext->name, BTF_KIND_VAR);
7341 if (id == -ENOENT) {
7342 err = load_module_btfs(obj);
7343 if (err)
7344 return err;
7345
7346 for (j = 0; j < obj->btf_module_cnt; j++) {
7347 btf = obj->btf_modules[j].btf;
				/* remember which module BTF the symbol came from */
7349 btf_fd = obj->btf_modules[j].fd;
7350 id = btf__find_by_name_kind(btf, ext->name, BTF_KIND_VAR);
7351 if (id != -ENOENT)
7352 break;
7353 }
7354 }
7355 if (id <= 0) {
7356 pr_warn("extern (ksym) '%s': failed to find BTF ID in kernel BTF(s).\n",
7357 ext->name);
7358 return -ESRCH;
7359 }
7360
		/* find local type_id */
7362 local_type_id = ext->ksym.type_id;
7363
		/* find targ type_id */
7365 targ_var = btf__type_by_id(btf, id);
7366 targ_var_name = btf__name_by_offset(btf, targ_var->name_off);
7367 targ_type = skip_mods_and_typedefs(btf, targ_var->type, &targ_type_id);
7368
7369 ret = bpf_core_types_are_compat(obj->btf, local_type_id,
7370 btf, targ_type_id);
7371 if (ret <= 0) {
7372 const struct btf_type *local_type;
7373 const char *targ_name, *local_name;
7374
7375 local_type = btf__type_by_id(obj->btf, local_type_id);
7376 local_name = btf__name_by_offset(obj->btf, local_type->name_off);
7377 targ_name = btf__name_by_offset(btf, targ_type->name_off);
7378
7379 pr_warn("extern (ksym) '%s': incompatible types, expected [%d] %s %s, but kernel has [%d] %s %s\n",
7380 ext->name, local_type_id,
7381 btf_kind_str(local_type), local_name, targ_type_id,
7382 btf_kind_str(targ_type), targ_name);
7383 return -EINVAL;
7384 }
7385
7386 ext->is_set = true;
7387 ext->ksym.kernel_btf_obj_fd = btf_fd;
7388 ext->ksym.kernel_btf_id = id;
7389 pr_debug("extern (ksym) '%s': resolved to [%d] %s %s\n",
7390 ext->name, id, btf_kind_str(targ_var), targ_var_name);
7391 }
7392 return 0;
7393}
7394
7395static int bpf_object__resolve_externs(struct bpf_object *obj,
7396 const char *extra_kconfig)
7397{
7398 bool need_config = false, need_kallsyms = false;
7399 bool need_vmlinux_btf = false;
7400 struct extern_desc *ext;
7401 void *kcfg_data = NULL;
7402 int err, i;
7403
7404 if (obj->nr_extern == 0)
7405 return 0;
7406
7407 if (obj->kconfig_map_idx >= 0)
7408 kcfg_data = obj->maps[obj->kconfig_map_idx].mmaped;
7409
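	/* resolve kernel-version externs inline; for the rest, record which
	 * external sources (kconfig, kallsyms, kernel BTF) are needed
	 */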
7410 for (i = 0; i < obj->nr_extern; i++) {
7411 ext = &obj->externs[i];
7412
7413 if (ext->type == EXT_KCFG &&
7414 strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) {
7415 void *ext_val = kcfg_data + ext->kcfg.data_off;
7416 __u32 kver = get_kernel_version();
7417
7418 if (!kver) {
7419 pr_warn("failed to get kernel version\n");
7420 return -EINVAL;
7421 }
7422 err = set_kcfg_value_num(ext, ext_val, kver);
7423 if (err)
7424 return err;
7425 pr_debug("extern (kcfg) %s=0x%x\n", ext->name, kver);
7426 } else if (ext->type == EXT_KCFG &&
7427 strncmp(ext->name, "CONFIG_", 7) == 0) {
7428 need_config = true;
7429 } else if (ext->type == EXT_KSYM) {
7430 if (ext->ksym.type_id)
7431 need_vmlinux_btf = true;
7432 else
7433 need_kallsyms = true;
7434 } else {
7435 pr_warn("unrecognized extern '%s'\n", ext->name);
7436 return -EINVAL;
7437 }
7438 }
7439 if (need_config && extra_kconfig) {
7440 err = bpf_object__read_kconfig_mem(obj, extra_kconfig, kcfg_data);
7441 if (err)
7442 return -EINVAL;
7443 need_config = false;
7444 for (i = 0; i < obj->nr_extern; i++) {
7445 ext = &obj->externs[i];
7446 if (ext->type == EXT_KCFG && !ext->is_set) {
7447 need_config = true;
7448 break;
7449 }
7450 }
7451 }
7452 if (need_config) {
7453 err = bpf_object__read_kconfig_file(obj, kcfg_data);
7454 if (err)
7455 return -EINVAL;
7456 }
7457 if (need_kallsyms) {
7458 err = bpf_object__read_kallsyms_file(obj);
7459 if (err)
7460 return -EINVAL;
7461 }
7462 if (need_vmlinux_btf) {
7463 err = bpf_object__resolve_ksyms_btf_id(obj);
7464 if (err)
7465 return -EINVAL;
7466 }
7467 for (i = 0; i < obj->nr_extern; i++) {
7468 ext = &obj->externs[i];
7469
7470 if (!ext->is_set && !ext->is_weak) {
7471 pr_warn("extern %s (strong) not resolved\n", ext->name);
7472 return -ESRCH;
7473 } else if (!ext->is_set) {
7474 pr_debug("extern %s (weak) not resolved, defaulting to zero\n",
7475 ext->name);
7476 }
7477 }
7478
7479 return 0;
7480}
7481
7482int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
7483{
7484 struct bpf_object *obj;
7485 int err, i;
7486
7487 if (!attr)
7488 return -EINVAL;
7489 obj = attr->obj;
7490 if (!obj)
7491 return -EINVAL;
7492
7493 if (obj->loaded) {
7494 pr_warn("object '%s': load can't be attempted twice\n", obj->name);
7495 return -EINVAL;
7496 }
7497
7498 err = bpf_object__probe_loading(obj);
7499 err = err ? : bpf_object__load_vmlinux_btf(obj, false);
7500 err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
7501 err = err ? : bpf_object__sanitize_and_load_btf(obj);
7502 err = err ? : bpf_object__sanitize_maps(obj);
7503 err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
7504 err = err ? : bpf_object__create_maps(obj);
7505 err = err ? : bpf_object__relocate(obj, attr->target_btf_path);
7506 err = err ? : bpf_object__load_progs(obj, attr->log_level);
7507
	/* clean up module BTFs */
7509 for (i = 0; i < obj->btf_module_cnt; i++) {
7510 close(obj->btf_modules[i].fd);
7511 btf__free(obj->btf_modules[i].btf);
7512 free(obj->btf_modules[i].name);
7513 }
7514 free(obj->btf_modules);
7515
	/* clean up vmlinux BTF */
7517 btf__free(obj->btf_vmlinux);
7518 obj->btf_vmlinux = NULL;
7519
7520 obj->loaded = true;
7521
7522 if (err)
7523 goto out;
7524
7525 return 0;
7526out:
	/* unpin any maps that were auto-pinned during load */
7528 for (i = 0; i < obj->nr_maps; i++)
7529 if (obj->maps[i].pinned && !obj->maps[i].reused)
7530 bpf_map__unpin(&obj->maps[i], NULL);
7531
7532 bpf_object__unload(obj);
7533 pr_warn("failed to load object '%s'\n", obj->path);
7534 return err;
7535}
7536
7537int bpf_object__load(struct bpf_object *obj)
7538{
7539 struct bpf_object_load_attr attr = {
7540 .obj = obj,
7541 };
7542
7543 return bpf_object__load_xattr(&attr);
7544}
7545
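/* create the parent directory of 'path', tolerating it already existing */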
7546static int make_parent_dir(const char *path)
7547{
7548 char *cp, errmsg[STRERR_BUFSIZE];
7549 char *dname, *dir;
7550 int err = 0;
7551
7552 dname = strdup(path);
7553 if (dname == NULL)
7554 return -ENOMEM;
7555
7556 dir = dirname(dname);
7557 if (mkdir(dir, 0700) && errno != EEXIST)
7558 err = -errno;
7559
7560 free(dname);
7561 if (err) {
7562 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
7563 pr_warn("failed to mkdir %s: %s\n", path, cp);
7564 }
7565 return err;
7566}
7567
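/* verify that the directory containing 'path' is mounted on a BPF FS */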
7568static int check_path(const char *path)
7569{
7570 char *cp, errmsg[STRERR_BUFSIZE];
7571 struct statfs st_fs;
7572 char *dname, *dir;
7573 int err = 0;
7574
7575 if (path == NULL)
7576 return -EINVAL;
7577
7578 dname = strdup(path);
7579 if (dname == NULL)
7580 return -ENOMEM;
7581
7582 dir = dirname(dname);
	if (statfs(dir, &st_fs)) {
		err = -errno;
		cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
		pr_warn("failed to statfs %s: %s\n", dir, cp);
	}
7588 free(dname);
7589
7590 if (!err && st_fs.f_type != BPF_FS_MAGIC) {
7591 pr_warn("specified path %s is not on BPF FS\n", path);
7592 err = -EINVAL;
7593 }
7594
7595 return err;
7596}
7597
7598int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
7599 int instance)
7600{
7601 char *cp, errmsg[STRERR_BUFSIZE];
7602 int err;
7603
7604 err = make_parent_dir(path);
7605 if (err)
7606 return err;
7607
7608 err = check_path(path);
7609 if (err)
7610 return err;
7611
7612 if (prog == NULL) {
7613 pr_warn("invalid program pointer\n");
7614 return -EINVAL;
7615 }
7616
7617 if (instance < 0 || instance >= prog->instances.nr) {
7618 pr_warn("invalid prog instance %d of prog %s (max %d)\n",
7619 instance, prog->name, prog->instances.nr);
7620 return -EINVAL;
7621 }
7622
7623 if (bpf_obj_pin(prog->instances.fds[instance], path)) {
7624 err = -errno;
7625 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
7626 pr_warn("failed to pin program: %s\n", cp);
7627 return err;
7628 }
7629 pr_debug("pinned program '%s'\n", path);
7630
7631 return 0;
7632}
7633
7634int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
7635 int instance)
7636{
7637 int err;
7638
7639 err = check_path(path);
7640 if (err)
7641 return err;
7642
7643 if (prog == NULL) {
7644 pr_warn("invalid program pointer\n");
7645 return -EINVAL;
7646 }
7647
7648 if (instance < 0 || instance >= prog->instances.nr) {
7649 pr_warn("invalid prog instance %d of prog %s (max %d)\n",
7650 instance, prog->name, prog->instances.nr);
7651 return -EINVAL;
7652 }
7653
7654 err = unlink(path);
7655 if (err != 0)
7656 return -errno;
7657 pr_debug("unpinned program '%s'\n", path);
7658
7659 return 0;
7660}
7661
7662int bpf_program__pin(struct bpf_program *prog, const char *path)
7663{
7664 int i, err;
7665
7666 err = make_parent_dir(path);
7667 if (err)
7668 return err;
7669
7670 err = check_path(path);
7671 if (err)
7672 return err;
7673
7674 if (prog == NULL) {
7675 pr_warn("invalid program pointer\n");
7676 return -EINVAL;
7677 }
7678
7679 if (prog->instances.nr <= 0) {
7680 pr_warn("no instances of prog %s to pin\n", prog->name);
7681 return -EINVAL;
7682 }
7683
7684 if (prog->instances.nr == 1) {
		/* don't create subdirs when pinning single instance */
7686 return bpf_program__pin_instance(prog, path, 0);
7687 }
7688
7689 for (i = 0; i < prog->instances.nr; i++) {
7690 char buf[PATH_MAX];
7691 int len;
7692
7693 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
7694 if (len < 0) {
7695 err = -EINVAL;
7696 goto err_unpin;
7697 } else if (len >= PATH_MAX) {
7698 err = -ENAMETOOLONG;
7699 goto err_unpin;
7700 }
7701
7702 err = bpf_program__pin_instance(prog, buf, i);
7703 if (err)
7704 goto err_unpin;
7705 }
7706
7707 return 0;
7708
7709err_unpin:
7710 for (i = i - 1; i >= 0; i--) {
7711 char buf[PATH_MAX];
7712 int len;
7713
7714 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
7715 if (len < 0)
7716 continue;
7717 else if (len >= PATH_MAX)
7718 continue;
7719
7720 bpf_program__unpin_instance(prog, buf, i);
7721 }
7722
7723 rmdir(path);
7724
7725 return err;
7726}
7727
7728int bpf_program__unpin(struct bpf_program *prog, const char *path)
7729{
7730 int i, err;
7731
7732 err = check_path(path);
7733 if (err)
7734 return err;
7735
7736 if (prog == NULL) {
7737 pr_warn("invalid program pointer\n");
7738 return -EINVAL;
7739 }
7740
7741 if (prog->instances.nr <= 0) {
		pr_warn("no instances of prog %s to unpin\n", prog->name);
7743 return -EINVAL;
7744 }
7745
7746 if (prog->instances.nr == 1) {
		/* no subdirs were created for a single instance */
7748 return bpf_program__unpin_instance(prog, path, 0);
7749 }
7750
7751 for (i = 0; i < prog->instances.nr; i++) {
7752 char buf[PATH_MAX];
7753 int len;
7754
7755 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
7756 if (len < 0)
7757 return -EINVAL;
7758 else if (len >= PATH_MAX)
7759 return -ENAMETOOLONG;
7760
7761 err = bpf_program__unpin_instance(prog, buf, i);
7762 if (err)
7763 return err;
7764 }
7765
7766 err = rmdir(path);
7767 if (err)
7768 return -errno;
7769
7770 return 0;
7771}
7772
7773int bpf_map__pin(struct bpf_map *map, const char *path)
7774{
7775 char *cp, errmsg[STRERR_BUFSIZE];
7776 int err;
7777
7778 if (map == NULL) {
7779 pr_warn("invalid map pointer\n");
7780 return -EINVAL;
7781 }
7782
7783 if (map->pin_path) {
7784 if (path && strcmp(path, map->pin_path)) {
7785 pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
7786 bpf_map__name(map), map->pin_path, path);
7787 return -EINVAL;
7788 } else if (map->pinned) {
7789 pr_debug("map '%s' already pinned at '%s'; not re-pinning\n",
7790 bpf_map__name(map), map->pin_path);
7791 return 0;
7792 }
7793 } else {
7794 if (!path) {
7795 pr_warn("missing a path to pin map '%s' at\n",
7796 bpf_map__name(map));
7797 return -EINVAL;
7798 } else if (map->pinned) {
7799 pr_warn("map '%s' already pinned\n", bpf_map__name(map));
7800 return -EEXIST;
7801 }
7802
7803 map->pin_path = strdup(path);
7804 if (!map->pin_path) {
7805 err = -errno;
7806 goto out_err;
7807 }
7808 }
7809
7810 err = make_parent_dir(map->pin_path);
7811 if (err)
7812 return err;
7813
7814 err = check_path(map->pin_path);
7815 if (err)
7816 return err;
7817
7818 if (bpf_obj_pin(map->fd, map->pin_path)) {
7819 err = -errno;
7820 goto out_err;
7821 }
7822
7823 map->pinned = true;
7824 pr_debug("pinned map '%s'\n", map->pin_path);
7825
7826 return 0;
7827
7828out_err:
7829 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
7830 pr_warn("failed to pin map: %s\n", cp);
7831 return err;
7832}
7833
7834int bpf_map__unpin(struct bpf_map *map, const char *path)
7835{
7836 int err;
7837
7838 if (map == NULL) {
7839 pr_warn("invalid map pointer\n");
7840 return -EINVAL;
7841 }
7842
7843 if (map->pin_path) {
7844 if (path && strcmp(path, map->pin_path)) {
7845 pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
7846 bpf_map__name(map), map->pin_path, path);
7847 return -EINVAL;
7848 }
7849 path = map->pin_path;
7850 } else if (!path) {
7851 pr_warn("no path to unpin map '%s' from\n",
7852 bpf_map__name(map));
7853 return -EINVAL;
7854 }
7855
7856 err = check_path(path);
7857 if (err)
7858 return err;
7859
7860 err = unlink(path);
7861 if (err != 0)
7862 return -errno;
7863
7864 map->pinned = false;
7865 pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path);
7866
7867 return 0;
7868}
7869
7870int bpf_map__set_pin_path(struct bpf_map *map, const char *path)
7871{
7872 char *new = NULL;
7873
7874 if (path) {
7875 new = strdup(path);
7876 if (!new)
7877 return -errno;
7878 }
7879
7880 free(map->pin_path);
7881 map->pin_path = new;
7882 return 0;
7883}
7884
7885const char *bpf_map__get_pin_path(const struct bpf_map *map)
7886{
7887 return map->pin_path;
7888}
7889
7890bool bpf_map__is_pinned(const struct bpf_map *map)
7891{
7892 return map->pinned;
7893}
7894
7895static void sanitize_pin_path(char *s)
7896{
	/* bpffs disallows periods in path names */
7898 while (*s) {
7899 if (*s == '.')
7900 *s = '_';
7901 s++;
7902 }
7903}
7904
7905int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
7906{
7907 struct bpf_map *map;
7908 int err;
7909
7910 if (!obj)
7911 return -ENOENT;
7912
7913 if (!obj->loaded) {
7914 pr_warn("object not yet loaded; load it first\n");
7915 return -ENOENT;
7916 }
7917
7918 bpf_object__for_each_map(map, obj) {
7919 char *pin_path = NULL;
7920 char buf[PATH_MAX];
7921
7922 if (path) {
7923 int len;
7924
7925 len = snprintf(buf, PATH_MAX, "%s/%s", path,
7926 bpf_map__name(map));
7927 if (len < 0) {
7928 err = -EINVAL;
7929 goto err_unpin_maps;
7930 } else if (len >= PATH_MAX) {
7931 err = -ENAMETOOLONG;
7932 goto err_unpin_maps;
7933 }
7934 sanitize_pin_path(buf);
7935 pin_path = buf;
7936 } else if (!map->pin_path) {
7937 continue;
7938 }
7939
7940 err = bpf_map__pin(map, pin_path);
7941 if (err)
7942 goto err_unpin_maps;
7943 }
7944
7945 return 0;
7946
7947err_unpin_maps:
7948 while ((map = bpf_map__prev(map, obj))) {
7949 if (!map->pin_path)
7950 continue;
7951
7952 bpf_map__unpin(map, NULL);
7953 }
7954
7955 return err;
7956}
7957
7958int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
7959{
7960 struct bpf_map *map;
7961 int err;
7962
7963 if (!obj)
7964 return -ENOENT;
7965
7966 bpf_object__for_each_map(map, obj) {
7967 char *pin_path = NULL;
7968 char buf[PATH_MAX];
7969
7970 if (path) {
7971 int len;
7972
7973 len = snprintf(buf, PATH_MAX, "%s/%s", path,
7974 bpf_map__name(map));
7975 if (len < 0)
7976 return -EINVAL;
7977 else if (len >= PATH_MAX)
7978 return -ENAMETOOLONG;
7979 sanitize_pin_path(buf);
7980 pin_path = buf;
7981 } else if (!map->pin_path) {
7982 continue;
7983 }
7984
7985 err = bpf_map__unpin(map, pin_path);
7986 if (err)
7987 return err;
7988 }
7989
7990 return 0;
7991}
7992
7993int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
7994{
7995 struct bpf_program *prog;
7996 int err;
7997
7998 if (!obj)
7999 return -ENOENT;
8000
8001 if (!obj->loaded) {
8002 pr_warn("object not yet loaded; load it first\n");
8003 return -ENOENT;
8004 }
8005
8006 bpf_object__for_each_program(prog, obj) {
8007 char buf[PATH_MAX];
8008 int len;
8009
8010 len = snprintf(buf, PATH_MAX, "%s/%s", path,
8011 prog->pin_name);
8012 if (len < 0) {
8013 err = -EINVAL;
8014 goto err_unpin_programs;
8015 } else if (len >= PATH_MAX) {
8016 err = -ENAMETOOLONG;
8017 goto err_unpin_programs;
8018 }
8019
8020 err = bpf_program__pin(prog, buf);
8021 if (err)
8022 goto err_unpin_programs;
8023 }
8024
8025 return 0;
8026
8027err_unpin_programs:
8028 while ((prog = bpf_program__prev(prog, obj))) {
8029 char buf[PATH_MAX];
8030 int len;
8031
8032 len = snprintf(buf, PATH_MAX, "%s/%s", path,
8033 prog->pin_name);
8034 if (len < 0)
8035 continue;
8036 else if (len >= PATH_MAX)
8037 continue;
8038
8039 bpf_program__unpin(prog, buf);
8040 }
8041
8042 return err;
8043}
8044
8045int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
8046{
8047 struct bpf_program *prog;
8048 int err;
8049
8050 if (!obj)
8051 return -ENOENT;
8052
8053 bpf_object__for_each_program(prog, obj) {
8054 char buf[PATH_MAX];
8055 int len;
8056
8057 len = snprintf(buf, PATH_MAX, "%s/%s", path,
8058 prog->pin_name);
8059 if (len < 0)
8060 return -EINVAL;
8061 else if (len >= PATH_MAX)
8062 return -ENAMETOOLONG;
8063
8064 err = bpf_program__unpin(prog, buf);
8065 if (err)
8066 return err;
8067 }
8068
8069 return 0;
8070}
8071
8072int bpf_object__pin(struct bpf_object *obj, const char *path)
8073{
8074 int err;
8075
8076 err = bpf_object__pin_maps(obj, path);
8077 if (err)
8078 return err;
8079
8080 err = bpf_object__pin_programs(obj, path);
8081 if (err) {
8082 bpf_object__unpin_maps(obj, path);
8083 return err;
8084 }
8085
8086 return 0;
8087}
8088
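/* free everything a map owns: private data, inner map definition,
 * init_slots, mmap'ed region, struct_ops bookkeeping, name, pin path,
 * and the map fd itself
 */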
8089static void bpf_map__destroy(struct bpf_map *map)
8090{
8091 if (map->clear_priv)
8092 map->clear_priv(map, map->priv);
8093 map->priv = NULL;
8094 map->clear_priv = NULL;
8095
8096 if (map->inner_map) {
8097 bpf_map__destroy(map->inner_map);
8098 zfree(&map->inner_map);
8099 }
8100
8101 zfree(&map->init_slots);
8102 map->init_slots_sz = 0;
8103
8104 if (map->mmaped) {
8105 munmap(map->mmaped, bpf_map_mmap_sz(map));
8106 map->mmaped = NULL;
8107 }
8108
8109 if (map->st_ops) {
8110 zfree(&map->st_ops->data);
8111 zfree(&map->st_ops->progs);
8112 zfree(&map->st_ops->kern_func_off);
8113 zfree(&map->st_ops);
8114 }
8115
8116 zfree(&map->name);
8117 zfree(&map->pin_path);
8118
8119 if (map->fd >= 0)
8120 zclose(map->fd);
8121}
8122
8123void bpf_object__close(struct bpf_object *obj)
8124{
8125 size_t i;
8126
8127 if (IS_ERR_OR_NULL(obj))
8128 return;
8129
8130 if (obj->clear_priv)
8131 obj->clear_priv(obj, obj->priv);
8132
8133 bpf_object__elf_finish(obj);
8134 bpf_object__unload(obj);
8135 btf__free(obj->btf);
8136 btf_ext__free(obj->btf_ext);
8137
8138 for (i = 0; i < obj->nr_maps; i++)
8139 bpf_map__destroy(&obj->maps[i]);
8140
8141 zfree(&obj->kconfig);
8142 zfree(&obj->externs);
8143 obj->nr_extern = 0;
8144
8145 zfree(&obj->maps);
8146 obj->nr_maps = 0;
8147
8148 if (obj->programs && obj->nr_programs) {
8149 for (i = 0; i < obj->nr_programs; i++)
8150 bpf_program__exit(&obj->programs[i]);
8151 }
8152 zfree(&obj->programs);
8153
8154 list_del(&obj->list);
8155 free(obj);
8156}
8157
8158struct bpf_object *
8159bpf_object__next(struct bpf_object *prev)
8160{
8161 struct bpf_object *next;
8162
8163 if (!prev)
8164 next = list_first_entry(&bpf_objects_list,
8165 struct bpf_object,
8166 list);
8167 else
8168 next = list_next_entry(prev, list);
8169
	/* reaching the list head means there is no next object */
8171 if (&next->list == &bpf_objects_list)
8172 return NULL;
8173
8174 return next;
8175}
8176
8177const char *bpf_object__name(const struct bpf_object *obj)
8178{
8179 return obj ? obj->name : ERR_PTR(-EINVAL);
8180}
8181
8182unsigned int bpf_object__kversion(const struct bpf_object *obj)
8183{
8184 return obj ? obj->kern_version : 0;
8185}
8186
8187struct btf *bpf_object__btf(const struct bpf_object *obj)
8188{
8189 return obj ? obj->btf : NULL;
8190}
8191
8192int bpf_object__btf_fd(const struct bpf_object *obj)
8193{
8194 return obj->btf ? btf__fd(obj->btf) : -1;
8195}
8196
8197int bpf_object__set_priv(struct bpf_object *obj, void *priv,
8198 bpf_object_clear_priv_t clear_priv)
8199{
8200 if (obj->priv && obj->clear_priv)
8201 obj->clear_priv(obj, obj->priv);
8202
8203 obj->priv = priv;
8204 obj->clear_priv = clear_priv;
8205 return 0;
8206}
8207
8208void *bpf_object__priv(const struct bpf_object *obj)
8209{
8210 return obj ? obj->priv : ERR_PTR(-EINVAL);
8211}
8212
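/* iterate obj's programs forward or backward; a NULL 'p' starts the
 * iteration at the first (or last) program
 */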
8213static struct bpf_program *
8214__bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
8215 bool forward)
8216{
8217 size_t nr_programs = obj->nr_programs;
8218 ssize_t idx;
8219
8220 if (!nr_programs)
8221 return NULL;
8222
8223 if (!p)
		/* Iter from the beginning */
8225 return forward ? &obj->programs[0] :
8226 &obj->programs[nr_programs - 1];
8227
8228 if (p->obj != obj) {
8229 pr_warn("error: program handler doesn't match object\n");
8230 return NULL;
8231 }
8232
8233 idx = (p - obj->programs) + (forward ? 1 : -1);
8234 if (idx >= obj->nr_programs || idx < 0)
8235 return NULL;
8236 return &obj->programs[idx];
8237}
8238
8239struct bpf_program *
8240bpf_program__next(struct bpf_program *prev, const struct bpf_object *obj)
8241{
8242 struct bpf_program *prog = prev;
8243
8244 do {
8245 prog = __bpf_program__iter(prog, obj, true);
8246 } while (prog && prog_is_subprog(obj, prog));
8247
8248 return prog;
8249}
8250
8251struct bpf_program *
8252bpf_program__prev(struct bpf_program *next, const struct bpf_object *obj)
8253{
8254 struct bpf_program *prog = next;
8255
8256 do {
8257 prog = __bpf_program__iter(prog, obj, false);
8258 } while (prog && prog_is_subprog(obj, prog));
8259
8260 return prog;
8261}
8262
8263int bpf_program__set_priv(struct bpf_program *prog, void *priv,
8264 bpf_program_clear_priv_t clear_priv)
8265{
8266 if (prog->priv && prog->clear_priv)
8267 prog->clear_priv(prog, prog->priv);
8268
8269 prog->priv = priv;
8270 prog->clear_priv = clear_priv;
8271 return 0;
8272}
8273
8274void *bpf_program__priv(const struct bpf_program *prog)
8275{
8276 return prog ? prog->priv : ERR_PTR(-EINVAL);
8277}
8278
8279void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
8280{
8281 prog->prog_ifindex = ifindex;
8282}
8283
8284const char *bpf_program__name(const struct bpf_program *prog)
8285{
8286 return prog->name;
8287}
8288
8289const char *bpf_program__section_name(const struct bpf_program *prog)
8290{
8291 return prog->sec_name;
8292}
8293
8294const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy)
8295{
8296 const char *title;
8297
8298 title = prog->sec_name;
8299 if (needs_copy) {
8300 title = strdup(title);
8301 if (!title) {
8302 pr_warn("failed to strdup program title\n");
8303 return ERR_PTR(-ENOMEM);
8304 }
8305 }
8306
8307 return title;
8308}
8309
8310bool bpf_program__autoload(const struct bpf_program *prog)
8311{
8312 return prog->load;
8313}
8314
8315int bpf_program__set_autoload(struct bpf_program *prog, bool autoload)
8316{
8317 if (prog->obj->loaded)
8318 return -EINVAL;
8319
8320 prog->load = autoload;
8321 return 0;
8322}
8323
8324int bpf_program__fd(const struct bpf_program *prog)
8325{
8326 return bpf_program__nth_fd(prog, 0);
8327}
8328
8329size_t bpf_program__size(const struct bpf_program *prog)
8330{
8331 return prog->insns_cnt * BPF_INSN_SZ;
8332}
8333
8334int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
8335 bpf_program_prep_t prep)
8336{
8337 int *instances_fds;
8338
8339 if (nr_instances <= 0 || !prep)
8340 return -EINVAL;
8341
8342 if (prog->instances.nr > 0 || prog->instances.fds) {
8343 pr_warn("Can't set pre-processor after loading\n");
8344 return -EINVAL;
8345 }
8346
8347 instances_fds = malloc(sizeof(int) * nr_instances);
8348 if (!instances_fds) {
		pr_warn("failed to allocate memory for instance fds\n");
8350 return -ENOMEM;
8351 }
8352
	/* fill all fds with -1 */
8354 memset(instances_fds, -1, sizeof(int) * nr_instances);
8355
8356 prog->instances.nr = nr_instances;
8357 prog->instances.fds = instances_fds;
8358 prog->preprocessor = prep;
8359 return 0;
8360}
8361
8362int bpf_program__nth_fd(const struct bpf_program *prog, int n)
8363{
8364 int fd;
8365
8366 if (!prog)
8367 return -EINVAL;
8368
8369 if (n >= prog->instances.nr || n < 0) {
8370 pr_warn("Can't get the %dth fd from program %s: only %d instances\n",
8371 n, prog->name, prog->instances.nr);
8372 return -EINVAL;
8373 }
8374
8375 fd = prog->instances.fds[n];
8376 if (fd < 0) {
8377 pr_warn("%dth instance of program '%s' is invalid\n",
8378 n, prog->name);
8379 return -ENOENT;
8380 }
8381
8382 return fd;
8383}
8384
8385enum bpf_prog_type bpf_program__get_type(struct bpf_program *prog)
8386{
8387 return prog->type;
8388}
8389
8390void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
8391{
8392 prog->type = type;
8393}
8394
8395static bool bpf_program__is_type(const struct bpf_program *prog,
8396 enum bpf_prog_type type)
8397{
8398 return prog ? (prog->type == type) : false;
8399}
8400
8401#define BPF_PROG_TYPE_FNS(NAME, TYPE) \
8402int bpf_program__set_##NAME(struct bpf_program *prog) \
8403{ \
8404 if (!prog) \
8405 return -EINVAL; \
8406 bpf_program__set_type(prog, TYPE); \
8407 return 0; \
8408} \
8409 \
8410bool bpf_program__is_##NAME(const struct bpf_program *prog) \
8411{ \
8412 return bpf_program__is_type(prog, TYPE); \
8413} \
8414
8415BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
8416BPF_PROG_TYPE_FNS(lsm, BPF_PROG_TYPE_LSM);
8417BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
8418BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
8419BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
8420BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
8421BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
8422BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
8423BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
8424BPF_PROG_TYPE_FNS(tracing, BPF_PROG_TYPE_TRACING);
8425BPF_PROG_TYPE_FNS(struct_ops, BPF_PROG_TYPE_STRUCT_OPS);
8426BPF_PROG_TYPE_FNS(extension, BPF_PROG_TYPE_EXT);
8427BPF_PROG_TYPE_FNS(sk_lookup, BPF_PROG_TYPE_SK_LOOKUP);
8428
8429enum bpf_attach_type
8430bpf_program__get_expected_attach_type(struct bpf_program *prog)
8431{
8432 return prog->expected_attach_type;
8433}
8434
8435void bpf_program__set_expected_attach_type(struct bpf_program *prog,
8436 enum bpf_attach_type type)
8437{
8438 prog->expected_attach_type = type;
8439}
8440
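/* common initializer shared by the section definition macros below */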
8441#define BPF_PROG_SEC_IMPL(string, ptype, eatype, eatype_optional, \
8442 attachable, attach_btf) \
8443 { \
8444 .sec = string, \
8445 .len = sizeof(string) - 1, \
8446 .prog_type = ptype, \
8447 .expected_attach_type = eatype, \
8448 .is_exp_attach_type_optional = eatype_optional, \
8449 .is_attachable = attachable, \
8450 .is_attach_btf = attach_btf, \
8451 }
8452
/* Programs that can NOT be attached. */
8454#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0, 0)
8455
/* Programs that can be attached. */
8457#define BPF_APROG_SEC(string, ptype, atype) \
8458 BPF_PROG_SEC_IMPL(string, ptype, atype, true, 1, 0)
8459
/* Programs that must specify expected attach type at load time. */
8461#define BPF_EAPROG_SEC(string, ptype, eatype) \
8462 BPF_PROG_SEC_IMPL(string, ptype, eatype, false, 1, 0)
8463
/* Programs that use BTF to identify attach point */
8465#define BPF_PROG_BTF(string, ptype, eatype) \
8466 BPF_PROG_SEC_IMPL(string, ptype, eatype, false, 0, 1)
8467
/* Programs that can be attached but attach type can't be identified by
 * section name. Kept for backward compatibility.
 */
8471#define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)
8472
8473#define SEC_DEF(sec_pfx, ptype, ...) { \
8474 .sec = sec_pfx, \
8475 .len = sizeof(sec_pfx) - 1, \
8476 .prog_type = BPF_PROG_TYPE_##ptype, \
8477 __VA_ARGS__ \
8478}
8479
8480static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec,
8481 struct bpf_program *prog);
8482static struct bpf_link *attach_tp(const struct bpf_sec_def *sec,
8483 struct bpf_program *prog);
8484static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
8485 struct bpf_program *prog);
8486static struct bpf_link *attach_trace(const struct bpf_sec_def *sec,
8487 struct bpf_program *prog);
8488static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec,
8489 struct bpf_program *prog);
8490static struct bpf_link *attach_iter(const struct bpf_sec_def *sec,
8491 struct bpf_program *prog);
8492
8493static const struct bpf_sec_def section_defs[] = {
8494 BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER),
8495 BPF_PROG_SEC("sk_reuseport", BPF_PROG_TYPE_SK_REUSEPORT),
8496 SEC_DEF("kprobe/", KPROBE,
8497 .attach_fn = attach_kprobe),
8498 BPF_PROG_SEC("uprobe/", BPF_PROG_TYPE_KPROBE),
8499 SEC_DEF("kretprobe/", KPROBE,
8500 .attach_fn = attach_kprobe),
8501 BPF_PROG_SEC("uretprobe/", BPF_PROG_TYPE_KPROBE),
8502 BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS),
8503 BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT),
8504 SEC_DEF("tracepoint/", TRACEPOINT,
8505 .attach_fn = attach_tp),
8506 SEC_DEF("tp/", TRACEPOINT,
8507 .attach_fn = attach_tp),
8508 SEC_DEF("raw_tracepoint/", RAW_TRACEPOINT,
8509 .attach_fn = attach_raw_tp),
8510 SEC_DEF("raw_tp/", RAW_TRACEPOINT,
8511 .attach_fn = attach_raw_tp),
8512 SEC_DEF("tp_btf/", TRACING,
8513 .expected_attach_type = BPF_TRACE_RAW_TP,
8514 .is_attach_btf = true,
8515 .attach_fn = attach_trace),
8516 SEC_DEF("fentry/", TRACING,
8517 .expected_attach_type = BPF_TRACE_FENTRY,
8518 .is_attach_btf = true,
8519 .attach_fn = attach_trace),
8520 SEC_DEF("fmod_ret/", TRACING,
8521 .expected_attach_type = BPF_MODIFY_RETURN,
8522 .is_attach_btf = true,
8523 .attach_fn = attach_trace),
8524 SEC_DEF("fexit/", TRACING,
8525 .expected_attach_type = BPF_TRACE_FEXIT,
8526 .is_attach_btf = true,
8527 .attach_fn = attach_trace),
8528 SEC_DEF("fentry.s/", TRACING,
8529 .expected_attach_type = BPF_TRACE_FENTRY,
8530 .is_attach_btf = true,
8531 .is_sleepable = true,
8532 .attach_fn = attach_trace),
8533 SEC_DEF("fmod_ret.s/", TRACING,
8534 .expected_attach_type = BPF_MODIFY_RETURN,
8535 .is_attach_btf = true,
8536 .is_sleepable = true,
8537 .attach_fn = attach_trace),
8538 SEC_DEF("fexit.s/", TRACING,
8539 .expected_attach_type = BPF_TRACE_FEXIT,
8540 .is_attach_btf = true,
8541 .is_sleepable = true,
8542 .attach_fn = attach_trace),
8543 SEC_DEF("freplace/", EXT,
8544 .is_attach_btf = true,
8545 .attach_fn = attach_trace),
8546 SEC_DEF("lsm/", LSM,
8547 .is_attach_btf = true,
8548 .expected_attach_type = BPF_LSM_MAC,
8549 .attach_fn = attach_lsm),
8550 SEC_DEF("lsm.s/", LSM,
8551 .is_attach_btf = true,
8552 .is_sleepable = true,
8553 .expected_attach_type = BPF_LSM_MAC,
8554 .attach_fn = attach_lsm),
8555 SEC_DEF("iter/", TRACING,
8556 .expected_attach_type = BPF_TRACE_ITER,
8557 .is_attach_btf = true,
8558 .attach_fn = attach_iter),
8559 BPF_EAPROG_SEC("xdp_devmap/", BPF_PROG_TYPE_XDP,
8560 BPF_XDP_DEVMAP),
8561 BPF_EAPROG_SEC("xdp_cpumap/", BPF_PROG_TYPE_XDP,
8562 BPF_XDP_CPUMAP),
8563 BPF_APROG_SEC("xdp", BPF_PROG_TYPE_XDP,
8564 BPF_XDP),
8565 BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
8566 BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN),
8567 BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT),
8568 BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT),
8569 BPF_PROG_SEC("lwt_seg6local", BPF_PROG_TYPE_LWT_SEG6LOCAL),
8570 BPF_APROG_SEC("cgroup_skb/ingress", BPF_PROG_TYPE_CGROUP_SKB,
8571 BPF_CGROUP_INET_INGRESS),
8572 BPF_APROG_SEC("cgroup_skb/egress", BPF_PROG_TYPE_CGROUP_SKB,
8573 BPF_CGROUP_INET_EGRESS),
8574 BPF_APROG_COMPAT("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB),
8575 BPF_EAPROG_SEC("cgroup/sock_create", BPF_PROG_TYPE_CGROUP_SOCK,
8576 BPF_CGROUP_INET_SOCK_CREATE),
8577 BPF_EAPROG_SEC("cgroup/sock_release", BPF_PROG_TYPE_CGROUP_SOCK,
8578 BPF_CGROUP_INET_SOCK_RELEASE),
8579 BPF_APROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK,
8580 BPF_CGROUP_INET_SOCK_CREATE),
8581 BPF_EAPROG_SEC("cgroup/post_bind4", BPF_PROG_TYPE_CGROUP_SOCK,
8582 BPF_CGROUP_INET4_POST_BIND),
8583 BPF_EAPROG_SEC("cgroup/post_bind6", BPF_PROG_TYPE_CGROUP_SOCK,
8584 BPF_CGROUP_INET6_POST_BIND),
8585 BPF_APROG_SEC("cgroup/dev", BPF_PROG_TYPE_CGROUP_DEVICE,
8586 BPF_CGROUP_DEVICE),
8587 BPF_APROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS,
8588 BPF_CGROUP_SOCK_OPS),
8589 BPF_APROG_SEC("sk_skb/stream_parser", BPF_PROG_TYPE_SK_SKB,
8590 BPF_SK_SKB_STREAM_PARSER),
8591 BPF_APROG_SEC("sk_skb/stream_verdict", BPF_PROG_TYPE_SK_SKB,
8592 BPF_SK_SKB_STREAM_VERDICT),
8593 BPF_APROG_COMPAT("sk_skb", BPF_PROG_TYPE_SK_SKB),
8594 BPF_APROG_SEC("sk_msg", BPF_PROG_TYPE_SK_MSG,
8595 BPF_SK_MSG_VERDICT),
8596 BPF_APROG_SEC("lirc_mode2", BPF_PROG_TYPE_LIRC_MODE2,
8597 BPF_LIRC_MODE2),
8598 BPF_APROG_SEC("flow_dissector", BPF_PROG_TYPE_FLOW_DISSECTOR,
8599 BPF_FLOW_DISSECTOR),
8600 BPF_EAPROG_SEC("cgroup/bind4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8601 BPF_CGROUP_INET4_BIND),
8602 BPF_EAPROG_SEC("cgroup/bind6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8603 BPF_CGROUP_INET6_BIND),
8604 BPF_EAPROG_SEC("cgroup/connect4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8605 BPF_CGROUP_INET4_CONNECT),
8606 BPF_EAPROG_SEC("cgroup/connect6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8607 BPF_CGROUP_INET6_CONNECT),
8608 BPF_EAPROG_SEC("cgroup/sendmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8609 BPF_CGROUP_UDP4_SENDMSG),
8610 BPF_EAPROG_SEC("cgroup/sendmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8611 BPF_CGROUP_UDP6_SENDMSG),
8612 BPF_EAPROG_SEC("cgroup/recvmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8613 BPF_CGROUP_UDP4_RECVMSG),
8614 BPF_EAPROG_SEC("cgroup/recvmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8615 BPF_CGROUP_UDP6_RECVMSG),
8616 BPF_EAPROG_SEC("cgroup/getpeername4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8617 BPF_CGROUP_INET4_GETPEERNAME),
8618 BPF_EAPROG_SEC("cgroup/getpeername6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8619 BPF_CGROUP_INET6_GETPEERNAME),
8620 BPF_EAPROG_SEC("cgroup/getsockname4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8621 BPF_CGROUP_INET4_GETSOCKNAME),
8622 BPF_EAPROG_SEC("cgroup/getsockname6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8623 BPF_CGROUP_INET6_GETSOCKNAME),
8624 BPF_EAPROG_SEC("cgroup/sysctl", BPF_PROG_TYPE_CGROUP_SYSCTL,
8625 BPF_CGROUP_SYSCTL),
8626 BPF_EAPROG_SEC("cgroup/getsockopt", BPF_PROG_TYPE_CGROUP_SOCKOPT,
8627 BPF_CGROUP_GETSOCKOPT),
8628 BPF_EAPROG_SEC("cgroup/setsockopt", BPF_PROG_TYPE_CGROUP_SOCKOPT,
8629 BPF_CGROUP_SETSOCKOPT),
8630 BPF_PROG_SEC("struct_ops", BPF_PROG_TYPE_STRUCT_OPS),
8631 BPF_EAPROG_SEC("sk_lookup/", BPF_PROG_TYPE_SK_LOOKUP,
8632 BPF_SK_LOOKUP),
8633};
8634
8635#undef BPF_PROG_SEC_IMPL
8636#undef BPF_PROG_SEC
8637#undef BPF_APROG_SEC
8638#undef BPF_EAPROG_SEC
8639#undef BPF_APROG_COMPAT
8640#undef SEC_DEF
8641
8642#define MAX_TYPE_NAME_SIZE 32
8643
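/* match 'sec_name' against the known section prefixes; first match wins */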
8644static const struct bpf_sec_def *find_sec_def(const char *sec_name)
8645{
8646 int i, n = ARRAY_SIZE(section_defs);
8647
8648 for (i = 0; i < n; i++) {
8649 if (strncmp(sec_name,
8650 section_defs[i].sec, section_defs[i].len))
8651 continue;
		return &section_defs[i];
8653 }
8654 return NULL;
8655}
8656
8657static char *libbpf_get_type_names(bool attach_type)
8658{
8659 int i, len = ARRAY_SIZE(section_defs) * MAX_TYPE_NAME_SIZE;
8660 char *buf;
8661
8662 buf = malloc(len);
8663 if (!buf)
8664 return NULL;
8665
8666 buf[0] = '\0';
8667
8668 for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
8669 if (attach_type && !section_defs[i].is_attachable)
8670 continue;
8671
8672 if (strlen(buf) + strlen(section_defs[i].sec) + 2 > len) {
8673 free(buf);
8674 return NULL;
8675 }
8676 strcat(buf, " ");
8677 strcat(buf, section_defs[i].sec);
8678 }
8679
8680 return buf;
8681}
8682
8683int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
8684 enum bpf_attach_type *expected_attach_type)
8685{
8686 const struct bpf_sec_def *sec_def;
8687 char *type_names;
8688
8689 if (!name)
8690 return -EINVAL;
8691
8692 sec_def = find_sec_def(name);
8693 if (sec_def) {
8694 *prog_type = sec_def->prog_type;
8695 *expected_attach_type = sec_def->expected_attach_type;
8696 return 0;
8697 }
8698
8699 pr_debug("failed to guess program type from ELF section '%s'\n", name);
8700 type_names = libbpf_get_type_names(false);
8701 if (type_names != NULL) {
8702 pr_debug("supported section(type) names are:%s\n", type_names);
8703 free(type_names);
8704 }
8705
8706 return -ESRCH;
8707}
8708
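/* find the struct_ops map whose variable spans the given section offset */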
8709static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
8710 size_t offset)
8711{
8712 struct bpf_map *map;
8713 size_t i;
8714
8715 for (i = 0; i < obj->nr_maps; i++) {
8716 map = &obj->maps[i];
8717 if (!bpf_map__is_struct_ops(map))
8718 continue;
8719 if (map->sec_offset <= offset &&
8720 offset - map->sec_offset < map->def.value_size)
8721 return map;
8722 }
8723
8724 return NULL;
8725}
8726
/* Collect the relocations from ELF and populate st_ops->progs[] */
8728static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
8729 GElf_Shdr *shdr, Elf_Data *data)
8730{
8731 const struct btf_member *member;
8732 struct bpf_struct_ops *st_ops;
8733 struct bpf_program *prog;
8734 unsigned int shdr_idx;
8735 const struct btf *btf;
8736 struct bpf_map *map;
8737 Elf_Data *symbols;
8738 unsigned int moff, insn_idx;
8739 const char *name;
8740 __u32 member_idx;
8741 GElf_Sym sym;
8742 GElf_Rel rel;
8743 int i, nrels;
8744
8745 symbols = obj->efile.symbols;
8746 btf = obj->btf;
8747 nrels = shdr->sh_size / shdr->sh_entsize;
8748 for (i = 0; i < nrels; i++) {
8749 if (!gelf_getrel(data, i, &rel)) {
8750 pr_warn("struct_ops reloc: failed to get %d reloc\n", i);
8751 return -LIBBPF_ERRNO__FORMAT;
8752 }
8753
8754 if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
8755 pr_warn("struct_ops reloc: symbol %zx not found\n",
8756 (size_t)GELF_R_SYM(rel.r_info));
8757 return -LIBBPF_ERRNO__FORMAT;
8758 }
8759
8760 name = elf_sym_str(obj, sym.st_name) ?: "<?>";
8761 map = find_struct_ops_map_by_offset(obj, rel.r_offset);
8762 if (!map) {
8763 pr_warn("struct_ops reloc: cannot find map at rel.r_offset %zu\n",
8764 (size_t)rel.r_offset);
8765 return -EINVAL;
8766 }
8767
8768 moff = rel.r_offset - map->sec_offset;
8769 shdr_idx = sym.st_shndx;
8770 st_ops = map->st_ops;
8771 pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel.r_offset %zu map->sec_offset %zu name %d (\'%s\')\n",
8772 map->name,
8773 (long long)(rel.r_info >> 32),
8774 (long long)sym.st_value,
8775 shdr_idx, (size_t)rel.r_offset,
8776 map->sec_offset, sym.st_name, name);
8777
8778 if (shdr_idx >= SHN_LORESERVE) {
8779 pr_warn("struct_ops reloc %s: rel.r_offset %zu shdr_idx %u unsupported non-static function\n",
8780 map->name, (size_t)rel.r_offset, shdr_idx);
8781 return -LIBBPF_ERRNO__RELOC;
8782 }
8783 if (sym.st_value % BPF_INSN_SZ) {
8784 pr_warn("struct_ops reloc %s: invalid target program offset %llu\n",
8785 map->name, (unsigned long long)sym.st_value);
8786 return -LIBBPF_ERRNO__FORMAT;
8787 }
8788 insn_idx = sym.st_value / BPF_INSN_SZ;
8789
8790 member = find_member_by_offset(st_ops->type, moff * 8);
8791 if (!member) {
8792 pr_warn("struct_ops reloc %s: cannot find member at moff %u\n",
8793 map->name, moff);
8794 return -EINVAL;
8795 }
8796 member_idx = member - btf_members(st_ops->type);
8797 name = btf__name_by_offset(btf, member->name_off);
8798
8799 if (!resolve_func_ptr(btf, member->type, NULL)) {
8800 pr_warn("struct_ops reloc %s: cannot relocate non func ptr %s\n",
8801 map->name, name);
8802 return -EINVAL;
8803 }
8804
8805 prog = find_prog_by_sec_insn(obj, shdr_idx, insn_idx);
8806 if (!prog) {
8807 pr_warn("struct_ops reloc %s: cannot find prog at shdr_idx %u to relocate func ptr %s\n",
8808 map->name, shdr_idx, name);
8809 return -EINVAL;
8810 }
8811
8812 if (prog->type == BPF_PROG_TYPE_UNSPEC) {
8813 const struct bpf_sec_def *sec_def;
8814
8815 sec_def = find_sec_def(prog->sec_name);
8816 if (sec_def &&
8817 sec_def->prog_type != BPF_PROG_TYPE_STRUCT_OPS) {
				/* set the type only for the invalid_prog warning */
8819 prog->type = sec_def->prog_type;
8820 goto invalid_prog;
8821 }
8822
8823 prog->type = BPF_PROG_TYPE_STRUCT_OPS;
8824 prog->attach_btf_id = st_ops->type_id;
8825 prog->expected_attach_type = member_idx;
8826 } else if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
8827 prog->attach_btf_id != st_ops->type_id ||
8828 prog->expected_attach_type != member_idx) {
8829 goto invalid_prog;
8830 }
8831 st_ops->progs[member_idx] = prog;
8832 }
8833
8834 return 0;
8835
8836invalid_prog:
8837 pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n",
8838 map->name, prog->name, prog->sec_name, prog->type,
8839 prog->attach_btf_id, prog->expected_attach_type, name);
8840 return -EINVAL;
8841}
8842
8843#define BTF_TRACE_PREFIX "btf_trace_"
8844#define BTF_LSM_PREFIX "bpf_lsm_"
8845#define BTF_ITER_PREFIX "bpf_iter_"
8846#define BTF_MAX_NAME_SIZE 128
8847
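/* look up "<prefix><name>" of the given BTF kind in 'btf' */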
8848static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
8849 const char *name, __u32 kind)
8850{
8851 char btf_type_name[BTF_MAX_NAME_SIZE];
8852 int ret;
8853
8854 ret = snprintf(btf_type_name, sizeof(btf_type_name),
8855 "%s%s", prefix, name);
8856
	/* snprintf returns the number of characters it would have written
	 * (excluding the terminating NUL), so ret >= sizeof(btf_type_name)
	 * means the prefixed name was truncated
	 */
8860 if (ret < 0 || ret >= sizeof(btf_type_name))
8861 return -ENAMETOOLONG;
8862 return btf__find_by_name_kind(btf, btf_type_name, kind);
8863}
8864
8865static inline int find_attach_btf_id(struct btf *btf, const char *name,
8866 enum bpf_attach_type attach_type)
8867{
8868 int err;
8869
8870 if (attach_type == BPF_TRACE_RAW_TP)
8871 err = find_btf_by_prefix_kind(btf, BTF_TRACE_PREFIX, name,
8872 BTF_KIND_TYPEDEF);
8873 else if (attach_type == BPF_LSM_MAC)
8874 err = find_btf_by_prefix_kind(btf, BTF_LSM_PREFIX, name,
8875 BTF_KIND_FUNC);
8876 else if (attach_type == BPF_TRACE_ITER)
8877 err = find_btf_by_prefix_kind(btf, BTF_ITER_PREFIX, name,
8878 BTF_KIND_FUNC);
8879 else
8880 err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
8881
8882 return err;
8883}
8884
8885int libbpf_find_vmlinux_btf_id(const char *name,
8886 enum bpf_attach_type attach_type)
8887{
8888 struct btf *btf;
8889 int err;
8890
8891 btf = libbpf_find_kernel_btf();
8892 if (IS_ERR(btf)) {
8893 pr_warn("vmlinux BTF is not found\n");
8894 return -EINVAL;
8895 }
8896
8897 err = find_attach_btf_id(btf, name, attach_type);
8898 if (err <= 0)
8899 pr_warn("%s is not found in vmlinux BTF\n", name);
8900
8901 btf__free(btf);
8902 return err;
8903}
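
/* Usage sketch (hypothetical caller, not part of this file): resolve the
 * BTF ID of an fentry attach target. "tcp_sendmsg" is only an illustrative
 * kernel function name.
 *
 *	int id = libbpf_find_vmlinux_btf_id("tcp_sendmsg", BPF_TRACE_FENTRY);
 *
 *	if (id < 0)
 *		... target not found or vmlinux BTF unavailable ...
 */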
8904
8905static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
8906{
8907 struct bpf_prog_info_linear *info_linear;
8908 struct bpf_prog_info *info;
8909 struct btf *btf = NULL;
8910 int err = -EINVAL;
8911
8912 info_linear = bpf_program__get_prog_info_linear(attach_prog_fd, 0);
8913 if (IS_ERR_OR_NULL(info_linear)) {
8914 pr_warn("failed get_prog_info_linear for FD %d\n",
8915 attach_prog_fd);
8916 return -EINVAL;
8917 }
8918 info = &info_linear->info;
8919 if (!info->btf_id) {
8920 pr_warn("The target program doesn't have BTF\n");
8921 goto out;
8922 }
8923 if (btf__get_from_id(info->btf_id, &btf)) {
8924 pr_warn("Failed to get BTF of the program\n");
8925 goto out;
8926 }
8927 err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
8928 btf__free(btf);
8929 if (err <= 0) {
8930 pr_warn("%s is not found in prog's BTF\n", name);
8931 goto out;
8932 }
8933out:
8934 free(info_linear);
8935 return err;
8936}
8937
8938static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name,
8939 enum bpf_attach_type attach_type,
8940 int *btf_obj_fd, int *btf_type_id)
8941{
8942 int ret, i;
8943
8944 ret = find_attach_btf_id(obj->btf_vmlinux, attach_name, attach_type);
8945 if (ret > 0) {
8946 *btf_obj_fd = 0;
8947 *btf_type_id = ret;
8948 return 0;
8949 }
8950 if (ret != -ENOENT)
8951 return ret;
8952
8953 ret = load_module_btfs(obj);
8954 if (ret)
8955 return ret;
8956
8957 for (i = 0; i < obj->btf_module_cnt; i++) {
8958 const struct module_btf *mod = &obj->btf_modules[i];
8959
8960 ret = find_attach_btf_id(mod->btf, attach_name, attach_type);
8961 if (ret > 0) {
8962 *btf_obj_fd = mod->fd;
8963 *btf_type_id = ret;
8964 return 0;
8965 }
8966 if (ret == -ENOENT)
8967 continue;
8968
8969 return ret;
8970 }
8971
8972 return -ESRCH;
8973}
8974
8975static int libbpf_find_attach_btf_id(struct bpf_program *prog, int *btf_obj_fd, int *btf_type_id)
8976{
8977 enum bpf_attach_type attach_type = prog->expected_attach_type;
8978 __u32 attach_prog_fd = prog->attach_prog_fd;
8979 const char *name = prog->sec_name, *attach_name;
8980 const struct bpf_sec_def *sec = NULL;
8981 int i, err;
8982
8983 if (!name)
8984 return -EINVAL;
8985
8986 for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
8987 if (!section_defs[i].is_attach_btf)
8988 continue;
8989 if (strncmp(name, section_defs[i].sec, section_defs[i].len))
8990 continue;
8991
		sec = &section_defs[i];
8993 break;
8994 }
8995
8996 if (!sec) {
8997 pr_warn("failed to identify BTF ID based on ELF section name '%s'\n", name);
8998 return -ESRCH;
8999 }
9000 attach_name = name + sec->len;
9001
	/* BPF prog's BTF ID: attaching to another BPF program */
9003 if (attach_prog_fd) {
9004 err = libbpf_find_prog_btf_id(attach_name, attach_prog_fd);
9005 if (err < 0) {
9006 pr_warn("failed to find BPF program (FD %d) BTF ID for '%s': %d\n",
9007 attach_prog_fd, attach_name, err);
9008 return err;
9009 }
9010 *btf_obj_fd = 0;
9011 *btf_type_id = err;
9012 return 0;
9013 }
9014
	/* kernel/module BTF ID: attaching to a vmlinux or module function */
9016 err = find_kernel_btf_id(prog->obj, attach_name, attach_type, btf_obj_fd, btf_type_id);
9017 if (err) {
9018 pr_warn("failed to find kernel BTF type ID of '%s': %d\n", attach_name, err);
9019 return err;
9020 }
9021 return 0;
9022}
9023
9024int libbpf_attach_type_by_name(const char *name,
9025 enum bpf_attach_type *attach_type)
9026{
9027 char *type_names;
9028 int i;
9029
9030 if (!name)
9031 return -EINVAL;
9032
9033 for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
9034 if (strncmp(name, section_defs[i].sec, section_defs[i].len))
9035 continue;
9036 if (!section_defs[i].is_attachable)
9037 return -EINVAL;
9038 *attach_type = section_defs[i].expected_attach_type;
9039 return 0;
9040 }
9041 pr_debug("failed to guess attach type based on ELF section name '%s'\n", name);
9042 type_names = libbpf_get_type_names(true);
9043 if (type_names != NULL) {
9044 pr_debug("attachable section(type) names are:%s\n", type_names);
9045 free(type_names);
9046 }
9047
9048 return -EINVAL;
9049}
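
/* Usage sketch (hypothetical caller): map an ELF section name to its
 * expected attach type; the section string below is illustrative.
 *
 *	enum bpf_attach_type atype;
 *
 *	if (libbpf_attach_type_by_name("cgroup_skb/ingress", &atype))
 *		... section name is not attachable this way ...
 */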
9050
9051int bpf_map__fd(const struct bpf_map *map)
9052{
9053 return map ? map->fd : -EINVAL;
9054}
9055
9056const struct bpf_map_def *bpf_map__def(const struct bpf_map *map)
9057{
9058 return map ? &map->def : ERR_PTR(-EINVAL);
9059}
9060
9061const char *bpf_map__name(const struct bpf_map *map)
9062{
9063 return map ? map->name : NULL;
9064}
9065
9066enum bpf_map_type bpf_map__type(const struct bpf_map *map)
9067{
9068 return map->def.type;
9069}
9070
9071int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type)
9072{
9073 if (map->fd >= 0)
9074 return -EBUSY;
9075 map->def.type = type;
9076 return 0;
9077}
9078
9079__u32 bpf_map__map_flags(const struct bpf_map *map)
9080{
9081 return map->def.map_flags;
9082}
9083
9084int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags)
9085{
9086 if (map->fd >= 0)
9087 return -EBUSY;
9088 map->def.map_flags = flags;
9089 return 0;
9090}
9091
9092__u32 bpf_map__numa_node(const struct bpf_map *map)
9093{
9094 return map->numa_node;
9095}
9096
9097int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node)
9098{
9099 if (map->fd >= 0)
9100 return -EBUSY;
9101 map->numa_node = numa_node;
9102 return 0;
9103}
9104
9105__u32 bpf_map__key_size(const struct bpf_map *map)
9106{
9107 return map->def.key_size;
9108}
9109
9110int bpf_map__set_key_size(struct bpf_map *map, __u32 size)
9111{
9112 if (map->fd >= 0)
9113 return -EBUSY;
9114 map->def.key_size = size;
9115 return 0;
9116}
9117
9118__u32 bpf_map__value_size(const struct bpf_map *map)
9119{
9120 return map->def.value_size;
9121}
9122
9123int bpf_map__set_value_size(struct bpf_map *map, __u32 size)
9124{
9125 if (map->fd >= 0)
9126 return -EBUSY;
9127 map->def.value_size = size;
9128 return 0;
9129}
9130
9131__u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
9132{
9133 return map ? map->btf_key_type_id : 0;
9134}
9135
9136__u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
9137{
9138 return map ? map->btf_value_type_id : 0;
9139}
9140
9141int bpf_map__set_priv(struct bpf_map *map, void *priv,
9142 bpf_map_clear_priv_t clear_priv)
9143{
9144 if (!map)
9145 return -EINVAL;
9146
9147 if (map->priv) {
9148 if (map->clear_priv)
9149 map->clear_priv(map, map->priv);
9150 }
9151
9152 map->priv = priv;
9153 map->clear_priv = clear_priv;
9154 return 0;
9155}
9156
9157void *bpf_map__priv(const struct bpf_map *map)
9158{
9159 return map ? map->priv : ERR_PTR(-EINVAL);
9160}
9161
9162int bpf_map__set_initial_value(struct bpf_map *map,
9163 const void *data, size_t size)
9164{
9165 if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG ||
9166 size != map->def.value_size || map->fd >= 0)
9167 return -EINVAL;
9168
9169 memcpy(map->mmaped, data, size);
9170 return 0;
9171}
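
/* Usage sketch (hypothetical caller): pre-seed an internal map (e.g.,
 * .rodata) before bpf_object__load(); size must match the map's value
 * size exactly. "struct my_cfg" is an illustrative user type.
 *
 *	struct my_cfg cfg = { ... };
 *
 *	err = bpf_map__set_initial_value(rodata_map, &cfg, sizeof(cfg));
 */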
9172
9173bool bpf_map__is_offload_neutral(const struct bpf_map *map)
9174{
9175 return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
9176}
9177
9178bool bpf_map__is_internal(const struct bpf_map *map)
9179{
9180 return map->libbpf_type != LIBBPF_MAP_UNSPEC;
9181}
9182
9183__u32 bpf_map__ifindex(const struct bpf_map *map)
9184{
9185 return map->map_ifindex;
9186}
9187
9188int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
9189{
9190 if (map->fd >= 0)
9191 return -EBUSY;
9192 map->map_ifindex = ifindex;
9193 return 0;
9194}
9195
9196int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
9197{
9198 if (!bpf_map_type__is_map_in_map(map->def.type)) {
9199 pr_warn("error: unsupported map type\n");
9200 return -EINVAL;
9201 }
9202 if (map->inner_map_fd != -1) {
9203 pr_warn("error: inner_map_fd already specified\n");
9204 return -EINVAL;
9205 }
9206 map->inner_map_fd = fd;
9207 return 0;
9208}
9209
9210static struct bpf_map *
9211__bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
9212{
9213 ssize_t idx;
9214 struct bpf_map *s, *e;
9215
9216 if (!obj || !obj->maps)
9217 return NULL;
9218
9219 s = obj->maps;
9220 e = obj->maps + obj->nr_maps;
9221
9222 if ((m < s) || (m >= e)) {
9223 pr_warn("error in %s: map handler doesn't belong to object\n",
9224 __func__);
9225 return NULL;
9226 }
9227
9228 idx = (m - obj->maps) + i;
9229 if (idx >= obj->nr_maps || idx < 0)
9230 return NULL;
9231 return &obj->maps[idx];
9232}
9233
9234struct bpf_map *
9235bpf_map__next(const struct bpf_map *prev, const struct bpf_object *obj)
9236{
9237 if (prev == NULL)
9238 return obj->maps;
9239
9240 return __bpf_map__iter(prev, obj, 1);
9241}
9242
9243struct bpf_map *
9244bpf_map__prev(const struct bpf_map *next, const struct bpf_object *obj)
9245{
9246 if (next == NULL) {
9247 if (!obj->nr_maps)
9248 return NULL;
9249 return obj->maps + obj->nr_maps - 1;
9250 }
9251
9252 return __bpf_map__iter(next, obj, -1);
9253}
9254
9255struct bpf_map *
9256bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name)
9257{
9258 struct bpf_map *pos;
9259
9260 bpf_object__for_each_map(pos, obj) {
9261 if (pos->name && !strcmp(pos->name, name))
9262 return pos;
9263 }
9264 return NULL;
9265}
9266
9267int
9268bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name)
9269{
9270 return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
9271}
9272
9273struct bpf_map *
9274bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
9275{
9276 return ERR_PTR(-ENOTSUP);
9277}
9278
9279long libbpf_get_error(const void *ptr)
9280{
9281 return PTR_ERR_OR_ZERO(ptr);
9282}
9283
9284int bpf_prog_load(const char *file, enum bpf_prog_type type,
9285 struct bpf_object **pobj, int *prog_fd)
9286{
9287 struct bpf_prog_load_attr attr;
9288
9289 memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
9290 attr.file = file;
9291 attr.prog_type = type;
9292 attr.expected_attach_type = 0;
9293
9294 return bpf_prog_load_xattr(&attr, pobj, prog_fd);
9295}
9296
9297int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
9298 struct bpf_object **pobj, int *prog_fd)
9299{
9300 struct bpf_object_open_attr open_attr = {};
9301 struct bpf_program *prog, *first_prog = NULL;
9302 struct bpf_object *obj;
9303 struct bpf_map *map;
9304 int err;
9305
9306 if (!attr)
9307 return -EINVAL;
9308 if (!attr->file)
9309 return -EINVAL;
9310
9311 open_attr.file = attr->file;
9312 open_attr.prog_type = attr->prog_type;
9313
9314 obj = bpf_object__open_xattr(&open_attr);
9315 if (IS_ERR_OR_NULL(obj))
9316 return -ENOENT;
9317
9318 bpf_object__for_each_program(prog, obj) {
9319 enum bpf_attach_type attach_type = attr->expected_attach_type;
9320
		/* If the caller explicitly specified a program type, apply
		 * it (and the expected attach type) to every program in the
		 * object, overriding section-name-based guessing.
		 */
9325 if (attr->prog_type != BPF_PROG_TYPE_UNSPEC) {
9326 bpf_program__set_type(prog, attr->prog_type);
9327 bpf_program__set_expected_attach_type(prog,
9328 attach_type);
9329 }
9330 if (bpf_program__get_type(prog) == BPF_PROG_TYPE_UNSPEC) {
			/* couldn't guess the program type from the section
			 * name, and the caller didn't provide a fallback
			 * type either
			 */
9335 bpf_object__close(obj);
9336 return -EINVAL;
9337 }
9338
9339 prog->prog_ifindex = attr->ifindex;
9340 prog->log_level = attr->log_level;
9341 prog->prog_flags |= attr->prog_flags;
9342 if (!first_prog)
9343 first_prog = prog;
9344 }
9345
9346 bpf_object__for_each_map(map, obj) {
9347 if (!bpf_map__is_offload_neutral(map))
9348 map->map_ifindex = attr->ifindex;
9349 }
9350
9351 if (!first_prog) {
9352 pr_warn("object file doesn't contain bpf program\n");
9353 bpf_object__close(obj);
9354 return -ENOENT;
9355 }
9356
9357 err = bpf_object__load(obj);
9358 if (err) {
9359 bpf_object__close(obj);
9360 return err;
9361 }
9362
9363 *pobj = obj;
9364 *prog_fd = bpf_program__fd(first_prog);
9365 return 0;
9366}
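
/* Usage sketch for the convenience loader above ("xdp_prog.o" is a
 * placeholder object file name):
 *
 *	struct bpf_object *obj;
 *	int prog_fd;
 *
 *	err = bpf_prog_load("xdp_prog.o", BPF_PROG_TYPE_XDP, &obj, &prog_fd);
 *	if (!err)
 *		... prog_fd refers to the first program in the object ...
 */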
9367
9368struct bpf_link {
9369 int (*detach)(struct bpf_link *link);
9370 int (*destroy)(struct bpf_link *link);
9371 char *pin_path;
9372 int fd;
9373 bool disconnected;
9374};
9375
9376
9377int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog)
9378{
9379 return bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), NULL);
9380}
9381
9382
/* Release "ownership" of the underlying BPF resource (typically, a BPF
 * program attached to some BPF hook, e.g., a tracepoint or kprobe). A
 * disconnected link, when destroyed through bpf_link__destroy(), won't
 * attempt to detach/unregister that BPF resource. This is useful when the
 * attached BPF program has to outlive the userspace process that attached
 * it. Depending on the program type, additional steps (like pinning the
 * program in BPF FS) might be necessary to prevent the kernel from
 * automatically detaching and cleaning up when the process exits.
 */
9392void bpf_link__disconnect(struct bpf_link *link)
9393{
9394 link->disconnected = true;
9395}
9396
9397int bpf_link__destroy(struct bpf_link *link)
9398{
9399 int err = 0;
9400
9401 if (IS_ERR_OR_NULL(link))
9402 return 0;
9403
9404 if (!link->disconnected && link->detach)
9405 err = link->detach(link);
9406 if (link->destroy)
9407 link->destroy(link);
9408 if (link->pin_path)
9409 free(link->pin_path);
9410 free(link);
9411
9412 return err;
9413}
9414
9415int bpf_link__fd(const struct bpf_link *link)
9416{
9417 return link->fd;
9418}
9419
9420const char *bpf_link__pin_path(const struct bpf_link *link)
9421{
9422 return link->pin_path;
9423}
9424
9425static int bpf_link__detach_fd(struct bpf_link *link)
9426{
9427 return close(link->fd);
9428}
9429
9430struct bpf_link *bpf_link__open(const char *path)
9431{
9432 struct bpf_link *link;
9433 int fd;
9434
9435 fd = bpf_obj_get(path);
9436 if (fd < 0) {
9437 fd = -errno;
9438 pr_warn("failed to open link at %s: %d\n", path, fd);
9439 return ERR_PTR(fd);
9440 }
9441
9442 link = calloc(1, sizeof(*link));
9443 if (!link) {
9444 close(fd);
9445 return ERR_PTR(-ENOMEM);
9446 }
9447 link->detach = &bpf_link__detach_fd;
9448 link->fd = fd;
9449
9450 link->pin_path = strdup(path);
9451 if (!link->pin_path) {
9452 bpf_link__destroy(link);
9453 return ERR_PTR(-ENOMEM);
9454 }
9455
9456 return link;
9457}
9458
9459int bpf_link__detach(struct bpf_link *link)
9460{
9461 return bpf_link_detach(link->fd) ? -errno : 0;
9462}
9463
9464int bpf_link__pin(struct bpf_link *link, const char *path)
9465{
9466 int err;
9467
9468 if (link->pin_path)
9469 return -EBUSY;
9470 err = make_parent_dir(path);
9471 if (err)
9472 return err;
9473 err = check_path(path);
9474 if (err)
9475 return err;
9476
9477 link->pin_path = strdup(path);
9478 if (!link->pin_path)
9479 return -ENOMEM;
9480
9481 if (bpf_obj_pin(link->fd, link->pin_path)) {
9482 err = -errno;
9483 zfree(&link->pin_path);
9484 return err;
9485 }
9486
9487 pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path);
9488 return 0;
9489}
9490
9491int bpf_link__unpin(struct bpf_link *link)
9492{
9493 int err;
9494
9495 if (!link->pin_path)
9496 return -EINVAL;
9497
9498 err = unlink(link->pin_path);
9499 if (err != 0)
9500 return -errno;
9501
9502 pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path);
9503 zfree(&link->pin_path);
9504 return 0;
9505}
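
/* Pinning lifecycle sketch (the path is illustrative): a pinned link keeps
 * the attachment alive in BPF FS even after the process exits; unpinning
 * removes the FS entry again.
 *
 *	err = bpf_link__pin(link, "/sys/fs/bpf/my_link");
 *	...
 *	err = bpf_link__unpin(link);
 */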
9506
9507static int bpf_link__detach_perf_event(struct bpf_link *link)
9508{
9509 int err;
9510
9511 err = ioctl(link->fd, PERF_EVENT_IOC_DISABLE, 0);
9512 if (err)
9513 err = -errno;
9514
9515 close(link->fd);
9516 return err;
9517}
9518
9519struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog,
9520 int pfd)
9521{
9522 char errmsg[STRERR_BUFSIZE];
9523 struct bpf_link *link;
9524 int prog_fd, err;
9525
9526 if (pfd < 0) {
9527 pr_warn("prog '%s': invalid perf event FD %d\n",
9528 prog->name, pfd);
9529 return ERR_PTR(-EINVAL);
9530 }
9531 prog_fd = bpf_program__fd(prog);
9532 if (prog_fd < 0) {
9533 pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n",
9534 prog->name);
9535 return ERR_PTR(-EINVAL);
9536 }
9537
9538 link = calloc(1, sizeof(*link));
9539 if (!link)
9540 return ERR_PTR(-ENOMEM);
9541 link->detach = &bpf_link__detach_perf_event;
9542 link->fd = pfd;
9543
9544 if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
9545 err = -errno;
9546 free(link);
9547 pr_warn("prog '%s': failed to attach to pfd %d: %s\n",
9548 prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9549 if (err == -EPROTO)
9550 pr_warn("prog '%s': try add PERF_SAMPLE_CALLCHAIN to or remove exclude_callchain_[kernel|user] from pfd %d\n",
9551 prog->name, pfd);
9552 return ERR_PTR(err);
9553 }
9554 if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
9555 err = -errno;
9556 free(link);
9557 pr_warn("prog '%s': failed to enable pfd %d: %s\n",
9558 prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9559 return ERR_PTR(err);
9560 }
9561 return link;
9562}
9563
/*
 * Parse a non-negative integer from the given file using the scanf-style
 * format string fmt. Note that an actual parsed value that is negative is
 * indistinguishable from an error, since errors are also reported as
 * negative return values.
 */
9569static int parse_uint_from_file(const char *file, const char *fmt)
9570{
9571 char buf[STRERR_BUFSIZE];
9572 int err, ret;
9573 FILE *f;
9574
9575 f = fopen(file, "r");
9576 if (!f) {
9577 err = -errno;
9578 pr_debug("failed to open '%s': %s\n", file,
9579 libbpf_strerror_r(err, buf, sizeof(buf)));
9580 return err;
9581 }
9582 err = fscanf(f, fmt, &ret);
9583 if (err != 1) {
9584 err = err == EOF ? -EIO : -errno;
9585 pr_debug("failed to parse '%s': %s\n", file,
9586 libbpf_strerror_r(err, buf, sizeof(buf)));
9587 fclose(f);
9588 return err;
9589 }
9590 fclose(f);
9591 return ret;
9592}
9593
9594static int determine_kprobe_perf_type(void)
9595{
9596 const char *file = "/sys/bus/event_source/devices/kprobe/type";
9597
9598 return parse_uint_from_file(file, "%d\n");
9599}
9600
9601static int determine_uprobe_perf_type(void)
9602{
9603 const char *file = "/sys/bus/event_source/devices/uprobe/type";
9604
9605 return parse_uint_from_file(file, "%d\n");
9606}
9607
9608static int determine_kprobe_retprobe_bit(void)
9609{
9610 const char *file = "/sys/bus/event_source/devices/kprobe/format/retprobe";
9611
9612 return parse_uint_from_file(file, "config:%d\n");
9613}
9614
9615static int determine_uprobe_retprobe_bit(void)
9616{
9617 const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe";
9618
9619 return parse_uint_from_file(file, "config:%d\n");
9620}
9621
9622static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
9623 uint64_t offset, int pid)
9624{
9625 struct perf_event_attr attr = {};
9626 char errmsg[STRERR_BUFSIZE];
9627 int type, pfd, err;
9628
9629 type = uprobe ? determine_uprobe_perf_type()
9630 : determine_kprobe_perf_type();
9631 if (type < 0) {
9632 pr_warn("failed to determine %s perf type: %s\n",
9633 uprobe ? "uprobe" : "kprobe",
9634 libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
9635 return type;
9636 }
9637 if (retprobe) {
9638 int bit = uprobe ? determine_uprobe_retprobe_bit()
9639 : determine_kprobe_retprobe_bit();
9640
9641 if (bit < 0) {
9642 pr_warn("failed to determine %s retprobe bit: %s\n",
9643 uprobe ? "uprobe" : "kprobe",
9644 libbpf_strerror_r(bit, errmsg, sizeof(errmsg)));
9645 return bit;
9646 }
9647 attr.config |= 1 << bit;
9648 }
9649 attr.size = sizeof(attr);
9650 attr.type = type;
9651 attr.config1 = ptr_to_u64(name);
9652 attr.config2 = offset;
9653
	/* pid filter is meaningful only for uprobes */
	pfd = syscall(__NR_perf_event_open, &attr,
		      pid < 0 ? -1 : pid /* pid */,
		      pid == -1 ? 0 : -1 /* cpu */,
		      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
9659 if (pfd < 0) {
9660 err = -errno;
9661 pr_warn("%s perf_event_open() failed: %s\n",
9662 uprobe ? "uprobe" : "kprobe",
9663 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9664 return err;
9665 }
9666 return pfd;
9667}
9668
9669struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog,
9670 bool retprobe,
9671 const char *func_name)
9672{
9673 char errmsg[STRERR_BUFSIZE];
9674 struct bpf_link *link;
9675 int pfd, err;
9676
	pfd = perf_event_open_probe(false /* uprobe */, retprobe, func_name,
				    0 /* offset */, -1 /* pid */);
9679 if (pfd < 0) {
9680 pr_warn("prog '%s': failed to create %s '%s' perf event: %s\n",
9681 prog->name, retprobe ? "kretprobe" : "kprobe", func_name,
9682 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
9683 return ERR_PTR(pfd);
9684 }
9685 link = bpf_program__attach_perf_event(prog, pfd);
9686 if (IS_ERR(link)) {
9687 close(pfd);
9688 err = PTR_ERR(link);
9689 pr_warn("prog '%s': failed to attach to %s '%s': %s\n",
9690 prog->name, retprobe ? "kretprobe" : "kprobe", func_name,
9691 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9692 return link;
9693 }
9694 return link;
9695}
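
/* Usage sketch ("do_unlinkat" is an illustrative kernel symbol, not an
 * assumption of this file):
 *
 *	link = bpf_program__attach_kprobe(prog, false, "do_unlinkat");
 *	if (libbpf_get_error(link))
 *		... attach failed ...
 */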
9696
9697static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec,
9698 struct bpf_program *prog)
9699{
9700 const char *func_name;
9701 bool retprobe;
9702
9703 func_name = prog->sec_name + sec->len;
9704 retprobe = strcmp(sec->sec, "kretprobe/") == 0;
9705
9706 return bpf_program__attach_kprobe(prog, retprobe, func_name);
9707}
9708
9709struct bpf_link *bpf_program__attach_uprobe(struct bpf_program *prog,
9710 bool retprobe, pid_t pid,
9711 const char *binary_path,
9712 size_t func_offset)
9713{
9714 char errmsg[STRERR_BUFSIZE];
9715 struct bpf_link *link;
9716 int pfd, err;
9717
	pfd = perf_event_open_probe(true /* uprobe */, retprobe,
				    binary_path, func_offset, pid);
9720 if (pfd < 0) {
9721 pr_warn("prog '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
9722 prog->name, retprobe ? "uretprobe" : "uprobe",
9723 binary_path, func_offset,
9724 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
9725 return ERR_PTR(pfd);
9726 }
9727 link = bpf_program__attach_perf_event(prog, pfd);
9728 if (IS_ERR(link)) {
9729 close(pfd);
9730 err = PTR_ERR(link);
9731 pr_warn("prog '%s': failed to attach to %s '%s:0x%zx': %s\n",
9732 prog->name, retprobe ? "uretprobe" : "uprobe",
9733 binary_path, func_offset,
9734 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9735 return link;
9736 }
9737 return link;
9738}
9739
9740static int determine_tracepoint_id(const char *tp_category,
9741 const char *tp_name)
9742{
9743 char file[PATH_MAX];
9744 int ret;
9745
9746 ret = snprintf(file, sizeof(file),
9747 "/sys/kernel/debug/tracing/events/%s/%s/id",
9748 tp_category, tp_name);
9749 if (ret < 0)
9750 return -errno;
9751 if (ret >= sizeof(file)) {
9752 pr_debug("tracepoint %s/%s path is too long\n",
9753 tp_category, tp_name);
9754 return -E2BIG;
9755 }
9756 return parse_uint_from_file(file, "%d\n");
9757}
9758
9759static int perf_event_open_tracepoint(const char *tp_category,
9760 const char *tp_name)
9761{
9762 struct perf_event_attr attr = {};
9763 char errmsg[STRERR_BUFSIZE];
9764 int tp_id, pfd, err;
9765
9766 tp_id = determine_tracepoint_id(tp_category, tp_name);
9767 if (tp_id < 0) {
9768 pr_warn("failed to determine tracepoint '%s/%s' perf event ID: %s\n",
9769 tp_category, tp_name,
9770 libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg)));
9771 return tp_id;
9772 }
9773
9774 attr.type = PERF_TYPE_TRACEPOINT;
9775 attr.size = sizeof(attr);
9776 attr.config = tp_id;
9777
	pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
		      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
9780 if (pfd < 0) {
9781 err = -errno;
9782 pr_warn("tracepoint '%s/%s' perf_event_open() failed: %s\n",
9783 tp_category, tp_name,
9784 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9785 return err;
9786 }
9787 return pfd;
9788}
9789
9790struct bpf_link *bpf_program__attach_tracepoint(struct bpf_program *prog,
9791 const char *tp_category,
9792 const char *tp_name)
9793{
9794 char errmsg[STRERR_BUFSIZE];
9795 struct bpf_link *link;
9796 int pfd, err;
9797
9798 pfd = perf_event_open_tracepoint(tp_category, tp_name);
9799 if (pfd < 0) {
9800 pr_warn("prog '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
9801 prog->name, tp_category, tp_name,
9802 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
9803 return ERR_PTR(pfd);
9804 }
9805 link = bpf_program__attach_perf_event(prog, pfd);
9806 if (IS_ERR(link)) {
9807 close(pfd);
9808 err = PTR_ERR(link);
9809 pr_warn("prog '%s': failed to attach to tracepoint '%s/%s': %s\n",
9810 prog->name, tp_category, tp_name,
9811 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
9812 return link;
9813 }
9814 return link;
9815}
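
/* Usage sketch (the tracepoint category/name pair is illustrative):
 *
 *	link = bpf_program__attach_tracepoint(prog, "sched", "sched_switch");
 *	if (libbpf_get_error(link))
 *		... attach failed ...
 */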
9816
9817static struct bpf_link *attach_tp(const struct bpf_sec_def *sec,
9818 struct bpf_program *prog)
9819{
9820 char *sec_name, *tp_cat, *tp_name;
9821 struct bpf_link *link;
9822
9823 sec_name = strdup(prog->sec_name);
9824 if (!sec_name)
9825 return ERR_PTR(-ENOMEM);
9826
	/* extract "<category>/<name>" following the section prefix */
9828 tp_cat = sec_name + sec->len;
9829 tp_name = strchr(tp_cat, '/');
9830 if (!tp_name) {
9831 link = ERR_PTR(-EINVAL);
9832 goto out;
9833 }
9834 *tp_name = '\0';
9835 tp_name++;
9836
9837 link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name);
9838out:
9839 free(sec_name);
9840 return link;
9841}
9842
9843struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
9844 const char *tp_name)
9845{
9846 char errmsg[STRERR_BUFSIZE];
9847 struct bpf_link *link;
9848 int prog_fd, pfd;
9849
9850 prog_fd = bpf_program__fd(prog);
9851 if (prog_fd < 0) {
9852 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
9853 return ERR_PTR(-EINVAL);
9854 }
9855
9856 link = calloc(1, sizeof(*link));
9857 if (!link)
9858 return ERR_PTR(-ENOMEM);
9859 link->detach = &bpf_link__detach_fd;
9860
9861 pfd = bpf_raw_tracepoint_open(tp_name, prog_fd);
9862 if (pfd < 0) {
9863 pfd = -errno;
9864 free(link);
9865 pr_warn("prog '%s': failed to attach to raw tracepoint '%s': %s\n",
9866 prog->name, tp_name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
9867 return ERR_PTR(pfd);
9868 }
9869 link->fd = pfd;
9870 return link;
9871}
9872
9873static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
9874 struct bpf_program *prog)
9875{
9876 const char *tp_name = prog->sec_name + sec->len;
9877
9878 return bpf_program__attach_raw_tracepoint(prog, tp_name);
9879}
9880
/* Common logic for all BPF program types that attach via a BTF ID */
9882static struct bpf_link *bpf_program__attach_btf_id(struct bpf_program *prog)
9883{
9884 char errmsg[STRERR_BUFSIZE];
9885 struct bpf_link *link;
9886 int prog_fd, pfd;
9887
9888 prog_fd = bpf_program__fd(prog);
9889 if (prog_fd < 0) {
9890 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
9891 return ERR_PTR(-EINVAL);
9892 }
9893
9894 link = calloc(1, sizeof(*link));
9895 if (!link)
9896 return ERR_PTR(-ENOMEM);
9897 link->detach = &bpf_link__detach_fd;
9898
9899 pfd = bpf_raw_tracepoint_open(NULL, prog_fd);
9900 if (pfd < 0) {
9901 pfd = -errno;
9902 free(link);
9903 pr_warn("prog '%s': failed to attach: %s\n",
9904 prog->name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
9905 return ERR_PTR(pfd);
9906 }
9907 link->fd = pfd;
	return link;
9909}
9910
9911struct bpf_link *bpf_program__attach_trace(struct bpf_program *prog)
9912{
9913 return bpf_program__attach_btf_id(prog);
9914}
9915
9916struct bpf_link *bpf_program__attach_lsm(struct bpf_program *prog)
9917{
9918 return bpf_program__attach_btf_id(prog);
9919}
9920
9921static struct bpf_link *attach_trace(const struct bpf_sec_def *sec,
9922 struct bpf_program *prog)
9923{
9924 return bpf_program__attach_trace(prog);
9925}
9926
9927static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec,
9928 struct bpf_program *prog)
9929{
9930 return bpf_program__attach_lsm(prog);
9931}
9932
9933static struct bpf_link *attach_iter(const struct bpf_sec_def *sec,
9934 struct bpf_program *prog)
9935{
9936 return bpf_program__attach_iter(prog, NULL);
9937}
9938
9939static struct bpf_link *
9940bpf_program__attach_fd(struct bpf_program *prog, int target_fd, int btf_id,
9941 const char *target_name)
9942{
9943 DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts,
9944 .target_btf_id = btf_id);
9945 enum bpf_attach_type attach_type;
9946 char errmsg[STRERR_BUFSIZE];
9947 struct bpf_link *link;
9948 int prog_fd, link_fd;
9949
9950 prog_fd = bpf_program__fd(prog);
9951 if (prog_fd < 0) {
9952 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
9953 return ERR_PTR(-EINVAL);
9954 }
9955
9956 link = calloc(1, sizeof(*link));
9957 if (!link)
9958 return ERR_PTR(-ENOMEM);
9959 link->detach = &bpf_link__detach_fd;
9960
9961 attach_type = bpf_program__get_expected_attach_type(prog);
9962 link_fd = bpf_link_create(prog_fd, target_fd, attach_type, &opts);
9963 if (link_fd < 0) {
9964 link_fd = -errno;
9965 free(link);
9966 pr_warn("prog '%s': failed to attach to %s: %s\n",
9967 prog->name, target_name,
9968 libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
9969 return ERR_PTR(link_fd);
9970 }
9971 link->fd = link_fd;
9972 return link;
9973}
9974
9975struct bpf_link *
9976bpf_program__attach_cgroup(struct bpf_program *prog, int cgroup_fd)
9977{
9978 return bpf_program__attach_fd(prog, cgroup_fd, 0, "cgroup");
9979}
9980
9981struct bpf_link *
9982bpf_program__attach_netns(struct bpf_program *prog, int netns_fd)
9983{
9984 return bpf_program__attach_fd(prog, netns_fd, 0, "netns");
9985}
9986
9987struct bpf_link *bpf_program__attach_xdp(struct bpf_program *prog, int ifindex)
9988{
	/* target_fd/target_ifindex use the same field in LINK_CREATE */
9990 return bpf_program__attach_fd(prog, ifindex, 0, "xdp");
9991}
9992
9993struct bpf_link *bpf_program__attach_freplace(struct bpf_program *prog,
9994 int target_fd,
9995 const char *attach_func_name)
9996{
9997 int btf_id;
9998
9999 if (!!target_fd != !!attach_func_name) {
10000 pr_warn("prog '%s': supply none or both of target_fd and attach_func_name\n",
10001 prog->name);
10002 return ERR_PTR(-EINVAL);
10003 }
10004
10005 if (prog->type != BPF_PROG_TYPE_EXT) {
		pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace\n",
			prog->name);
10008 return ERR_PTR(-EINVAL);
10009 }
10010
10011 if (target_fd) {
10012 btf_id = libbpf_find_prog_btf_id(attach_func_name, target_fd);
10013 if (btf_id < 0)
10014 return ERR_PTR(btf_id);
10015
10016 return bpf_program__attach_fd(prog, target_fd, btf_id, "freplace");
10017 } else {
		/* no target, so use raw_tracepoint_open for compatibility
		 * with old kernels
		 */
10021 return bpf_program__attach_trace(prog);
10022 }
10023}
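
/* Usage sketch (hypothetical: target_fd is an already-loaded program's FD
 * and "xdp_dispatcher" is an illustrative function inside that program):
 *
 *	link = bpf_program__attach_freplace(prog, target_fd, "xdp_dispatcher");
 *	if (libbpf_get_error(link))
 *		... attach failed ...
 */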
10024
10025struct bpf_link *
10026bpf_program__attach_iter(struct bpf_program *prog,
10027 const struct bpf_iter_attach_opts *opts)
10028{
10029 DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
10030 char errmsg[STRERR_BUFSIZE];
10031 struct bpf_link *link;
10032 int prog_fd, link_fd;
10033 __u32 target_fd = 0;
10034
10035 if (!OPTS_VALID(opts, bpf_iter_attach_opts))
10036 return ERR_PTR(-EINVAL);
10037
10038 link_create_opts.iter_info = OPTS_GET(opts, link_info, (void *)0);
10039 link_create_opts.iter_info_len = OPTS_GET(opts, link_info_len, 0);
10040
10041 prog_fd = bpf_program__fd(prog);
10042 if (prog_fd < 0) {
10043 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
10044 return ERR_PTR(-EINVAL);
10045 }
10046
10047 link = calloc(1, sizeof(*link));
10048 if (!link)
10049 return ERR_PTR(-ENOMEM);
10050 link->detach = &bpf_link__detach_fd;
10051
10052 link_fd = bpf_link_create(prog_fd, target_fd, BPF_TRACE_ITER,
10053 &link_create_opts);
10054 if (link_fd < 0) {
10055 link_fd = -errno;
10056 free(link);
10057 pr_warn("prog '%s': failed to attach to iterator: %s\n",
10058 prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
10059 return ERR_PTR(link_fd);
10060 }
10061 link->fd = link_fd;
10062 return link;
10063}
10064
10065struct bpf_link *bpf_program__attach(struct bpf_program *prog)
10066{
10067 const struct bpf_sec_def *sec_def;
10068
10069 sec_def = find_sec_def(prog->sec_name);
10070 if (!sec_def || !sec_def->attach_fn)
10071 return ERR_PTR(-ESRCH);
10072
10073 return sec_def->attach_fn(sec_def, prog);
10074}
10075
10076static int bpf_link__detach_struct_ops(struct bpf_link *link)
10077{
10078 __u32 zero = 0;
10079
10080 if (bpf_map_delete_elem(link->fd, &zero))
10081 return -errno;
10082
10083 return 0;
10084}
10085
10086struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map)
10087{
10088 struct bpf_struct_ops *st_ops;
10089 struct bpf_link *link;
10090 __u32 i, zero = 0;
10091 int err;
10092
10093 if (!bpf_map__is_struct_ops(map) || map->fd == -1)
10094 return ERR_PTR(-EINVAL);
10095
10096 link = calloc(1, sizeof(*link));
10097 if (!link)
		return ERR_PTR(-ENOMEM);
10099
10100 st_ops = map->st_ops;
10101 for (i = 0; i < btf_vlen(st_ops->type); i++) {
10102 struct bpf_program *prog = st_ops->progs[i];
10103 void *kern_data;
10104 int prog_fd;
10105
10106 if (!prog)
10107 continue;
10108
10109 prog_fd = bpf_program__fd(prog);
10110 kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i];
10111 *(unsigned long *)kern_data = prog_fd;
10112 }
10113
10114 err = bpf_map_update_elem(map->fd, &zero, st_ops->kern_vdata, 0);
10115 if (err) {
10116 err = -errno;
10117 free(link);
10118 return ERR_PTR(err);
10119 }
10120
10121 link->detach = bpf_link__detach_struct_ops;
10122 link->fd = map->fd;
10123
10124 return link;
10125}
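
/* Usage sketch (the map must come from an already-loaded object and be a
 * struct_ops map; the lookup name "my_ops" is illustrative):
 *
 *	map = bpf_object__find_map_by_name(obj, "my_ops");
 *	link = bpf_map__attach_struct_ops(map);
 */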
10126
10127enum bpf_perf_event_ret
10128bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
10129 void **copy_mem, size_t *copy_size,
10130 bpf_perf_event_print_t fn, void *private_data)
10131{
10132 struct perf_event_mmap_page *header = mmap_mem;
10133 __u64 data_head = ring_buffer_read_head(header);
10134 __u64 data_tail = header->data_tail;
10135 void *base = ((__u8 *)header) + page_size;
10136 int ret = LIBBPF_PERF_EVENT_CONT;
10137 struct perf_event_header *ehdr;
10138 size_t ehdr_size;
10139
10140 while (data_head != data_tail) {
10141 ehdr = base + (data_tail & (mmap_size - 1));
10142 ehdr_size = ehdr->size;
10143
10144 if (((void *)ehdr) + ehdr_size > base + mmap_size) {
10145 void *copy_start = ehdr;
10146 size_t len_first = base + mmap_size - copy_start;
10147 size_t len_secnd = ehdr_size - len_first;
10148
10149 if (*copy_size < ehdr_size) {
10150 free(*copy_mem);
10151 *copy_mem = malloc(ehdr_size);
10152 if (!*copy_mem) {
10153 *copy_size = 0;
10154 ret = LIBBPF_PERF_EVENT_ERROR;
10155 break;
10156 }
10157 *copy_size = ehdr_size;
10158 }
10159
10160 memcpy(*copy_mem, copy_start, len_first);
10161 memcpy(*copy_mem + len_first, base, len_secnd);
10162 ehdr = *copy_mem;
10163 }
10164
10165 ret = fn(ehdr, private_data);
10166 data_tail += ehdr_size;
10167 if (ret != LIBBPF_PERF_EVENT_CONT)
10168 break;
10169 }
10170
10171 ring_buffer_write_tail(header, data_tail);
10172 return ret;
10173}
10174
10175struct perf_buffer;
10176
10177struct perf_buffer_params {
10178 struct perf_event_attr *attr;
	/* if event_cb is specified, it takes precedence */
10180 perf_buffer_event_fn event_cb;
	/* sample_cb and lost_cb are higher-level common-case callbacks */
10182 perf_buffer_sample_fn sample_cb;
10183 perf_buffer_lost_fn lost_cb;
10184 void *ctx;
10185 int cpu_cnt;
10186 int *cpus;
10187 int *map_keys;
10188};
10189
10190struct perf_cpu_buf {
10191 struct perf_buffer *pb;
10192 void *base;
10193 void *buf;
10194 size_t buf_size;
10195 int fd;
10196 int cpu;
10197 int map_key;
10198};
10199
10200struct perf_buffer {
10201 perf_buffer_event_fn event_cb;
10202 perf_buffer_sample_fn sample_cb;
10203 perf_buffer_lost_fn lost_cb;
10204 void *ctx;
10205
10206 size_t page_size;
10207 size_t mmap_size;
10208 struct perf_cpu_buf **cpu_bufs;
10209 struct epoll_event *events;
10210 int cpu_cnt;
10211 int epoll_fd;
10212 int map_fd;
10213};
10214
10215static void perf_buffer__free_cpu_buf(struct perf_buffer *pb,
10216 struct perf_cpu_buf *cpu_buf)
10217{
10218 if (!cpu_buf)
10219 return;
10220 if (cpu_buf->base &&
10221 munmap(cpu_buf->base, pb->mmap_size + pb->page_size))
10222 pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu);
10223 if (cpu_buf->fd >= 0) {
10224 ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0);
10225 close(cpu_buf->fd);
10226 }
10227 free(cpu_buf->buf);
10228 free(cpu_buf);
10229}
10230
10231void perf_buffer__free(struct perf_buffer *pb)
10232{
10233 int i;
10234
10235 if (IS_ERR_OR_NULL(pb))
10236 return;
10237 if (pb->cpu_bufs) {
10238 for (i = 0; i < pb->cpu_cnt; i++) {
10239 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
10240
10241 if (!cpu_buf)
10242 continue;
10243
10244 bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key);
10245 perf_buffer__free_cpu_buf(pb, cpu_buf);
10246 }
10247 free(pb->cpu_bufs);
10248 }
10249 if (pb->epoll_fd >= 0)
10250 close(pb->epoll_fd);
10251 free(pb->events);
10252 free(pb);
10253}
10254
10255static struct perf_cpu_buf *
10256perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
10257 int cpu, int map_key)
10258{
10259 struct perf_cpu_buf *cpu_buf;
10260 char msg[STRERR_BUFSIZE];
10261 int err;
10262
10263 cpu_buf = calloc(1, sizeof(*cpu_buf));
10264 if (!cpu_buf)
10265 return ERR_PTR(-ENOMEM);
10266
10267 cpu_buf->pb = pb;
10268 cpu_buf->cpu = cpu;
10269 cpu_buf->map_key = map_key;
10270
	cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu,
			      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
10273 if (cpu_buf->fd < 0) {
10274 err = -errno;
10275 pr_warn("failed to open perf buffer event on cpu #%d: %s\n",
10276 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
10277 goto error;
10278 }
10279
10280 cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size,
10281 PROT_READ | PROT_WRITE, MAP_SHARED,
10282 cpu_buf->fd, 0);
10283 if (cpu_buf->base == MAP_FAILED) {
10284 cpu_buf->base = NULL;
10285 err = -errno;
10286 pr_warn("failed to mmap perf buffer on cpu #%d: %s\n",
10287 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
10288 goto error;
10289 }
10290
10291 if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
10292 err = -errno;
10293 pr_warn("failed to enable perf buffer event on cpu #%d: %s\n",
10294 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
10295 goto error;
10296 }
10297
10298 return cpu_buf;
10299
10300error:
10301 perf_buffer__free_cpu_buf(pb, cpu_buf);
10302 return (struct perf_cpu_buf *)ERR_PTR(err);
10303}
10304
10305static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
10306 struct perf_buffer_params *p);
10307
10308struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
10309 const struct perf_buffer_opts *opts)
10310{
10311 struct perf_buffer_params p = {};
10312 struct perf_event_attr attr = { 0, };
10313
10314 attr.config = PERF_COUNT_SW_BPF_OUTPUT;
10315 attr.type = PERF_TYPE_SOFTWARE;
10316 attr.sample_type = PERF_SAMPLE_RAW;
10317 attr.sample_period = 1;
10318 attr.wakeup_events = 1;
10319
10320 p.attr = &attr;
10321 p.sample_cb = opts ? opts->sample_cb : NULL;
10322 p.lost_cb = opts ? opts->lost_cb : NULL;
10323 p.ctx = opts ? opts->ctx : NULL;
10324
10325 return __perf_buffer__new(map_fd, page_cnt, &p);
10326}
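
/* Usage sketch: handle_sample is a hypothetical user callback and map_fd
 * is a PERF_EVENT_ARRAY map FD; 8 pages per CPU is an arbitrary power of
 * two.
 *
 *	static void handle_sample(void *ctx, int cpu, void *data, __u32 size)
 *	{
 *		... consume one raw sample ...
 *	}
 *
 *	struct perf_buffer_opts pb_opts = { .sample_cb = handle_sample };
 *	struct perf_buffer *pb = perf_buffer__new(map_fd, 8, &pb_opts);
 */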
10327
10328struct perf_buffer *
10329perf_buffer__new_raw(int map_fd, size_t page_cnt,
10330 const struct perf_buffer_raw_opts *opts)
10331{
10332 struct perf_buffer_params p = {};
10333
10334 p.attr = opts->attr;
10335 p.event_cb = opts->event_cb;
10336 p.ctx = opts->ctx;
10337 p.cpu_cnt = opts->cpu_cnt;
10338 p.cpus = opts->cpus;
10339 p.map_keys = opts->map_keys;
10340
10341 return __perf_buffer__new(map_fd, page_cnt, &p);
10342}
10343
10344static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
10345 struct perf_buffer_params *p)
10346{
10347 const char *online_cpus_file = "/sys/devices/system/cpu/online";
10348 struct bpf_map_info map;
10349 char msg[STRERR_BUFSIZE];
10350 struct perf_buffer *pb;
10351 bool *online = NULL;
10352 __u32 map_info_len;
10353 int err, i, j, n;
10354
10355 if (page_cnt & (page_cnt - 1)) {
		pr_warn("page count should be a power of two, but is %zu\n",
10357 page_cnt);
10358 return ERR_PTR(-EINVAL);
10359 }
10360
	/* best-effort sanity checks */
10362 memset(&map, 0, sizeof(map));
10363 map_info_len = sizeof(map);
10364 err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len);
10365 if (err) {
10366 err = -errno;
		/* if BPF_OBJ_GET_INFO_BY_FD is supported at all, a real
		 * failure comes back as something other than -EINVAL
		 */
10370 if (err != -EINVAL) {
10371 pr_warn("failed to get map info for map FD %d: %s\n",
10372 map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
10373 return ERR_PTR(err);
10374 }
10375 pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n",
10376 map_fd);
10377 } else {
10378 if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
10379 pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
10380 map.name);
10381 return ERR_PTR(-EINVAL);
10382 }
10383 }
10384
10385 pb = calloc(1, sizeof(*pb));
10386 if (!pb)
10387 return ERR_PTR(-ENOMEM);
10388
10389 pb->event_cb = p->event_cb;
10390 pb->sample_cb = p->sample_cb;
10391 pb->lost_cb = p->lost_cb;
10392 pb->ctx = p->ctx;
10393
10394 pb->page_size = getpagesize();
10395 pb->mmap_size = pb->page_size * page_cnt;
10396 pb->map_fd = map_fd;
10397
10398 pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
10399 if (pb->epoll_fd < 0) {
10400 err = -errno;
10401 pr_warn("failed to create epoll instance: %s\n",
10402 libbpf_strerror_r(err, msg, sizeof(msg)));
10403 goto error;
10404 }
10405
10406 if (p->cpu_cnt > 0) {
10407 pb->cpu_cnt = p->cpu_cnt;
10408 } else {
10409 pb->cpu_cnt = libbpf_num_possible_cpus();
10410 if (pb->cpu_cnt < 0) {
10411 err = pb->cpu_cnt;
10412 goto error;
10413 }
10414 if (map.max_entries && map.max_entries < pb->cpu_cnt)
10415 pb->cpu_cnt = map.max_entries;
10416 }
10417
10418 pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events));
10419 if (!pb->events) {
10420 err = -ENOMEM;
10421 pr_warn("failed to allocate events: out of memory\n");
10422 goto error;
10423 }
10424 pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs));
10425 if (!pb->cpu_bufs) {
10426 err = -ENOMEM;
10427 pr_warn("failed to allocate buffers: out of memory\n");
10428 goto error;
10429 }
10430
10431 err = parse_cpu_mask_file(online_cpus_file, &online, &n);
10432 if (err) {
10433 pr_warn("failed to get online CPU mask: %d\n", err);
10434 goto error;
10435 }
10436
10437 for (i = 0, j = 0; i < pb->cpu_cnt; i++) {
10438 struct perf_cpu_buf *cpu_buf;
10439 int cpu, map_key;
10440
10441 cpu = p->cpu_cnt > 0 ? p->cpus[i] : i;
10442 map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i;
10443
		/* if the caller didn't explicitly request particular CPUs,
		 * skip CPUs that are offline or not present
		 */
10447 if (p->cpu_cnt <= 0 && (cpu >= n || !online[cpu]))
10448 continue;
10449
10450 cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key);
10451 if (IS_ERR(cpu_buf)) {
10452 err = PTR_ERR(cpu_buf);
10453 goto error;
10454 }
10455
10456 pb->cpu_bufs[j] = cpu_buf;
10457
10458 err = bpf_map_update_elem(pb->map_fd, &map_key,
10459 &cpu_buf->fd, 0);
10460 if (err) {
10461 err = -errno;
10462 pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n",
10463 cpu, map_key, cpu_buf->fd,
10464 libbpf_strerror_r(err, msg, sizeof(msg)));
10465 goto error;
10466 }
10467
10468 pb->events[j].events = EPOLLIN;
10469 pb->events[j].data.ptr = cpu_buf;
10470 if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd,
10471 &pb->events[j]) < 0) {
10472 err = -errno;
10473 pr_warn("failed to epoll_ctl cpu #%d perf FD %d: %s\n",
10474 cpu, cpu_buf->fd,
10475 libbpf_strerror_r(err, msg, sizeof(msg)));
10476 goto error;
10477 }
10478 j++;
10479 }
10480 pb->cpu_cnt = j;
10481 free(online);
10482
10483 return pb;
10484
10485error:
10486 free(online);
10487 if (pb)
10488 perf_buffer__free(pb);
10489 return ERR_PTR(err);
10490}
10491
10492struct perf_sample_raw {
10493 struct perf_event_header header;
10494 uint32_t size;
10495 char data[];
10496};
10497
10498struct perf_sample_lost {
10499 struct perf_event_header header;
10500 uint64_t id;
10501 uint64_t lost;
10502 uint64_t sample_id;
10503};
10504
10505static enum bpf_perf_event_ret
10506perf_buffer__process_record(struct perf_event_header *e, void *ctx)
10507{
10508 struct perf_cpu_buf *cpu_buf = ctx;
10509 struct perf_buffer *pb = cpu_buf->pb;
10510 void *data = e;
10511
	/* user wants full control over parsing the perf event */
10513 if (pb->event_cb)
10514 return pb->event_cb(pb->ctx, cpu_buf->cpu, e);
10515
10516 switch (e->type) {
10517 case PERF_RECORD_SAMPLE: {
10518 struct perf_sample_raw *s = data;
10519
10520 if (pb->sample_cb)
10521 pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size);
10522 break;
10523 }
10524 case PERF_RECORD_LOST: {
10525 struct perf_sample_lost *s = data;
10526
10527 if (pb->lost_cb)
10528 pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost);
10529 break;
10530 }
10531 default:
10532 pr_warn("unknown perf sample type %d\n", e->type);
10533 return LIBBPF_PERF_EVENT_ERROR;
10534 }
10535 return LIBBPF_PERF_EVENT_CONT;
10536}
10537
10538static int perf_buffer__process_records(struct perf_buffer *pb,
10539 struct perf_cpu_buf *cpu_buf)
10540{
10541 enum bpf_perf_event_ret ret;
10542
10543 ret = bpf_perf_event_read_simple(cpu_buf->base, pb->mmap_size,
10544 pb->page_size, &cpu_buf->buf,
10545 &cpu_buf->buf_size,
10546 perf_buffer__process_record, cpu_buf);
10547 if (ret != LIBBPF_PERF_EVENT_CONT)
10548 return ret;
10549 return 0;
10550}
10551
10552int perf_buffer__epoll_fd(const struct perf_buffer *pb)
10553{
10554 return pb->epoll_fd;
10555}
10556
10557int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
10558{
10559 int i, cnt, err;
10560
10561 cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
10562 for (i = 0; i < cnt; i++) {
10563 struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;
10564
10565 err = perf_buffer__process_records(pb, cpu_buf);
10566 if (err) {
10567 pr_warn("error while processing records: %d\n", err);
10568 return err;
10569 }
10570 }
10571 return cnt < 0 ? -errno : cnt;
10572}
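
/* Typical consumption loop (sketch): a return >= 0 is the number of ring
 * buffers that had data (0 means the timeout expired), < 0 is an error.
 *
 *	while ((err = perf_buffer__poll(pb, 100)) >= 0)
 *		;	// 100 ms timeout; loop until an error occurs
 */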
10573
/* Return the number of PERF_EVENT_ARRAY map slots set up by this
 * perf_buffer manager.
 */
10577size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb)
10578{
10579 return pb->cpu_cnt;
10580}
10581
/*
 * Return the perf_event FD of the ring buffer associated with the given
 * PERF_EVENT_ARRAY map slot index, or a negative error code if there is
 * no buffer at that slot.
 */
10587int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx)
10588{
10589 struct perf_cpu_buf *cpu_buf;
10590
10591 if (buf_idx >= pb->cpu_cnt)
10592 return -EINVAL;
10593
10594 cpu_buf = pb->cpu_bufs[buf_idx];
10595 if (!cpu_buf)
10596 return -ENOENT;
10597
10598 return cpu_buf->fd;
10599}
10600
/*
 * Consume data from the perf ring buffer corresponding to slot *buf_idx*
 * in the PERF_EVENT_ARRAY BPF map, without waiting/polling. If there is
 * no data to consume, do nothing and return success.
 * Returns:
 *   - 0 on success;
 *   - <0 on failure.
 */
10609int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx)
10610{
10611 struct perf_cpu_buf *cpu_buf;
10612
10613 if (buf_idx >= pb->cpu_cnt)
10614 return -EINVAL;
10615
10616 cpu_buf = pb->cpu_bufs[buf_idx];
10617 if (!cpu_buf)
10618 return -ENOENT;
10619
10620 return perf_buffer__process_records(pb, cpu_buf);
10621}
10622
10623int perf_buffer__consume(struct perf_buffer *pb)
10624{
10625 int i, err;
10626
10627 for (i = 0; i < pb->cpu_cnt; i++) {
10628 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
10629
10630 if (!cpu_buf)
10631 continue;
10632
10633 err = perf_buffer__process_records(pb, cpu_buf);
10634 if (err) {
10635 pr_warn("perf_buffer: failed to process records in buffer #%d: %d\n", i, err);
10636 return err;
10637 }
10638 }
10639 return 0;
10640}
10641
10642struct bpf_prog_info_array_desc {
10643 int array_offset;
10644 int count_offset;
10645 int size_offset;
	/* size_offset > 0: offset of the per-record size field within
	 * bpf_prog_info; size_offset < 0: fixed record size of -size_offset
	 */
10648};
10649
10650static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = {
10651 [BPF_PROG_INFO_JITED_INSNS] = {
10652 offsetof(struct bpf_prog_info, jited_prog_insns),
10653 offsetof(struct bpf_prog_info, jited_prog_len),
10654 -1,
10655 },
10656 [BPF_PROG_INFO_XLATED_INSNS] = {
10657 offsetof(struct bpf_prog_info, xlated_prog_insns),
10658 offsetof(struct bpf_prog_info, xlated_prog_len),
10659 -1,
10660 },
10661 [BPF_PROG_INFO_MAP_IDS] = {
10662 offsetof(struct bpf_prog_info, map_ids),
10663 offsetof(struct bpf_prog_info, nr_map_ids),
10664 -(int)sizeof(__u32),
10665 },
10666 [BPF_PROG_INFO_JITED_KSYMS] = {
10667 offsetof(struct bpf_prog_info, jited_ksyms),
10668 offsetof(struct bpf_prog_info, nr_jited_ksyms),
10669 -(int)sizeof(__u64),
10670 },
10671 [BPF_PROG_INFO_JITED_FUNC_LENS] = {
10672 offsetof(struct bpf_prog_info, jited_func_lens),
10673 offsetof(struct bpf_prog_info, nr_jited_func_lens),
10674 -(int)sizeof(__u32),
10675 },
10676 [BPF_PROG_INFO_FUNC_INFO] = {
10677 offsetof(struct bpf_prog_info, func_info),
10678 offsetof(struct bpf_prog_info, nr_func_info),
10679 offsetof(struct bpf_prog_info, func_info_rec_size),
10680 },
10681 [BPF_PROG_INFO_LINE_INFO] = {
10682 offsetof(struct bpf_prog_info, line_info),
10683 offsetof(struct bpf_prog_info, nr_line_info),
10684 offsetof(struct bpf_prog_info, line_info_rec_size),
10685 },
10686 [BPF_PROG_INFO_JITED_LINE_INFO] = {
10687 offsetof(struct bpf_prog_info, jited_line_info),
10688 offsetof(struct bpf_prog_info, nr_jited_line_info),
10689 offsetof(struct bpf_prog_info, jited_line_info_rec_size),
10690 },
10691 [BPF_PROG_INFO_PROG_TAGS] = {
10692 offsetof(struct bpf_prog_info, prog_tags),
10693 offsetof(struct bpf_prog_info, nr_prog_tags),
10694 -(int)sizeof(__u8) * BPF_TAG_SIZE,
10695 },
10696
10697};
10698
10699static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info,
10700 int offset)
10701{
10702 __u32 *array = (__u32 *)info;
10703
10704 if (offset >= 0)
10705 return array[offset / sizeof(__u32)];
10706 return -(int)offset;
10707}
10708
10709static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info,
10710 int offset)
10711{
10712 __u64 *array = (__u64 *)info;
10713
10714 if (offset >= 0)
10715 return array[offset / sizeof(__u64)];
10716 return -(int)offset;
10717}
10718
10719static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset,
10720 __u32 val)
10721{
10722 __u32 *array = (__u32 *)info;
10723
10724 if (offset >= 0)
10725 array[offset / sizeof(__u32)] = val;
10726}
10727
10728static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset,
10729 __u64 val)
10730{
10731 __u64 *array = (__u64 *)info;
10732
10733 if (offset >= 0)
10734 array[offset / sizeof(__u64)] = val;
10735}
10736
10737struct bpf_prog_info_linear *
10738bpf_program__get_prog_info_linear(int fd, __u64 arrays)
10739{
10740 struct bpf_prog_info_linear *info_linear;
10741 struct bpf_prog_info info = {};
10742 __u32 info_len = sizeof(info);
10743 __u32 data_len = 0;
10744 int i, err;
10745 void *ptr;
10746
10747 if (arrays >> BPF_PROG_INFO_LAST_ARRAY)
10748 return ERR_PTR(-EINVAL);
10749
	/* step 1: get array dimensions */
	err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
	if (err) {
		pr_debug("can't get prog info: %s\n", strerror(errno));
10754 return ERR_PTR(-EFAULT);
10755 }
10756
	/* step 2: calculate total size of all arrays */
10758 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
10759 bool include_array = (arrays & (1UL << i)) > 0;
10760 struct bpf_prog_info_array_desc *desc;
10761 __u32 count, size;
10762
10763 desc = bpf_prog_info_array_desc + i;

		/* skip fields the running kernel is too old to report */
10766 if (info_len < desc->array_offset + sizeof(__u32) ||
10767 info_len < desc->count_offset + sizeof(__u32) ||
10768 (desc->size_offset > 0 && info_len < desc->size_offset))
10769 include_array = false;
10770
10771 if (!include_array) {
10772 arrays &= ~(1UL << i);
10773 continue;
10774 }
10775
10776 count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
10777 size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
10778
10779 data_len += count * size;
10780 }
10781
	/* step 3: allocate continuous memory */
10783 data_len = roundup(data_len, sizeof(__u64));
10784 info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len);
10785 if (!info_linear)
10786 return ERR_PTR(-ENOMEM);
10787
	/* step 4: fill data to info_linear->info */
10789 info_linear->arrays = arrays;
10790 memset(&info_linear->info, 0, sizeof(info));
10791 ptr = info_linear->data;
10792
10793 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
10794 struct bpf_prog_info_array_desc *desc;
10795 __u32 count, size;
10796
10797 if ((arrays & (1UL << i)) == 0)
10798 continue;
10799
10800 desc = bpf_prog_info_array_desc + i;
10801 count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
10802 size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
10803 bpf_prog_info_set_offset_u32(&info_linear->info,
10804 desc->count_offset, count);
10805 bpf_prog_info_set_offset_u32(&info_linear->info,
10806 desc->size_offset, size);
10807 bpf_prog_info_set_offset_u64(&info_linear->info,
10808 desc->array_offset,
10809 ptr_to_u64(ptr));
10810 ptr += count * size;
10811 }
10812
	/* step 5: call syscall again to get the required arrays */
	err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len);
	if (err) {
		pr_debug("can't get prog info: %s\n", strerror(errno));
10817 free(info_linear);
10818 return ERR_PTR(-EFAULT);
10819 }
10820
	/* step 6: verify the data */
10822 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
10823 struct bpf_prog_info_array_desc *desc;
10824 __u32 v1, v2;
10825
10826 if ((arrays & (1UL << i)) == 0)
10827 continue;
10828
10829 desc = bpf_prog_info_array_desc + i;
10830 v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
10831 v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
10832 desc->count_offset);
10833 if (v1 != v2)
10834 pr_warn("%s: mismatch in element count\n", __func__);
10835
10836 v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
10837 v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
10838 desc->size_offset);
10839 if (v1 != v2)
10840 pr_warn("%s: mismatch in rec size\n", __func__);
10841 }
10842
	/* step 7: update info_len and data_len */
10844 info_linear->info_len = sizeof(struct bpf_prog_info);
10845 info_linear->data_len = data_len;
10846
10847 return info_linear;
10848}
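
/* Usage sketch: fetch prog info with the map-IDs array materialized
 * inline ("fd" is an illustrative loaded-program FD):
 *
 *	info_linear = bpf_program__get_prog_info_linear(fd,
 *					1UL << BPF_PROG_INFO_MAP_IDS);
 *	if (!IS_ERR_OR_NULL(info_linear)) {
 *		... info_linear->info.nr_map_ids entries are available ...
 *		free(info_linear);
 *	}
 */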
10849
10850void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear)
10851{
10852 int i;
10853
10854 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
10855 struct bpf_prog_info_array_desc *desc;
10856 __u64 addr, offs;
10857
10858 if ((info_linear->arrays & (1UL << i)) == 0)
10859 continue;
10860
10861 desc = bpf_prog_info_array_desc + i;
10862 addr = bpf_prog_info_read_offset_u64(&info_linear->info,
10863 desc->array_offset);
10864 offs = addr - ptr_to_u64(info_linear->data);
10865 bpf_prog_info_set_offset_u64(&info_linear->info,
10866 desc->array_offset, offs);
10867 }
10868}
10869
10870void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
10871{
10872 int i;
10873
10874 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
10875 struct bpf_prog_info_array_desc *desc;
10876 __u64 addr, offs;
10877
10878 if ((info_linear->arrays & (1UL << i)) == 0)
10879 continue;
10880
10881 desc = bpf_prog_info_array_desc + i;
10882 offs = bpf_prog_info_read_offset_u64(&info_linear->info,
10883 desc->array_offset);
10884 addr = offs + ptr_to_u64(info_linear->data);
10885 bpf_prog_info_set_offset_u64(&info_linear->info,
10886 desc->array_offset, addr);
10887 }
10888}
10889
10890int bpf_program__set_attach_target(struct bpf_program *prog,
10891 int attach_prog_fd,
10892 const char *attach_func_name)
10893{
10894 int btf_obj_fd = 0, btf_id = 0, err;
10895
10896 if (!prog || attach_prog_fd < 0 || !attach_func_name)
10897 return -EINVAL;
10898
10899 if (prog->obj->loaded)
10900 return -EINVAL;
10901
10902 if (attach_prog_fd) {
10903 btf_id = libbpf_find_prog_btf_id(attach_func_name,
10904 attach_prog_fd);
10905 if (btf_id < 0)
10906 return btf_id;
10907 } else {
		/* load vmlinux BTF on demand before resolving the BTF ID */
10909 err = bpf_object__load_vmlinux_btf(prog->obj, true);
10910 if (err)
10911 return err;
10912 err = find_kernel_btf_id(prog->obj, attach_func_name,
10913 prog->expected_attach_type,
10914 &btf_obj_fd, &btf_id);
10915 if (err)
10916 return err;
10917 }
10918
10919 prog->attach_btf_id = btf_id;
10920 prog->attach_btf_obj_fd = btf_obj_fd;
10921 prog->attach_prog_fd = attach_prog_fd;
10922 return 0;
10923}
10924
10925int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz)
10926{
10927 int err = 0, n, len, start, end = -1;
10928 bool *tmp;
10929
10930 *mask = NULL;
10931 *mask_sz = 0;
10932
	/* Each sub-string separated by ',' has format \d+ or \d+-\d+ */
10934 while (*s) {
10935 if (*s == ',' || *s == '\n') {
10936 s++;
10937 continue;
10938 }
10939 n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len);
10940 if (n <= 0 || n > 2) {
10941 pr_warn("Failed to get CPU range %s: %d\n", s, n);
10942 err = -EINVAL;
10943 goto cleanup;
10944 } else if (n == 1) {
10945 end = start;
10946 }
10947 if (start < 0 || start > end) {
10948 pr_warn("Invalid CPU range [%d,%d] in %s\n",
10949 start, end, s);
10950 err = -EINVAL;
10951 goto cleanup;
10952 }
10953 tmp = realloc(*mask, end + 1);
10954 if (!tmp) {
10955 err = -ENOMEM;
10956 goto cleanup;
10957 }
10958 *mask = tmp;
10959 memset(tmp + *mask_sz, 0, start - *mask_sz);
10960 memset(tmp + start, 1, end - start + 1);
10961 *mask_sz = end + 1;
10962 s += len;
10963 }
10964 if (!*mask_sz) {
10965 pr_warn("Empty CPU range\n");
10966 return -EINVAL;
10967 }
10968 return 0;
10969cleanup:
10970 free(*mask);
10971 *mask = NULL;
10972 return err;
10973}
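
/* Worked example: parse_cpu_mask_str("0-3,5", &mask, &n) yields n == 6
 * and mask == {1,1,1,1,0,1}; stray commas and a trailing newline are
 * skipped.
 */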
10974
10975int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz)
10976{
10977 int fd, err = 0, len;
10978 char buf[128];
10979
10980 fd = open(fcpu, O_RDONLY);
10981 if (fd < 0) {
10982 err = -errno;
10983 pr_warn("Failed to open cpu mask file %s: %d\n", fcpu, err);
10984 return err;
10985 }
10986 len = read(fd, buf, sizeof(buf));
10987 close(fd);
10988 if (len <= 0) {
10989 err = len ? -errno : -EINVAL;
10990 pr_warn("Failed to read cpu mask from %s: %d\n", fcpu, err);
10991 return err;
10992 }
10993 if (len >= sizeof(buf)) {
10994 pr_warn("CPU mask is too big in file %s\n", fcpu);
10995 return -E2BIG;
10996 }
10997 buf[len] = '\0';
10998
10999 return parse_cpu_mask_str(buf, mask, mask_sz);
11000}
11001
int libbpf_num_possible_cpus(void)
{
	static const char *fcpu = "/sys/devices/system/cpu/possible";
	static int cpus;
	int err, n, i, tmp_cpus;
	bool *mask;

	tmp_cpus = READ_ONCE(cpus);
	if (tmp_cpus > 0)
		return tmp_cpus;

	err = parse_cpu_mask_file(fcpu, &mask, &n);
	if (err)
		return err;

	tmp_cpus = 0;
	for (i = 0; i < n; i++) {
		if (mask[i])
			tmp_cpus++;
	}
	free(mask);

	WRITE_ONCE(cpus, tmp_cpus);
	return tmp_cpus;
}

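/*
 * Open the embedded BPF object of a generated skeleton and wire up the
 * skeleton's map and program pointers to the corresponding bpf_object
 * entities.
 */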
int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
			      const struct bpf_object_open_opts *opts)
{
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, skel_opts,
		.object_name = s->name,
	);
	struct bpf_object *obj;
	int i;

	/* Attempt to preserve opts->object_name, unless overridden by user
	 * explicitly. Overriding the object name for skeletons is
	 * discouraged, as it breaks global data maps, which embed the object
	 * name as part of their own map name.
	 */
	if (opts) {
		memcpy(&skel_opts, opts, sizeof(*opts));
		if (!opts->object_name)
			skel_opts.object_name = s->name;
	}

	obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts);
	if (IS_ERR(obj)) {
		pr_warn("failed to initialize skeleton BPF object '%s': %ld\n",
			s->name, PTR_ERR(obj));
		return PTR_ERR(obj);
	}

	*s->obj = obj;

	for (i = 0; i < s->map_cnt; i++) {
		struct bpf_map **map = s->maps[i].map;
		const char *name = s->maps[i].name;
		void **mmaped = s->maps[i].mmaped;

		*map = bpf_object__find_map_by_name(obj, name);
		if (!*map) {
			pr_warn("failed to find skeleton map '%s'\n", name);
			return -ESRCH;
		}

		/* externs shouldn't be pre-setup from user code */
		if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG)
			*mmaped = (*map)->mmaped;
	}

	for (i = 0; i < s->prog_cnt; i++) {
		struct bpf_program **prog = s->progs[i].prog;
		const char *name = s->progs[i].name;

		*prog = bpf_object__find_program_by_name(obj, name);
		if (!*prog) {
			pr_warn("failed to find skeleton program '%s'\n", name);
			return -ESRCH;
		}
	}

	return 0;
}

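/*
 * Load the skeleton's BPF object into the kernel and re-mmap() mmap()-able
 * global data maps so the skeleton's data pointers remain valid after load.
 */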
int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
{
	int i, err;

	err = bpf_object__load(*s->obj);
	if (err) {
		pr_warn("failed to load BPF skeleton '%s': %d\n", s->name, err);
		return err;
	}

	for (i = 0; i < s->map_cnt; i++) {
		struct bpf_map *map = *s->maps[i].map;
		size_t mmap_sz = bpf_map_mmap_sz(map);
		int prot, map_fd = bpf_map__fd(map);
		void **mmaped = s->maps[i].mmaped;

		if (!mmaped)
			continue;

		if (!(map->def.map_flags & BPF_F_MMAPABLE)) {
			*mmaped = NULL;
			continue;
		}

		if (map->def.map_flags & BPF_F_RDONLY_PROG)
			prot = PROT_READ;
		else
			prot = PROT_READ | PROT_WRITE;

		/* Remap the anonymous mmap()-ed "map initialization image"
		 * as BPF map-backed mmap()-ed memory, preserving the same
		 * memory address. This causes the kernel to change the
		 * process' page table to point to a different piece of
		 * kernel memory, but from userspace's point of view the
		 * address (and its contents, identical at this point) stays
		 * the same. This mapping is released by bpf_object__close()
		 * as part of normal clean-up, so there is nothing extra for
		 * the skeleton to undo.
		 */
		*mmaped = mmap(map->mmaped, mmap_sz, prot,
			       MAP_SHARED | MAP_FIXED, map_fd, 0);
		if (*mmaped == MAP_FAILED) {
			err = -errno;
			*mmaped = NULL;
			pr_warn("failed to re-mmap() map '%s': %d\n",
				bpf_map__name(map), err);
			return err;
		}
	}

	return 0;
}

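/*
 * Auto-attach each skeleton program that is marked for loading and whose
 * section name maps to a known attach handler; programs without a handler
 * are skipped.
 */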
int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
{
	int i;

	for (i = 0; i < s->prog_cnt; i++) {
		struct bpf_program *prog = *s->progs[i].prog;
		struct bpf_link **link = s->progs[i].link;
		const struct bpf_sec_def *sec_def;

		if (!prog->load)
			continue;

		/* auto-attaching is not supported for every section type */
		sec_def = find_sec_def(prog->sec_name);
		if (!sec_def || !sec_def->attach_fn)
			continue;

		*link = sec_def->attach_fn(sec_def, prog);
		if (IS_ERR(*link)) {
			pr_warn("failed to auto-attach program '%s': %ld\n",
				bpf_program__name(prog), PTR_ERR(*link));
			return PTR_ERR(*link);
		}
	}

	return 0;
}

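/*
 * Destroy all links created by bpf_object__attach_skeleton(), detaching
 * the skeleton's programs.
 */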
void bpf_object__detach_skeleton(struct bpf_object_skeleton *s)
{
	int i;

	for (i = 0; i < s->prog_cnt; i++) {
		struct bpf_link **link = s->progs[i].link;

		bpf_link__destroy(*link);
		*link = NULL;
	}
}

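/*
 * Detach the skeleton's programs, close its BPF object, and free the
 * skeleton itself.
 */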
void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s)
{
	if (s->progs)
		bpf_object__detach_skeleton(s);
	if (s->obj)
		bpf_object__close(*s->obj);
	free(s->maps);
	free(s->progs);
	free(s);
}
