// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * Common eBPF ELF object loading operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 * Copyright (C) 2017 Nicira, Inc.
 * Copyright (C) 2019 Isovalent, Inc.
 */
13#ifndef _GNU_SOURCE
14#define _GNU_SOURCE
15#endif
16#include <stdlib.h>
17#include <stdio.h>
18#include <stdarg.h>
19#include <libgen.h>
20#include <inttypes.h>
21#include <string.h>
22#include <unistd.h>
23#include <fcntl.h>
24#include <errno.h>
25#include <asm/unistd.h>
26#include <linux/err.h>
27#include <linux/kernel.h>
28#include <linux/bpf.h>
29#include <linux/btf.h>
30#include <linux/filter.h>
31#include <linux/list.h>
32#include <linux/limits.h>
33#include <linux/perf_event.h>
34#include <linux/ring_buffer.h>
35#include <sys/stat.h>
36#include <sys/types.h>
37#include <sys/vfs.h>
38#include <tools/libc_compat.h>
39#include <libelf.h>
40#include <gelf.h>
41
42#include "libbpf.h"
43#include "bpf.h"
44#include "btf.h"
45#include "str_error.h"
46#include "libbpf_internal.h"
47
48#ifndef EM_BPF
49#define EM_BPF 247
50#endif
51
52#ifndef BPF_FS_MAGIC
53#define BPF_FS_MAGIC 0xcafe4a11
54#endif
55
/* __base_pr() hands a caller-supplied, non-literal format string to
 * vfprintf(), so silence -Wformat-nonliteral for this file.
 */
59#pragma GCC diagnostic ignored "-Wformat-nonliteral"
60
61#define __printf(a, b) __attribute__((format(printf, a, b)))
62
63static int __base_pr(enum libbpf_print_level level, const char *format,
64 va_list args)
65{
66 if (level == LIBBPF_DEBUG)
67 return 0;
68
69 return vfprintf(stderr, format, args);
70}
71
72static libbpf_print_fn_t __libbpf_pr = __base_pr;
73
74void libbpf_set_print(libbpf_print_fn_t fn)
75{
76 __libbpf_pr = fn;
77}
78
79__printf(2, 3)
80void libbpf_print(enum libbpf_print_level level, const char *format, ...)
81{
82 va_list args;
83
84 if (!__libbpf_pr)
85 return;
86
87 va_start(args, format);
88 __libbpf_pr(level, format, args);
89 va_end(args);
90}
91
92#define STRERR_BUFSIZE 128
93
94#define CHECK_ERR(action, err, out) do { \
95 err = action; \
96 if (err) \
97 goto out; \
98} while(0)
99
100
101
102#ifndef zfree
103# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
104#endif
105
106#ifndef zclose
107# define zclose(fd) ({ \
108 int ___err = 0; \
109 if ((fd) >= 0) \
110 ___err = close((fd)); \
111 fd = -1; \
112 ___err; })
113#endif
114
115#ifdef HAVE_LIBELF_MMAP_SUPPORT
116# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
117#else
118# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
119#endif
120
121static inline __u64 ptr_to_u64(const void *ptr)
122{
123 return (__u64) (unsigned long) ptr;
124}
125
struct bpf_capabilities {
        /* v4.14: kernel supports program & map names */
        __u32 name:1;
        /* v5.2: kernel supports global data sections (.data, .rodata, .bss) */
        __u32 global_data:1;
        /* BTF_KIND_FUNC and BTF_KIND_FUNC_PROTO support */
        __u32 btf_func:1;
        /* BTF_KIND_VAR and BTF_KIND_DATASEC support */
        __u32 btf_datasec:1;
};
136
/*
 * bpf_prog would be a better name, but it is already taken by
 * linux/filter.h.
 */
struct bpf_program {
        /* Index of the ELF section the program came from; used to match
         * relocations.
         */
        int idx;
        char *name;
        int prog_ifindex;
        char *section_name;
        /* section_name with '/' replaced by '_'; makes recursive pinning
         * in bpf_object__pin_programs easier.
         */
        char *pin_name;
151 struct bpf_insn *insns;
152 size_t insns_cnt, main_prog_cnt;
153 enum bpf_prog_type type;
154
155 struct reloc_desc {
156 enum {
157 RELO_LD64,
158 RELO_CALL,
159 RELO_DATA,
160 } type;
161 int insn_idx;
162 union {
163 int map_idx;
164 int text_off;
165 };
166 } *reloc_desc;
167 int nr_reloc;
168 int log_level;
169
170 struct {
171 int nr;
172 int *fds;
173 } instances;
174 bpf_program_prep_t preprocessor;
175
176 struct bpf_object *obj;
177 void *priv;
178 bpf_program_clear_priv_t clear_priv;
179
180 enum bpf_attach_type expected_attach_type;
181 int btf_fd;
182 void *func_info;
183 __u32 func_info_rec_size;
184 __u32 func_info_cnt;
185
186 struct bpf_capabilities *caps;
187
188 void *line_info;
189 __u32 line_info_rec_size;
190 __u32 line_info_cnt;
191};
192
193enum libbpf_map_type {
194 LIBBPF_MAP_UNSPEC,
195 LIBBPF_MAP_DATA,
196 LIBBPF_MAP_BSS,
197 LIBBPF_MAP_RODATA,
198};
199
200static const char * const libbpf_type_to_btf_name[] = {
201 [LIBBPF_MAP_DATA] = ".data",
202 [LIBBPF_MAP_BSS] = ".bss",
203 [LIBBPF_MAP_RODATA] = ".rodata",
204};
205
206struct bpf_map {
207 int fd;
208 char *name;
209 size_t offset;
210 int map_ifindex;
211 int inner_map_fd;
212 struct bpf_map_def def;
213 __u32 btf_key_type_id;
214 __u32 btf_value_type_id;
215 void *priv;
216 bpf_map_clear_priv_t clear_priv;
217 enum libbpf_map_type libbpf_type;
218};
219
220struct bpf_secdata {
221 void *rodata;
222 void *data;
223};
224
225static LIST_HEAD(bpf_objects_list);
226
227struct bpf_object {
228 char name[BPF_OBJ_NAME_LEN];
229 char license[64];
230 __u32 kern_version;
231
232 struct bpf_program *programs;
233 size_t nr_programs;
234 struct bpf_map *maps;
235 size_t nr_maps;
236 struct bpf_secdata sections;
237
238 bool loaded;
239 bool has_pseudo_calls;
240
        /*
         * ELF parsing state. Only valid while the object file is open,
         * i.e. until bpf_object__elf_finish() runs.
         */
245 struct {
246 int fd;
247 void *obj_buf;
248 size_t obj_buf_sz;
249 Elf *elf;
250 GElf_Ehdr ehdr;
251 Elf_Data *symbols;
252 Elf_Data *data;
253 Elf_Data *rodata;
254 Elf_Data *bss;
255 size_t strtabidx;
256 struct {
257 GElf_Shdr shdr;
258 Elf_Data *data;
259 } *reloc;
260 int nr_reloc;
261 int maps_shndx;
262 int text_shndx;
263 int data_shndx;
264 int rodata_shndx;
265 int bss_shndx;
266 } efile;
267
        /*
         * Every loaded bpf_object is linked into bpf_objects_list, which
         * is hidden from callers and walked by bpf_object__next().
         */
272 struct list_head list;
273
274 struct btf *btf;
275 struct btf_ext *btf_ext;
276
277 void *priv;
278 bpf_object_clear_priv_t clear_priv;
279
280 struct bpf_capabilities caps;
281
282 char path[];
283};
284#define obj_elf_valid(o) ((o)->efile.elf)
285
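/*
 * Close every loaded instance of the program and release the fd array as
 * well as any associated BTF func/line info. Safe to call on a program
 * that was never loaded.
 */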
286void bpf_program__unload(struct bpf_program *prog)
287{
288 int i;
289
290 if (!prog)
291 return;
292
        /*
         * instances.nr == -1 means the program was never loaded; any other
         * negative value is an internal bookkeeping error.
         */
297 if (prog->instances.nr > 0) {
298 for (i = 0; i < prog->instances.nr; i++)
299 zclose(prog->instances.fds[i]);
300 } else if (prog->instances.nr != -1) {
301 pr_warning("Internal error: instances.nr is %d\n",
302 prog->instances.nr);
303 }
304
305 prog->instances.nr = -1;
306 zfree(&prog->instances.fds);
307
308 zclose(prog->btf_fd);
309 zfree(&prog->func_info);
310 zfree(&prog->line_info);
311}
312
313static void bpf_program__exit(struct bpf_program *prog)
314{
315 if (!prog)
316 return;
317
318 if (prog->clear_priv)
319 prog->clear_priv(prog, prog->priv);
320
321 prog->priv = NULL;
322 prog->clear_priv = NULL;
323
324 bpf_program__unload(prog);
325 zfree(&prog->name);
326 zfree(&prog->section_name);
327 zfree(&prog->pin_name);
328 zfree(&prog->insns);
329 zfree(&prog->reloc_desc);
330
331 prog->nr_reloc = 0;
332 prog->insns_cnt = 0;
333 prog->idx = -1;
334}
335
336static char *__bpf_program__pin_name(struct bpf_program *prog)
337{
338 char *name, *p;
339
        name = p = strdup(prog->section_name);
        if (!name)
                return NULL;
        while ((p = strchr(p, '/')))
                *p = '_';
343
344 return name;
345}
346
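/*
 * Initialize a bpf_program from the raw instruction bytes of one ELF
 * section: duplicate the section and pin names and copy the instructions
 * into a buffer owned by the program.
 */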
347static int
348bpf_program__init(void *data, size_t size, char *section_name, int idx,
349 struct bpf_program *prog)
350{
351 if (size < sizeof(struct bpf_insn)) {
352 pr_warning("corrupted section '%s'\n", section_name);
353 return -EINVAL;
354 }
355
356 memset(prog, 0, sizeof(*prog));
357
358 prog->section_name = strdup(section_name);
359 if (!prog->section_name) {
360 pr_warning("failed to alloc name for prog under section(%d) %s\n",
361 idx, section_name);
362 goto errout;
363 }
364
365 prog->pin_name = __bpf_program__pin_name(prog);
366 if (!prog->pin_name) {
367 pr_warning("failed to alloc pin name for prog under section(%d) %s\n",
368 idx, section_name);
369 goto errout;
370 }
371
372 prog->insns = malloc(size);
373 if (!prog->insns) {
374 pr_warning("failed to alloc insns for prog under section %s\n",
375 section_name);
376 goto errout;
377 }
378 prog->insns_cnt = size / sizeof(struct bpf_insn);
379 memcpy(prog->insns, data,
380 prog->insns_cnt * sizeof(struct bpf_insn));
381 prog->idx = idx;
382 prog->instances.fds = NULL;
383 prog->instances.nr = -1;
384 prog->type = BPF_PROG_TYPE_UNSPEC;
385 prog->btf_fd = -1;
386
387 return 0;
388errout:
389 bpf_program__exit(prog);
390 return -ENOMEM;
391}
392
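/*
 * Build a bpf_program from the given section data and append it to the
 * object's programs array, grown with reallocarray().
 */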
393static int
394bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
395 char *section_name, int idx)
396{
397 struct bpf_program prog, *progs;
398 int nr_progs, err;
399
400 err = bpf_program__init(data, size, section_name, idx, &prog);
401 if (err)
402 return err;
403
404 prog.caps = &obj->caps;
405 progs = obj->programs;
406 nr_progs = obj->nr_programs;
407
408 progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0]));
409 if (!progs) {
                /*
                 * On reallocarray() failure the original obj->programs
                 * array is still valid, so bpf_object__close() needs no
                 * special handling here.
                 */
415 pr_warning("failed to alloc a new program under section '%s'\n",
416 section_name);
417 bpf_program__exit(&prog);
418 return -ENOMEM;
419 }
420
421 pr_debug("found program %s\n", prog.section_name);
422 obj->programs = progs;
423 obj->nr_programs = nr_progs + 1;
424 prog.obj = obj;
425 progs[nr_progs] = prog;
426 return 0;
427}
428
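/*
 * Resolve each program's name from the global symbol defined in its ELF
 * section; programs living in .text fall back to the name ".text".
 */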
429static int
430bpf_object__init_prog_names(struct bpf_object *obj)
431{
432 Elf_Data *symbols = obj->efile.symbols;
433 struct bpf_program *prog;
434 size_t pi, si;
435
436 for (pi = 0; pi < obj->nr_programs; pi++) {
437 const char *name = NULL;
438
439 prog = &obj->programs[pi];
440
441 for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
442 si++) {
443 GElf_Sym sym;
444
445 if (!gelf_getsym(symbols, si, &sym))
446 continue;
447 if (sym.st_shndx != prog->idx)
448 continue;
449 if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
450 continue;
451
452 name = elf_strptr(obj->efile.elf,
453 obj->efile.strtabidx,
454 sym.st_name);
455 if (!name) {
456 pr_warning("failed to get sym name string for prog %s\n",
457 prog->section_name);
458 return -LIBBPF_ERRNO__LIBELF;
459 }
460 }
461
462 if (!name && prog->idx == obj->efile.text_shndx)
463 name = ".text";
464
465 if (!name) {
466 pr_warning("failed to find sym for prog %s\n",
467 prog->section_name);
468 return -EINVAL;
469 }
470
471 prog->name = strdup(name);
472 if (!prog->name) {
473 pr_warning("failed to allocate memory for prog sym %s\n",
474 name);
475 return -ENOMEM;
476 }
477 }
478
479 return 0;
480}
481
482static struct bpf_object *bpf_object__new(const char *path,
483 void *obj_buf,
484 size_t obj_buf_sz)
485{
486 struct bpf_object *obj;
487 char *end;
488
489 obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
490 if (!obj) {
491 pr_warning("alloc memory failed for %s\n", path);
492 return ERR_PTR(-ENOMEM);
493 }
494
495 strcpy(obj->path, path);
496
497 strncpy(obj->name, basename((void *)path),
498 sizeof(obj->name) - 1);
499 end = strchr(obj->name, '.');
500 if (end)
501 *end = 0;
502
503 obj->efile.fd = -1;
504
        /*
         * The caller-provided obj_buf is only borrowed; it must stay valid
         * until bpf_object__elf_finish() tears down the ELF handle built
         * on top of it.
         */
510 obj->efile.obj_buf = obj_buf;
511 obj->efile.obj_buf_sz = obj_buf_sz;
512 obj->efile.maps_shndx = -1;
513 obj->efile.data_shndx = -1;
514 obj->efile.rodata_shndx = -1;
515 obj->efile.bss_shndx = -1;
516
517 obj->loaded = false;
518
519 INIT_LIST_HEAD(&obj->list);
520 list_add(&obj->list, &bpf_objects_list);
521 return obj;
522}
523
524static void bpf_object__elf_finish(struct bpf_object *obj)
525{
526 if (!obj_elf_valid(obj))
527 return;
528
529 if (obj->efile.elf) {
530 elf_end(obj->efile.elf);
531 obj->efile.elf = NULL;
532 }
533 obj->efile.symbols = NULL;
534 obj->efile.data = NULL;
535 obj->efile.rodata = NULL;
536 obj->efile.bss = NULL;
537
538 zfree(&obj->efile.reloc);
539 obj->efile.nr_reloc = 0;
540 zclose(obj->efile.fd);
541 obj->efile.obj_buf = NULL;
542 obj->efile.obj_buf_sz = 0;
543}
544
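/*
 * Open the ELF handle, either over the caller-supplied memory buffer or by
 * opening obj->path, and check that it is a relocatable eBPF object
 * (ET_REL, machine EM_BPF or EM_NONE).
 */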
545static int bpf_object__elf_init(struct bpf_object *obj)
546{
547 int err = 0;
548 GElf_Ehdr *ep;
549
550 if (obj_elf_valid(obj)) {
551 pr_warning("elf init: internal error\n");
552 return -LIBBPF_ERRNO__LIBELF;
553 }
554
555 if (obj->efile.obj_buf_sz > 0) {
                /*
                 * obj_buf was already validated by bpf_object__open_buffer(),
                 * so it can be handed to libelf directly.
                 */
560 obj->efile.elf = elf_memory(obj->efile.obj_buf,
561 obj->efile.obj_buf_sz);
562 } else {
563 obj->efile.fd = open(obj->path, O_RDONLY);
564 if (obj->efile.fd < 0) {
565 char errmsg[STRERR_BUFSIZE];
566 char *cp = libbpf_strerror_r(errno, errmsg,
567 sizeof(errmsg));
568
569 pr_warning("failed to open %s: %s\n", obj->path, cp);
570 return -errno;
571 }
572
573 obj->efile.elf = elf_begin(obj->efile.fd,
574 LIBBPF_ELF_C_READ_MMAP,
575 NULL);
576 }
577
578 if (!obj->efile.elf) {
579 pr_warning("failed to open %s as ELF file\n",
580 obj->path);
581 err = -LIBBPF_ERRNO__LIBELF;
582 goto errout;
583 }
584
585 if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
586 pr_warning("failed to get EHDR from %s\n",
587 obj->path);
588 err = -LIBBPF_ERRNO__FORMAT;
589 goto errout;
590 }
591 ep = &obj->efile.ehdr;
592
        /* Old LLVM sets e_machine to EM_NONE */
594 if ((ep->e_type != ET_REL) || (ep->e_machine && (ep->e_machine != EM_BPF))) {
595 pr_warning("%s is not an eBPF object file\n",
596 obj->path);
597 err = -LIBBPF_ERRNO__FORMAT;
598 goto errout;
599 }
600
601 return 0;
602errout:
603 bpf_object__elf_finish(obj);
604 return err;
605}
606
607static int
608bpf_object__check_endianness(struct bpf_object *obj)
609{
610 static unsigned int const endian = 1;
611
612 switch (obj->efile.ehdr.e_ident[EI_DATA]) {
613 case ELFDATA2LSB:
                /* Object is little endian; error out on a big endian host. */
615 if (*(unsigned char const *)&endian != 1)
616 goto mismatch;
617 break;
618
619 case ELFDATA2MSB:
                /* Object is big endian; error out on a little endian host. */
621 if (*(unsigned char const *)&endian != 0)
622 goto mismatch;
623 break;
624 default:
625 return -LIBBPF_ERRNO__ENDIAN;
626 }
627
628 return 0;
629
630mismatch:
631 pr_warning("Error: endianness mismatch.\n");
632 return -LIBBPF_ERRNO__ENDIAN;
633}
634
635static int
636bpf_object__init_license(struct bpf_object *obj,
637 void *data, size_t size)
638{
639 memcpy(obj->license, data,
640 min(size, sizeof(obj->license) - 1));
641 pr_debug("license of %s is %s\n", obj->path, obj->license);
642 return 0;
643}
644
645static int
646bpf_object__init_kversion(struct bpf_object *obj,
647 void *data, size_t size)
648{
649 __u32 kver;
650
651 if (size != sizeof(kver)) {
652 pr_warning("invalid kver section in %s\n", obj->path);
653 return -LIBBPF_ERRNO__FORMAT;
654 }
655 memcpy(&kver, data, sizeof(kver));
656 obj->kern_version = kver;
657 pr_debug("kernel version of %s is %x\n", obj->path,
658 obj->kern_version);
659 return 0;
660}
661
662static int compare_bpf_map(const void *_a, const void *_b)
663{
664 const struct bpf_map *a = _a;
665 const struct bpf_map *b = _b;
666
667 return a->offset - b->offset;
668}
669
670static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
671{
672 if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
673 type == BPF_MAP_TYPE_HASH_OF_MAPS)
674 return true;
675 return false;
676}
677
678static int bpf_object_search_section_size(const struct bpf_object *obj,
679 const char *name, size_t *d_size)
680{
681 const GElf_Ehdr *ep = &obj->efile.ehdr;
682 Elf *elf = obj->efile.elf;
683 Elf_Scn *scn = NULL;
684 int idx = 0;
685
686 while ((scn = elf_nextscn(elf, scn)) != NULL) {
687 const char *sec_name;
688 Elf_Data *data;
689 GElf_Shdr sh;
690
691 idx++;
692 if (gelf_getshdr(scn, &sh) != &sh) {
693 pr_warning("failed to get section(%d) header from %s\n",
694 idx, obj->path);
695 return -EIO;
696 }
697
698 sec_name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
699 if (!sec_name) {
700 pr_warning("failed to get section(%d) name from %s\n",
701 idx, obj->path);
702 return -EIO;
703 }
704
705 if (strcmp(name, sec_name))
706 continue;
707
708 data = elf_getdata(scn, 0);
709 if (!data) {
710 pr_warning("failed to get section(%d) data from %s(%s)\n",
711 idx, name, obj->path);
712 return -EIO;
713 }
714
715 *d_size = data->d_size;
716 return 0;
717 }
718
719 return -ENOENT;
720}
721
722int bpf_object__section_size(const struct bpf_object *obj, const char *name,
723 __u32 *size)
724{
725 int ret = -ENOENT;
726 size_t d_size;
727
728 *size = 0;
729 if (!name) {
730 return -EINVAL;
731 } else if (!strcmp(name, ".data")) {
732 if (obj->efile.data)
733 *size = obj->efile.data->d_size;
734 } else if (!strcmp(name, ".bss")) {
735 if (obj->efile.bss)
736 *size = obj->efile.bss->d_size;
737 } else if (!strcmp(name, ".rodata")) {
738 if (obj->efile.rodata)
739 *size = obj->efile.rodata->d_size;
740 } else {
741 ret = bpf_object_search_section_size(obj, name, &d_size);
742 if (!ret)
743 *size = d_size;
744 }
745
746 return *size ? 0 : ret;
747}
748
749int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
750 __u32 *off)
751{
752 Elf_Data *symbols = obj->efile.symbols;
753 const char *sname;
754 size_t si;
755
756 if (!name || !off)
757 return -EINVAL;
758
759 for (si = 0; si < symbols->d_size / sizeof(GElf_Sym); si++) {
760 GElf_Sym sym;
761
762 if (!gelf_getsym(symbols, si, &sym))
763 continue;
764 if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
765 GELF_ST_TYPE(sym.st_info) != STT_OBJECT)
766 continue;
767
768 sname = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
769 sym.st_name);
770 if (!sname) {
771 pr_warning("failed to get sym name string for var %s\n",
772 name);
773 return -EIO;
774 }
775 if (strcmp(name, sname) == 0) {
776 *off = sym.st_value;
777 return 0;
778 }
779 }
780
781 return -ENOENT;
782}
783
784static bool bpf_object__has_maps(const struct bpf_object *obj)
785{
786 return obj->efile.maps_shndx >= 0 ||
787 obj->efile.data_shndx >= 0 ||
788 obj->efile.rodata_shndx >= 0 ||
789 obj->efile.bss_shndx >= 0;
790}
791
792static int
793bpf_object__init_internal_map(struct bpf_object *obj, struct bpf_map *map,
794 enum libbpf_map_type type, Elf_Data *data,
795 void **data_buff)
796{
797 struct bpf_map_def *def = &map->def;
798 char map_name[BPF_OBJ_NAME_LEN];
799
800 map->libbpf_type = type;
801 map->offset = ~(typeof(map->offset))0;
802 snprintf(map_name, sizeof(map_name), "%.8s%.7s", obj->name,
803 libbpf_type_to_btf_name[type]);
804 map->name = strdup(map_name);
805 if (!map->name) {
806 pr_warning("failed to alloc map name\n");
807 return -ENOMEM;
808 }
809
810 def->type = BPF_MAP_TYPE_ARRAY;
811 def->key_size = sizeof(int);
812 def->value_size = data->d_size;
813 def->max_entries = 1;
814 def->map_flags = type == LIBBPF_MAP_RODATA ?
815 BPF_F_RDONLY_PROG : 0;
816 if (data_buff) {
817 *data_buff = malloc(data->d_size);
818 if (!*data_buff) {
819 zfree(&map->name);
820 pr_warning("failed to alloc map content buffer\n");
821 return -ENOMEM;
822 }
823 memcpy(*data_buff, data->d_buf, data->d_size);
824 }
825
826 pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
827 return 0;
828}
829
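/*
 * Build obj->maps from the "maps" section plus, when the kernel supports
 * global data, one internal map per .data/.rodata/.bss section. The
 * resulting array is sorted by section offset.
 */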
830static int
831bpf_object__init_maps(struct bpf_object *obj, int flags)
832{
833 int i, map_idx, map_def_sz = 0, nr_syms, nr_maps = 0, nr_maps_glob = 0;
834 bool strict = !(flags & MAPS_RELAX_COMPAT);
835 Elf_Data *symbols = obj->efile.symbols;
836 Elf_Data *data = NULL;
837 int ret = 0;
838
839 if (!symbols)
840 return -EINVAL;
841 nr_syms = symbols->d_size / sizeof(GElf_Sym);
842
843 if (obj->efile.maps_shndx >= 0) {
844 Elf_Scn *scn = elf_getscn(obj->efile.elf,
845 obj->efile.maps_shndx);
846
847 if (scn)
848 data = elf_getdata(scn, NULL);
849 if (!scn || !data) {
850 pr_warning("failed to get Elf_Data from map section %d\n",
851 obj->efile.maps_shndx);
852 return -EINVAL;
853 }
854 }
855
        /*
         * Count the maps. Each map needs its own symbol in the "maps"
         * section; arrays of maps are not supported. When the kernel
         * supports global data, each of .data/.rodata/.bss becomes one
         * additional internal map.
         */
863 if (obj->caps.global_data) {
864 if (obj->efile.data_shndx >= 0)
865 nr_maps_glob++;
866 if (obj->efile.rodata_shndx >= 0)
867 nr_maps_glob++;
868 if (obj->efile.bss_shndx >= 0)
869 nr_maps_glob++;
870 }
871
872 for (i = 0; data && i < nr_syms; i++) {
873 GElf_Sym sym;
874
875 if (!gelf_getsym(symbols, i, &sym))
876 continue;
877 if (sym.st_shndx != obj->efile.maps_shndx)
878 continue;
879 nr_maps++;
880 }
881
882 if (!nr_maps && !nr_maps_glob)
883 return 0;
884
        /* Assume equally sized map definitions. */
        if (data && nr_maps) {
887 pr_debug("maps in %s: %d maps in %zd bytes\n", obj->path,
888 nr_maps, data->d_size);
889
890 map_def_sz = data->d_size / nr_maps;
891 if (!data->d_size || (data->d_size % nr_maps) != 0) {
892 pr_warning("unable to determine map definition size "
893 "section %s, %d maps in %zd bytes\n",
894 obj->path, nr_maps, data->d_size);
895 return -EINVAL;
896 }
897 }
898
899 nr_maps += nr_maps_glob;
900 obj->maps = calloc(nr_maps, sizeof(obj->maps[0]));
901 if (!obj->maps) {
902 pr_warning("alloc maps for object failed\n");
903 return -ENOMEM;
904 }
905 obj->nr_maps = nr_maps;
906
907 for (i = 0; i < nr_maps; i++) {
                /*
                 * Pre-set every fd to -1 so a failure path never closes
                 * fd 0 (stdin); zclose() ignores negative fds.
                 */
913 obj->maps[i].fd = -1;
914 obj->maps[i].inner_map_fd = -1;
915 }
916
        /*
         * Fill obj->maps with the definitions found in the "maps" section.
         */
920 for (i = 0, map_idx = 0; data && i < nr_syms; i++) {
921 GElf_Sym sym;
922 const char *map_name;
923 struct bpf_map_def *def;
924
925 if (!gelf_getsym(symbols, i, &sym))
926 continue;
927 if (sym.st_shndx != obj->efile.maps_shndx)
928 continue;
929
930 map_name = elf_strptr(obj->efile.elf,
931 obj->efile.strtabidx,
932 sym.st_name);
933
934 obj->maps[map_idx].libbpf_type = LIBBPF_MAP_UNSPEC;
935 obj->maps[map_idx].offset = sym.st_value;
936 if (sym.st_value + map_def_sz > data->d_size) {
937 pr_warning("corrupted maps section in %s: last map \"%s\" too small\n",
938 obj->path, map_name);
939 return -EINVAL;
940 }
941
942 obj->maps[map_idx].name = strdup(map_name);
943 if (!obj->maps[map_idx].name) {
944 pr_warning("failed to alloc map name\n");
945 return -ENOMEM;
946 }
947 pr_debug("map %d is \"%s\"\n", map_idx,
948 obj->maps[map_idx].name);
949 def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
                /*
                 * If the map definition in the object file fits within
                 * struct bpf_map_def, copy it as-is; any newer fields libbpf
                 * knows about stay zeroed thanks to the calloc() above.
                 */
956 if (map_def_sz <= sizeof(struct bpf_map_def)) {
957 memcpy(&obj->maps[map_idx].def, def, map_def_sz);
958 } else {
                        /*
                         * The object's map definition is larger than libbpf's
                         * struct bpf_map_def. Accept it only if the extra
                         * bytes are all zero; otherwise warn, and reject it
                         * in strict mode.
                         */
965 char *b;
966 for (b = ((char *)def) + sizeof(struct bpf_map_def);
967 b < ((char *)def) + map_def_sz; b++) {
968 if (*b != 0) {
969 pr_warning("maps section in %s: \"%s\" "
970 "has unrecognized, non-zero "
971 "options\n",
972 obj->path, map_name);
973 if (strict)
974 return -EINVAL;
975 }
976 }
977 memcpy(&obj->maps[map_idx].def, def,
978 sizeof(struct bpf_map_def));
979 }
980 map_idx++;
981 }
982
983 if (!obj->caps.global_data)
984 goto finalize;
985
        /*
         * Populate the remaining slots of obj->maps with the internal
         * .data/.rodata/.bss maps.
         */
989 if (obj->efile.data_shndx >= 0)
990 ret = bpf_object__init_internal_map(obj, &obj->maps[map_idx++],
991 LIBBPF_MAP_DATA,
992 obj->efile.data,
993 &obj->sections.data);
994 if (!ret && obj->efile.rodata_shndx >= 0)
995 ret = bpf_object__init_internal_map(obj, &obj->maps[map_idx++],
996 LIBBPF_MAP_RODATA,
997 obj->efile.rodata,
998 &obj->sections.rodata);
999 if (!ret && obj->efile.bss_shndx >= 0)
1000 ret = bpf_object__init_internal_map(obj, &obj->maps[map_idx++],
1001 LIBBPF_MAP_BSS,
1002 obj->efile.bss, NULL);
1003finalize:
1004 if (!ret)
1005 qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]),
1006 compare_bpf_map);
1007 return ret;
1008}
1009
1010static bool section_have_execinstr(struct bpf_object *obj, int idx)
1011{
1012 Elf_Scn *scn;
1013 GElf_Shdr sh;
1014
1015 scn = elf_getscn(obj->efile.elf, idx);
1016 if (!scn)
1017 return false;
1018
1019 if (gelf_getshdr(scn, &sh) != &sh)
1020 return false;
1021
1022 if (sh.sh_flags & SHF_EXECINSTR)
1023 return true;
1024
1025 return false;
1026}
1027
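/*
 * Rewrite BTF kinds the running kernel does not understand into older,
 * compatible ones: VAR becomes INT, DATASEC becomes STRUCT, FUNC_PROTO
 * becomes ENUM and FUNC becomes TYPEDEF.
 */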
1028static void bpf_object__sanitize_btf(struct bpf_object *obj)
1029{
1030 bool has_datasec = obj->caps.btf_datasec;
1031 bool has_func = obj->caps.btf_func;
1032 struct btf *btf = obj->btf;
1033 struct btf_type *t;
1034 int i, j, vlen;
1035 __u16 kind;
1036
1037 if (!obj->btf || (has_func && has_datasec))
1038 return;
1039
1040 for (i = 1; i <= btf__get_nr_types(btf); i++) {
1041 t = (struct btf_type *)btf__type_by_id(btf, i);
1042 kind = BTF_INFO_KIND(t->info);
1043
1044 if (!has_datasec && kind == BTF_KIND_VAR) {
                        /* Replace VAR with INT */
1046 t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
1047 t->size = sizeof(int);
1048 *(int *)(t+1) = BTF_INT_ENC(0, 0, 32);
1049 } else if (!has_datasec && kind == BTF_KIND_DATASEC) {
                        /* Replace DATASEC with STRUCT */
1051 struct btf_var_secinfo *v = (void *)(t + 1);
1052 struct btf_member *m = (void *)(t + 1);
1053 struct btf_type *vt;
1054 char *name;
1055
1056 name = (char *)btf__name_by_offset(btf, t->name_off);
1057 while (*name) {
1058 if (*name == '.')
1059 *name = '_';
1060 name++;
1061 }
1062
1063 vlen = BTF_INFO_VLEN(t->info);
1064 t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
1065 for (j = 0; j < vlen; j++, v++, m++) {
1066
1067 m->offset = v->offset * 8;
1068 m->type = v->type;
1069
1070 vt = (void *)btf__type_by_id(btf, v->type);
1071 m->name_off = vt->name_off;
1072 }
1073 } else if (!has_func && kind == BTF_KIND_FUNC_PROTO) {
                        /* Replace FUNC_PROTO with ENUM */
1075 vlen = BTF_INFO_VLEN(t->info);
1076 t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
1077 t->size = sizeof(__u32);
1078 } else if (!has_func && kind == BTF_KIND_FUNC) {
                        /* Replace FUNC with TYPEDEF */
1080 t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
1081 }
1082 }
1083}
1084
1085static void bpf_object__sanitize_btf_ext(struct bpf_object *obj)
1086{
1087 if (!obj->btf_ext)
1088 return;
1089
1090 if (!obj->caps.btf_func) {
1091 btf_ext__free(obj->btf_ext);
1092 obj->btf_ext = NULL;
1093 }
1094}
1095
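/*
 * Walk every ELF section and record the pieces libbpf cares about:
 * license, version, maps, BTF and .BTF.ext, the symbol table, program
 * sections, data sections and relocation sections.
 */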
1096static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
1097{
1098 Elf *elf = obj->efile.elf;
1099 GElf_Ehdr *ep = &obj->efile.ehdr;
1100 Elf_Data *btf_ext_data = NULL;
1101 Elf_Data *btf_data = NULL;
1102 Elf_Scn *scn = NULL;
1103 int idx = 0, err = 0;
1104
        /* Make sure the section header string table is readable before
         * calling elf_strptr() on a possibly corrupted/truncated ELF.
         */
1106 if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
1107 pr_warning("failed to get e_shstrndx from %s\n",
1108 obj->path);
1109 return -LIBBPF_ERRNO__FORMAT;
1110 }
1111
1112 while ((scn = elf_nextscn(elf, scn)) != NULL) {
1113 char *name;
1114 GElf_Shdr sh;
1115 Elf_Data *data;
1116
1117 idx++;
1118 if (gelf_getshdr(scn, &sh) != &sh) {
1119 pr_warning("failed to get section(%d) header from %s\n",
1120 idx, obj->path);
1121 err = -LIBBPF_ERRNO__FORMAT;
1122 goto out;
1123 }
1124
1125 name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
1126 if (!name) {
1127 pr_warning("failed to get section(%d) name from %s\n",
1128 idx, obj->path);
1129 err = -LIBBPF_ERRNO__FORMAT;
1130 goto out;
1131 }
1132
1133 data = elf_getdata(scn, 0);
1134 if (!data) {
1135 pr_warning("failed to get section(%d) data from %s(%s)\n",
1136 idx, name, obj->path);
1137 err = -LIBBPF_ERRNO__FORMAT;
1138 goto out;
1139 }
1140 pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
1141 idx, name, (unsigned long)data->d_size,
1142 (int)sh.sh_link, (unsigned long)sh.sh_flags,
1143 (int)sh.sh_type);
1144
1145 if (strcmp(name, "license") == 0) {
1146 err = bpf_object__init_license(obj,
1147 data->d_buf,
1148 data->d_size);
1149 } else if (strcmp(name, "version") == 0) {
1150 err = bpf_object__init_kversion(obj,
1151 data->d_buf,
1152 data->d_size);
1153 } else if (strcmp(name, "maps") == 0) {
1154 obj->efile.maps_shndx = idx;
1155 } else if (strcmp(name, BTF_ELF_SEC) == 0) {
1156 btf_data = data;
1157 } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
1158 btf_ext_data = data;
1159 } else if (sh.sh_type == SHT_SYMTAB) {
1160 if (obj->efile.symbols) {
1161 pr_warning("bpf: multiple SYMTAB in %s\n",
1162 obj->path);
1163 err = -LIBBPF_ERRNO__FORMAT;
1164 } else {
1165 obj->efile.symbols = data;
1166 obj->efile.strtabidx = sh.sh_link;
1167 }
1168 } else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) {
1169 if (sh.sh_flags & SHF_EXECINSTR) {
1170 if (strcmp(name, ".text") == 0)
1171 obj->efile.text_shndx = idx;
1172 err = bpf_object__add_program(obj, data->d_buf,
1173 data->d_size, name, idx);
1174 if (err) {
1175 char errmsg[STRERR_BUFSIZE];
1176 char *cp = libbpf_strerror_r(-err, errmsg,
1177 sizeof(errmsg));
1178
                                pr_warning("failed to alloc program %s (%s): %s\n",
                                           name, obj->path, cp);
1181 }
1182 } else if (strcmp(name, ".data") == 0) {
1183 obj->efile.data = data;
1184 obj->efile.data_shndx = idx;
1185 } else if (strcmp(name, ".rodata") == 0) {
1186 obj->efile.rodata = data;
1187 obj->efile.rodata_shndx = idx;
1188 } else {
1189 pr_debug("skip section(%d) %s\n", idx, name);
1190 }
1191 } else if (sh.sh_type == SHT_REL) {
1192 void *reloc = obj->efile.reloc;
1193 int nr_reloc = obj->efile.nr_reloc + 1;
1194 int sec = sh.sh_info;
1195
                        /* Only collect relocations for executable sections. */
1197 if (!section_have_execinstr(obj, sec)) {
1198 pr_debug("skip relo %s(%d) for section(%d)\n",
1199 name, idx, sec);
1200 continue;
1201 }
1202
1203 reloc = reallocarray(reloc, nr_reloc,
1204 sizeof(*obj->efile.reloc));
1205 if (!reloc) {
1206 pr_warning("realloc failed\n");
1207 err = -ENOMEM;
1208 } else {
1209 int n = nr_reloc - 1;
1210
1211 obj->efile.reloc = reloc;
1212 obj->efile.nr_reloc = nr_reloc;
1213
1214 obj->efile.reloc[n].shdr = sh;
1215 obj->efile.reloc[n].data = data;
1216 }
1217 } else if (sh.sh_type == SHT_NOBITS && strcmp(name, ".bss") == 0) {
1218 obj->efile.bss = data;
1219 obj->efile.bss_shndx = idx;
1220 } else {
1221 pr_debug("skip section(%d) %s\n", idx, name);
1222 }
1223 if (err)
1224 goto out;
1225 }
1226
1227 if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
1228 pr_warning("Corrupted ELF file: index of strtab invalid\n");
                return -LIBBPF_ERRNO__FORMAT;
1230 }
1231 if (btf_data) {
1232 obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
1233 if (IS_ERR(obj->btf)) {
1234 pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
1235 BTF_ELF_SEC, PTR_ERR(obj->btf));
1236 obj->btf = NULL;
1237 } else {
1238 err = btf__finalize_data(obj, obj->btf);
1239 if (!err) {
1240 bpf_object__sanitize_btf(obj);
1241 err = btf__load(obj->btf);
1242 }
1243 if (err) {
1244 pr_warning("Error finalizing and loading %s into kernel: %d. Ignored and continue.\n",
1245 BTF_ELF_SEC, err);
1246 btf__free(obj->btf);
1247 obj->btf = NULL;
1248 err = 0;
1249 }
1250 }
1251 }
1252 if (btf_ext_data) {
1253 if (!obj->btf) {
1254 pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n",
1255 BTF_EXT_ELF_SEC, BTF_ELF_SEC);
1256 } else {
1257 obj->btf_ext = btf_ext__new(btf_ext_data->d_buf,
1258 btf_ext_data->d_size);
1259 if (IS_ERR(obj->btf_ext)) {
1260 pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
1261 BTF_EXT_ELF_SEC,
1262 PTR_ERR(obj->btf_ext));
1263 obj->btf_ext = NULL;
1264 } else {
1265 bpf_object__sanitize_btf_ext(obj);
1266 }
1267 }
1268 }
1269 if (bpf_object__has_maps(obj)) {
1270 err = bpf_object__init_maps(obj, flags);
1271 if (err)
1272 goto out;
1273 }
1274 err = bpf_object__init_prog_names(obj);
1275out:
1276 return err;
1277}
1278
1279static struct bpf_program *
1280bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
1281{
1282 struct bpf_program *prog;
1283 size_t i;
1284
1285 for (i = 0; i < obj->nr_programs; i++) {
1286 prog = &obj->programs[i];
1287 if (prog->idx == idx)
1288 return prog;
1289 }
1290 return NULL;
1291}
1292
1293struct bpf_program *
1294bpf_object__find_program_by_title(struct bpf_object *obj, const char *title)
1295{
1296 struct bpf_program *pos;
1297
1298 bpf_object__for_each_program(pos, obj) {
1299 if (pos->section_name && !strcmp(pos->section_name, title))
1300 return pos;
1301 }
1302 return NULL;
1303}
1304
1305static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
1306 int shndx)
1307{
1308 return shndx == obj->efile.data_shndx ||
1309 shndx == obj->efile.bss_shndx ||
1310 shndx == obj->efile.rodata_shndx;
1311}
1312
1313static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
1314 int shndx)
1315{
1316 return shndx == obj->efile.maps_shndx;
1317}
1318
1319static bool bpf_object__relo_in_known_section(const struct bpf_object *obj,
1320 int shndx)
1321{
1322 return shndx == obj->efile.text_shndx ||
1323 bpf_object__shndx_is_maps(obj, shndx) ||
1324 bpf_object__shndx_is_data(obj, shndx);
1325}
1326
1327static enum libbpf_map_type
1328bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
1329{
1330 if (shndx == obj->efile.data_shndx)
1331 return LIBBPF_MAP_DATA;
1332 else if (shndx == obj->efile.bss_shndx)
1333 return LIBBPF_MAP_BSS;
1334 else if (shndx == obj->efile.rodata_shndx)
1335 return LIBBPF_MAP_RODATA;
1336 else
1337 return LIBBPF_MAP_UNSPEC;
1338}
1339
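/*
 * Parse one relocation section and translate every entry into a
 * reloc_desc: a map load (RELO_LD64), a global data access (RELO_DATA) or
 * a call into .text (RELO_CALL).
 */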
1340static int
1341bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
1342 Elf_Data *data, struct bpf_object *obj)
1343{
1344 Elf_Data *symbols = obj->efile.symbols;
1345 struct bpf_map *maps = obj->maps;
1346 size_t nr_maps = obj->nr_maps;
1347 int i, nrels;
1348
1349 pr_debug("collecting relocating info for: '%s'\n",
1350 prog->section_name);
1351 nrels = shdr->sh_size / shdr->sh_entsize;
1352
1353 prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
1354 if (!prog->reloc_desc) {
1355 pr_warning("failed to alloc memory in relocation\n");
1356 return -ENOMEM;
1357 }
1358 prog->nr_reloc = nrels;
1359
1360 for (i = 0; i < nrels; i++) {
1361 GElf_Sym sym;
1362 GElf_Rel rel;
1363 unsigned int insn_idx;
1364 unsigned int shdr_idx;
1365 struct bpf_insn *insns = prog->insns;
1366 enum libbpf_map_type type;
1367 const char *name;
1368 size_t map_idx;
1369
1370 if (!gelf_getrel(data, i, &rel)) {
1371 pr_warning("relocation: failed to get %d reloc\n", i);
1372 return -LIBBPF_ERRNO__FORMAT;
1373 }
1374
1375 if (!gelf_getsym(symbols,
1376 GELF_R_SYM(rel.r_info),
1377 &sym)) {
1378 pr_warning("relocation: symbol %"PRIx64" not found\n",
1379 GELF_R_SYM(rel.r_info));
1380 return -LIBBPF_ERRNO__FORMAT;
1381 }
1382
1383 name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
1384 sym.st_name) ? : "<?>";
1385
1386 pr_debug("relo for %lld value %lld name %d (\'%s\')\n",
1387 (long long) (rel.r_info >> 32),
1388 (long long) sym.st_value, sym.st_name, name);
1389
1390 shdr_idx = sym.st_shndx;
1391 if (!bpf_object__relo_in_known_section(obj, shdr_idx)) {
1392 pr_warning("Program '%s' contains unrecognized relo data pointing to section %u\n",
1393 prog->section_name, shdr_idx);
1394 return -LIBBPF_ERRNO__RELOC;
1395 }
1396
1397 insn_idx = rel.r_offset / sizeof(struct bpf_insn);
1398 pr_debug("relocation: insn_idx=%u\n", insn_idx);
1399
1400 if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) {
1401 if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) {
1402 pr_warning("incorrect bpf_call opcode\n");
1403 return -LIBBPF_ERRNO__RELOC;
1404 }
1405 prog->reloc_desc[i].type = RELO_CALL;
1406 prog->reloc_desc[i].insn_idx = insn_idx;
1407 prog->reloc_desc[i].text_off = sym.st_value;
1408 obj->has_pseudo_calls = true;
1409 continue;
1410 }
1411
1412 if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
1413 pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
1414 insn_idx, insns[insn_idx].code);
1415 return -LIBBPF_ERRNO__RELOC;
1416 }
1417
1418 if (bpf_object__shndx_is_maps(obj, shdr_idx) ||
1419 bpf_object__shndx_is_data(obj, shdr_idx)) {
1420 type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
1421 if (type != LIBBPF_MAP_UNSPEC) {
1422 if (GELF_ST_BIND(sym.st_info) == STB_GLOBAL) {
1423 pr_warning("bpf: relocation: not yet supported relo for non-static global \'%s\' variable found in insns[%d].code 0x%x\n",
1424 name, insn_idx, insns[insn_idx].code);
1425 return -LIBBPF_ERRNO__RELOC;
1426 }
1427 if (!obj->caps.global_data) {
1428 pr_warning("bpf: relocation: kernel does not support global \'%s\' variable access in insns[%d]\n",
1429 name, insn_idx);
1430 return -LIBBPF_ERRNO__RELOC;
1431 }
1432 }
1433
1434 for (map_idx = 0; map_idx < nr_maps; map_idx++) {
1435 if (maps[map_idx].libbpf_type != type)
1436 continue;
1437 if (type != LIBBPF_MAP_UNSPEC ||
1438 (type == LIBBPF_MAP_UNSPEC &&
1439 maps[map_idx].offset == sym.st_value)) {
1440 pr_debug("relocation: find map %zd (%s) for insn %u\n",
1441 map_idx, maps[map_idx].name, insn_idx);
1442 break;
1443 }
1444 }
1445
1446 if (map_idx >= nr_maps) {
                        pr_warning("bpf relocation: map_idx %d larger than %d\n",
                                   (int)map_idx, (int)nr_maps - 1);
1449 return -LIBBPF_ERRNO__RELOC;
1450 }
1451
1452 prog->reloc_desc[i].type = type != LIBBPF_MAP_UNSPEC ?
1453 RELO_DATA : RELO_LD64;
1454 prog->reloc_desc[i].insn_idx = insn_idx;
1455 prog->reloc_desc[i].map_idx = map_idx;
1456 }
1457 }
1458 return 0;
1459}
1460
1461static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf)
1462{
1463 struct bpf_map_def *def = &map->def;
1464 __u32 key_type_id = 0, value_type_id = 0;
1465 int ret;
1466
1467 if (!bpf_map__is_internal(map)) {
1468 ret = btf__get_map_kv_tids(btf, map->name, def->key_size,
1469 def->value_size, &key_type_id,
1470 &value_type_id);
1471 } else {
                /*
                 * LLVM annotates global data differently in BTF: the whole
                 * section is described by a single type named ".data",
                 * ".bss" or ".rodata", so look it up by name instead.
                 */
1476 ret = btf__find_by_name(btf,
1477 libbpf_type_to_btf_name[map->libbpf_type]);
1478 }
1479 if (ret < 0)
1480 return ret;
1481
1482 map->btf_key_type_id = key_type_id;
1483 map->btf_value_type_id = bpf_map__is_internal(map) ?
1484 ret : value_type_id;
1485 return 0;
1486}
1487
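/*
 * Point the map at an already existing kernel map fd (e.g. one retrieved
 * from a pinned path): dup the fd with O_CLOEXEC and adopt its name and
 * definition, so bpf_object__create_maps() will not re-create it.
 */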
1488int bpf_map__reuse_fd(struct bpf_map *map, int fd)
1489{
1490 struct bpf_map_info info = {};
1491 __u32 len = sizeof(info);
1492 int new_fd, err;
1493 char *new_name;
1494
1495 err = bpf_obj_get_info_by_fd(fd, &info, &len);
1496 if (err)
1497 return err;
1498
1499 new_name = strdup(info.name);
1500 if (!new_name)
1501 return -errno;
1502
1503 new_fd = open("/", O_RDONLY | O_CLOEXEC);
1504 if (new_fd < 0)
1505 goto err_free_new_name;
1506
1507 new_fd = dup3(fd, new_fd, O_CLOEXEC);
1508 if (new_fd < 0)
1509 goto err_close_new_fd;
1510
1511 err = zclose(map->fd);
1512 if (err)
1513 goto err_close_new_fd;
1514 free(map->name);
1515
1516 map->fd = new_fd;
1517 map->name = new_name;
1518 map->def.type = info.type;
1519 map->def.key_size = info.key_size;
1520 map->def.value_size = info.value_size;
1521 map->def.max_entries = info.max_entries;
1522 map->def.map_flags = info.map_flags;
1523 map->btf_key_type_id = info.btf_key_type_id;
1524 map->btf_value_type_id = info.btf_value_type_id;
1525
1526 return 0;
1527
1528err_close_new_fd:
1529 close(new_fd);
1530err_free_new_name:
1531 free(new_name);
1532 return -errno;
1533}
1534
1535int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
1536{
1537 if (!map || !max_entries)
1538 return -EINVAL;
1539
        /* Once the map has been created its attributes cannot change. */
1541 if (map->fd >= 0)
1542 return -EBUSY;
1543
1544 map->def.max_entries = max_entries;
1545
1546 return 0;
1547}
1548
1549static int
1550bpf_object__probe_name(struct bpf_object *obj)
1551{
1552 struct bpf_load_program_attr attr;
1553 char *cp, errmsg[STRERR_BUFSIZE];
1554 struct bpf_insn insns[] = {
1555 BPF_MOV64_IMM(BPF_REG_0, 0),
1556 BPF_EXIT_INSN(),
1557 };
1558 int ret;
1559
        /* First make sure that a minimal program loads at all. */

1562 memset(&attr, 0, sizeof(attr));
1563 attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
1564 attr.insns = insns;
1565 attr.insns_cnt = ARRAY_SIZE(insns);
1566 attr.license = "GPL";
1567
1568 ret = bpf_load_program_xattr(&attr, NULL, 0);
1569 if (ret < 0) {
1570 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1571 pr_warning("Error in %s():%s(%d). Couldn't load basic 'r0 = 0' BPF program.\n",
1572 __func__, cp, errno);
1573 return -errno;
1574 }
1575 close(ret);
1576
        /* Then retry the same program with a name attached. */

1579 attr.name = "test";
1580 ret = bpf_load_program_xattr(&attr, NULL, 0);
1581 if (ret >= 0) {
1582 obj->caps.name = 1;
1583 close(ret);
1584 }
1585
1586 return 0;
1587}
1588
1589static int
1590bpf_object__probe_global_data(struct bpf_object *obj)
1591{
1592 struct bpf_load_program_attr prg_attr;
1593 struct bpf_create_map_attr map_attr;
1594 char *cp, errmsg[STRERR_BUFSIZE];
1595 struct bpf_insn insns[] = {
1596 BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
1597 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
1598 BPF_MOV64_IMM(BPF_REG_0, 0),
1599 BPF_EXIT_INSN(),
1600 };
1601 int ret, map;
1602
1603 memset(&map_attr, 0, sizeof(map_attr));
1604 map_attr.map_type = BPF_MAP_TYPE_ARRAY;
1605 map_attr.key_size = sizeof(int);
1606 map_attr.value_size = 32;
1607 map_attr.max_entries = 1;
1608
1609 map = bpf_create_map_xattr(&map_attr);
1610 if (map < 0) {
1611 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1612 pr_warning("Error in %s():%s(%d). Couldn't create simple array map.\n",
1613 __func__, cp, errno);
1614 return -errno;
1615 }
1616
1617 insns[0].imm = map;
1618
1619 memset(&prg_attr, 0, sizeof(prg_attr));
1620 prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
1621 prg_attr.insns = insns;
1622 prg_attr.insns_cnt = ARRAY_SIZE(insns);
1623 prg_attr.license = "GPL";
1624
1625 ret = bpf_load_program_xattr(&prg_attr, NULL, 0);
1626 if (ret >= 0) {
1627 obj->caps.global_data = 1;
1628 close(ret);
1629 }
1630
1631 close(map);
1632 return 0;
1633}
1634
1635static int bpf_object__probe_btf_func(struct bpf_object *obj)
1636{
1637 const char strs[] = "\0int\0x\0a";
1638
1639 __u32 types[] = {
1640
1641 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
1642
1643 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
1644 BTF_PARAM_ENC(7, 1),
1645
1646 BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
1647 };
1648 int btf_fd;
1649
1650 btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
1651 strs, sizeof(strs));
1652 if (btf_fd >= 0) {
1653 obj->caps.btf_func = 1;
1654 close(btf_fd);
1655 return 1;
1656 }
1657
1658 return 0;
1659}
1660
1661static int bpf_object__probe_btf_datasec(struct bpf_object *obj)
1662{
1663 const char strs[] = "\0x\0.data";
1664
1665 __u32 types[] = {
1666
1667 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
1668
1669 BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
1670 BTF_VAR_STATIC,
1671
1672 BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
1673 BTF_VAR_SECINFO_ENC(2, 0, 4),
1674 };
1675 int btf_fd;
1676
1677 btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
1678 strs, sizeof(strs));
1679 if (btf_fd >= 0) {
1680 obj->caps.btf_datasec = 1;
1681 close(btf_fd);
1682 return 1;
1683 }
1684
1685 return 0;
1686}
1687
1688static int
1689bpf_object__probe_caps(struct bpf_object *obj)
1690{
1691 int (*probe_fn[])(struct bpf_object *obj) = {
1692 bpf_object__probe_name,
1693 bpf_object__probe_global_data,
1694 bpf_object__probe_btf_func,
1695 bpf_object__probe_btf_datasec,
1696 };
1697 int i, ret;
1698
1699 for (i = 0; i < ARRAY_SIZE(probe_fn); i++) {
1700 ret = probe_fn[i](obj);
1701 if (ret < 0)
1702 pr_debug("Probe #%d failed with %d.\n", i, ret);
1703 }
1704
1705 return 0;
1706}
1707
1708static int
1709bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
1710{
1711 char *cp, errmsg[STRERR_BUFSIZE];
1712 int err, zero = 0;
1713 __u8 *data;
1714
        /* Nothing to do for .bss: the kernel zero-initializes array values. */
1716 if (map->libbpf_type == LIBBPF_MAP_BSS)
1717 return 0;
1718
1719 data = map->libbpf_type == LIBBPF_MAP_DATA ?
1720 obj->sections.data : obj->sections.rodata;
1721
1722 err = bpf_map_update_elem(map->fd, &zero, data, 0);
1723
1724 if (!err && map->libbpf_type == LIBBPF_MAP_RODATA) {
1725 err = bpf_map_freeze(map->fd);
1726 if (err) {
1727 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1728 pr_warning("Error freezing map(%s) as read-only: %s\n",
1729 map->name, cp);
1730 err = 0;
1731 }
1732 }
1733 return err;
1734}
1735
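/*
 * Create a kernel map for every entry in obj->maps that does not already
 * have an fd, attaching BTF key/value type ids when available and
 * pre-populating the internal .data/.rodata maps.
 */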
1736static int
1737bpf_object__create_maps(struct bpf_object *obj)
1738{
1739 struct bpf_create_map_attr create_attr = {};
1740 unsigned int i;
1741 int err;
1742
1743 for (i = 0; i < obj->nr_maps; i++) {
1744 struct bpf_map *map = &obj->maps[i];
1745 struct bpf_map_def *def = &map->def;
1746 char *cp, errmsg[STRERR_BUFSIZE];
1747 int *pfd = &map->fd;
1748
1749 if (map->fd >= 0) {
1750 pr_debug("skip map create (preset) %s: fd=%d\n",
1751 map->name, map->fd);
1752 continue;
1753 }
1754
1755 if (obj->caps.name)
1756 create_attr.name = map->name;
1757 create_attr.map_ifindex = map->map_ifindex;
1758 create_attr.map_type = def->type;
1759 create_attr.map_flags = def->map_flags;
1760 create_attr.key_size = def->key_size;
1761 create_attr.value_size = def->value_size;
1762 create_attr.max_entries = def->max_entries;
1763 create_attr.btf_fd = 0;
1764 create_attr.btf_key_type_id = 0;
1765 create_attr.btf_value_type_id = 0;
1766 if (bpf_map_type__is_map_in_map(def->type) &&
1767 map->inner_map_fd >= 0)
1768 create_attr.inner_map_fd = map->inner_map_fd;
1769
1770 if (obj->btf && !bpf_map_find_btf_info(map, obj->btf)) {
1771 create_attr.btf_fd = btf__fd(obj->btf);
1772 create_attr.btf_key_type_id = map->btf_key_type_id;
1773 create_attr.btf_value_type_id = map->btf_value_type_id;
1774 }
1775
1776 *pfd = bpf_create_map_xattr(&create_attr);
1777 if (*pfd < 0 && create_attr.btf_key_type_id) {
1778 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1779 pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
1780 map->name, cp, errno);
1781 create_attr.btf_fd = 0;
1782 create_attr.btf_key_type_id = 0;
1783 create_attr.btf_value_type_id = 0;
1784 map->btf_key_type_id = 0;
1785 map->btf_value_type_id = 0;
1786 *pfd = bpf_create_map_xattr(&create_attr);
1787 }
1788
1789 if (*pfd < 0) {
1790 size_t j;
1791
1792 err = *pfd;
1793err_out:
1794 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1795 pr_warning("failed to create map (name: '%s'): %s\n",
1796 map->name, cp);
1797 for (j = 0; j < i; j++)
1798 zclose(obj->maps[j].fd);
1799 return err;
1800 }
1801
1802 if (bpf_map__is_internal(map)) {
1803 err = bpf_object__populate_internal_map(obj, map);
1804 if (err < 0) {
1805 zclose(*pfd);
1806 goto err_out;
1807 }
1808 }
1809
1810 pr_debug("create map %s: fd=%d\n", map->name, *pfd);
1811 }
1812
1813 return 0;
1814}
1815
1816static int
1817check_btf_ext_reloc_err(struct bpf_program *prog, int err,
1818 void *btf_prog_info, const char *info_name)
1819{
1820 if (err != -ENOENT) {
1821 pr_warning("Error in loading %s for sec %s.\n",
1822 info_name, prog->section_name);
1823 return err;
1824 }
1825
1826
1827
1828 if (btf_prog_info) {
                /*
                 * Some info has already been collected, so failing on a
                 * later .BTF.ext record is a hard error.
                 */
1834 pr_warning("Error in relocating %s for sec %s.\n",
1835 info_name, prog->section_name);
1836 return err;
1837 }
        /*
         * The very first lookup failed: treat this kind of info as absent
         * and continue without it.
         */
1843 pr_warning("Cannot find %s for main program sec %s. Ignore all %s.\n",
1844 info_name, prog->section_name, info_name);
1845 return 0;
1846}
1847
1848static int
1849bpf_program_reloc_btf_ext(struct bpf_program *prog, struct bpf_object *obj,
1850 const char *section_name, __u32 insn_offset)
1851{
1852 int err;
1853
1854 if (!insn_offset || prog->func_info) {
                /*
                 * insn_offset == 0 means this is the main program; for a
                 * sub-program the main program's func_info must already
                 * have been loaded (prog->func_info != NULL).
                 */
1861 err = btf_ext__reloc_func_info(obj->btf, obj->btf_ext,
1862 section_name, insn_offset,
1863 &prog->func_info,
1864 &prog->func_info_cnt);
1865 if (err)
1866 return check_btf_ext_reloc_err(prog, err,
1867 prog->func_info,
1868 "bpf_func_info");
1869
1870 prog->func_info_rec_size = btf_ext__func_info_rec_size(obj->btf_ext);
1871 }
1872
1873 if (!insn_offset || prog->line_info) {
1874 err = btf_ext__reloc_line_info(obj->btf, obj->btf_ext,
1875 section_name, insn_offset,
1876 &prog->line_info,
1877 &prog->line_info_cnt);
1878 if (err)
1879 return check_btf_ext_reloc_err(prog, err,
1880 prog->line_info,
1881 "bpf_line_info");
1882
1883 prog->line_info_rec_size = btf_ext__line_info_rec_size(obj->btf_ext);
1884 }
1885
1886 if (!insn_offset)
1887 prog->btf_fd = btf__fd(obj->btf);
1888
1889 return 0;
1890}
1891
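/*
 * Resolve a RELO_CALL relocation: on first use, append the whole .text
 * section to this program's instructions, then adjust the call's immediate
 * so it targets the appended copy.
 */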
1892static int
1893bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
1894 struct reloc_desc *relo)
1895{
1896 struct bpf_insn *insn, *new_insn;
1897 struct bpf_program *text;
1898 size_t new_cnt;
1899 int err;
1900
1901 if (relo->type != RELO_CALL)
1902 return -LIBBPF_ERRNO__RELOC;
1903
1904 if (prog->idx == obj->efile.text_shndx) {
1905 pr_warning("relo in .text insn %d into off %d\n",
1906 relo->insn_idx, relo->text_off);
1907 return -LIBBPF_ERRNO__RELOC;
1908 }
1909
1910 if (prog->main_prog_cnt == 0) {
1911 text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
1912 if (!text) {
                        pr_warning("no .text section found, yet a relocation into .text exists\n");
1914 return -LIBBPF_ERRNO__RELOC;
1915 }
1916 new_cnt = prog->insns_cnt + text->insns_cnt;
1917 new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn));
1918 if (!new_insn) {
1919 pr_warning("oom in prog realloc\n");
1920 return -ENOMEM;
1921 }
1922
1923 if (obj->btf_ext) {
1924 err = bpf_program_reloc_btf_ext(prog, obj,
1925 text->section_name,
1926 prog->insns_cnt);
1927 if (err)
1928 return err;
1929 }
1930
1931 memcpy(new_insn + prog->insns_cnt, text->insns,
1932 text->insns_cnt * sizeof(*insn));
1933 prog->insns = new_insn;
1934 prog->main_prog_cnt = prog->insns_cnt;
1935 prog->insns_cnt = new_cnt;
1936 pr_debug("added %zd insn from %s to prog %s\n",
1937 text->insns_cnt, text->section_name,
1938 prog->section_name);
1939 }
1940 insn = &prog->insns[relo->insn_idx];
1941 insn->imm += prog->main_prog_cnt - relo->insn_idx;
1942 return 0;
1943}
1944
1945static int
1946bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
1947{
1948 int i, err;
1949
1950 if (!prog)
1951 return 0;
1952
1953 if (obj->btf_ext) {
1954 err = bpf_program_reloc_btf_ext(prog, obj,
1955 prog->section_name, 0);
1956 if (err)
1957 return err;
1958 }
1959
1960 if (!prog->reloc_desc)
1961 return 0;
1962
1963 for (i = 0; i < prog->nr_reloc; i++) {
1964 if (prog->reloc_desc[i].type == RELO_LD64 ||
1965 prog->reloc_desc[i].type == RELO_DATA) {
1966 bool relo_data = prog->reloc_desc[i].type == RELO_DATA;
1967 struct bpf_insn *insns = prog->insns;
1968 int insn_idx, map_idx;
1969
1970 insn_idx = prog->reloc_desc[i].insn_idx;
1971 map_idx = prog->reloc_desc[i].map_idx;
1972
1973 if (insn_idx + 1 >= (int)prog->insns_cnt) {
1974 pr_warning("relocation out of range: '%s'\n",
1975 prog->section_name);
1976 return -LIBBPF_ERRNO__RELOC;
1977 }
1978
1979 if (!relo_data) {
1980 insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
1981 } else {
1982 insns[insn_idx].src_reg = BPF_PSEUDO_MAP_VALUE;
1983 insns[insn_idx + 1].imm = insns[insn_idx].imm;
1984 }
1985 insns[insn_idx].imm = obj->maps[map_idx].fd;
1986 } else if (prog->reloc_desc[i].type == RELO_CALL) {
1987 err = bpf_program__reloc_text(prog, obj,
1988 &prog->reloc_desc[i]);
1989 if (err)
1990 return err;
1991 }
1992 }
1993
1994 zfree(&prog->reloc_desc);
1995 prog->nr_reloc = 0;
1996 return 0;
1997}
1998
1999
2000static int
2001bpf_object__relocate(struct bpf_object *obj)
2002{
2003 struct bpf_program *prog;
2004 size_t i;
2005 int err;
2006
2007 for (i = 0; i < obj->nr_programs; i++) {
2008 prog = &obj->programs[i];
2009
2010 err = bpf_program__relocate(prog, obj);
2011 if (err) {
2012 pr_warning("failed to relocate '%s'\n",
2013 prog->section_name);
2014 return err;
2015 }
2016 }
2017 return 0;
2018}
2019
2020static int bpf_object__collect_reloc(struct bpf_object *obj)
2021{
2022 int i, err;
2023
2024 if (!obj_elf_valid(obj)) {
2025 pr_warning("Internal error: elf object is closed\n");
2026 return -LIBBPF_ERRNO__INTERNAL;
2027 }
2028
2029 for (i = 0; i < obj->efile.nr_reloc; i++) {
2030 GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
2031 Elf_Data *data = obj->efile.reloc[i].data;
2032 int idx = shdr->sh_info;
2033 struct bpf_program *prog;
2034
2035 if (shdr->sh_type != SHT_REL) {
2036 pr_warning("internal error at %d\n", __LINE__);
2037 return -LIBBPF_ERRNO__INTERNAL;
2038 }
2039
2040 prog = bpf_object__find_prog_by_idx(obj, idx);
2041 if (!prog) {
2042 pr_warning("relocation failed: no section(%d)\n", idx);
2043 return -LIBBPF_ERRNO__RELOC;
2044 }
2045
2046 err = bpf_program__collect_reloc(prog,
2047 shdr, data,
2048 obj);
2049 if (err)
2050 return err;
2051 }
2052 return 0;
2053}
2054
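/*
 * Load a single program instance via bpf_load_program_xattr(), retrying
 * with a doubled log buffer on ENOSPC and classifying common failures:
 * verifier rejection, program too large, or wrong program type.
 */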
2055static int
2056load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
2057 char *license, __u32 kern_version, int *pfd)
2058{
2059 struct bpf_load_program_attr load_attr;
2060 char *cp, errmsg[STRERR_BUFSIZE];
2061 int log_buf_size = BPF_LOG_BUF_SIZE;
2062 char *log_buf;
2063 int ret;
2064
2065 memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
2066 load_attr.prog_type = prog->type;
2067 load_attr.expected_attach_type = prog->expected_attach_type;
2068 if (prog->caps->name)
2069 load_attr.name = prog->name;
2070 load_attr.insns = insns;
2071 load_attr.insns_cnt = insns_cnt;
2072 load_attr.license = license;
2073 load_attr.kern_version = kern_version;
2074 load_attr.prog_ifindex = prog->prog_ifindex;
2075 load_attr.prog_btf_fd = prog->btf_fd >= 0 ? prog->btf_fd : 0;
2076 load_attr.func_info = prog->func_info;
2077 load_attr.func_info_rec_size = prog->func_info_rec_size;
2078 load_attr.func_info_cnt = prog->func_info_cnt;
2079 load_attr.line_info = prog->line_info;
2080 load_attr.line_info_rec_size = prog->line_info_rec_size;
2081 load_attr.line_info_cnt = prog->line_info_cnt;
2082 load_attr.log_level = prog->log_level;
2083 if (!load_attr.insns || !load_attr.insns_cnt)
2084 return -EINVAL;
2085
2086retry_load:
2087 log_buf = malloc(log_buf_size);
2088 if (!log_buf)
                pr_warning("failed to allocate log buffer for BPF loader, continuing without log\n");
2090
2091 ret = bpf_load_program_xattr(&load_attr, log_buf, log_buf_size);
2092
2093 if (ret >= 0) {
2094 if (load_attr.log_level)
2095 pr_debug("verifier log:\n%s", log_buf);
2096 *pfd = ret;
2097 ret = 0;
2098 goto out;
2099 }
2100
2101 if (errno == ENOSPC) {
2102 log_buf_size <<= 1;
2103 free(log_buf);
2104 goto retry_load;
2105 }
2106 ret = -LIBBPF_ERRNO__LOAD;
2107 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
2108 pr_warning("load bpf program failed: %s\n", cp);
2109
2110 if (log_buf && log_buf[0] != '\0') {
2111 ret = -LIBBPF_ERRNO__VERIFY;
2112 pr_warning("-- BEGIN DUMP LOG ---\n");
2113 pr_warning("\n%s\n", log_buf);
2114 pr_warning("-- END LOG --\n");
2115 } else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
2116 pr_warning("Program too large (%zu insns), at most %d insns\n",
2117 load_attr.insns_cnt, BPF_MAXINSNS);
2118 ret = -LIBBPF_ERRNO__PROG2BIG;
2119 } else {
                /* Wrong program type? Probe by retrying as a kprobe program. */
2121 if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
2122 int fd;
2123
2124 load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
2125 load_attr.expected_attach_type = 0;
2126 fd = bpf_load_program_xattr(&load_attr, NULL, 0);
2127 if (fd >= 0) {
2128 close(fd);
2129 ret = -LIBBPF_ERRNO__PROGTYPE;
2130 goto out;
2131 }
2132 }
2133
2134 if (log_buf)
2135 ret = -LIBBPF_ERRNO__KVER;
2136 }
2137
2138out:
2139 free(log_buf);
2140 return ret;
2141}
2142
2143int
2144bpf_program__load(struct bpf_program *prog,
2145 char *license, __u32 kern_version)
2146{
2147 int err = 0, fd, i;
2148
2149 if (prog->instances.nr < 0 || !prog->instances.fds) {
2150 if (prog->preprocessor) {
2151 pr_warning("Internal error: can't load program '%s'\n",
2152 prog->section_name);
2153 return -LIBBPF_ERRNO__INTERNAL;
2154 }
2155
2156 prog->instances.fds = malloc(sizeof(int));
2157 if (!prog->instances.fds) {
2158 pr_warning("Not enough memory for BPF fds\n");
2159 return -ENOMEM;
2160 }
2161 prog->instances.nr = 1;
2162 prog->instances.fds[0] = -1;
2163 }
2164
2165 if (!prog->preprocessor) {
2166 if (prog->instances.nr != 1) {
2167 pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
2168 prog->section_name, prog->instances.nr);
2169 }
2170 err = load_program(prog, prog->insns, prog->insns_cnt,
2171 license, kern_version, &fd);
2172 if (!err)
2173 prog->instances.fds[0] = fd;
2174 goto out;
2175 }
2176
2177 for (i = 0; i < prog->instances.nr; i++) {
2178 struct bpf_prog_prep_result result;
2179 bpf_program_prep_t preprocessor = prog->preprocessor;
2180
2181 memset(&result, 0, sizeof(result));
2182 err = preprocessor(prog, i, prog->insns,
2183 prog->insns_cnt, &result);
2184 if (err) {
2185 pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
2186 i, prog->section_name);
2187 goto out;
2188 }
2189
2190 if (!result.new_insn_ptr || !result.new_insn_cnt) {
2191 pr_debug("Skip loading the %dth instance of program '%s'\n",
2192 i, prog->section_name);
2193 prog->instances.fds[i] = -1;
2194 if (result.pfd)
2195 *result.pfd = -1;
2196 continue;
2197 }
2198
2199 err = load_program(prog, result.new_insn_ptr,
2200 result.new_insn_cnt,
2201 license, kern_version, &fd);
2202
2203 if (err) {
2204 pr_warning("Loading the %dth instance of program '%s' failed\n",
2205 i, prog->section_name);
2206 goto out;
2207 }
2208
2209 if (result.pfd)
2210 *result.pfd = fd;
2211 prog->instances.fds[i] = fd;
2212 }
2213out:
2214 if (err)
2215 pr_warning("failed to load program '%s'\n",
2216 prog->section_name);
2217 zfree(&prog->insns);
2218 prog->insns_cnt = 0;
2219 return err;
2220}
2221
2222static bool bpf_program__is_function_storage(struct bpf_program *prog,
2223 struct bpf_object *obj)
2224{
2225 return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls;
2226}
2227
2228static int
2229bpf_object__load_progs(struct bpf_object *obj)
2230{
2231 size_t i;
2232 int err;
2233
2234 for (i = 0; i < obj->nr_programs; i++) {
2235 if (bpf_program__is_function_storage(&obj->programs[i], obj))
2236 continue;
2237 err = bpf_program__load(&obj->programs[i],
2238 obj->license,
2239 obj->kern_version);
2240 if (err)
2241 return err;
2242 }
2243 return 0;
2244}
2245
2246static bool bpf_prog_type__needs_kver(enum bpf_prog_type type)
2247{
2248 switch (type) {
2249 case BPF_PROG_TYPE_SOCKET_FILTER:
2250 case BPF_PROG_TYPE_SCHED_CLS:
2251 case BPF_PROG_TYPE_SCHED_ACT:
2252 case BPF_PROG_TYPE_XDP:
2253 case BPF_PROG_TYPE_CGROUP_SKB:
2254 case BPF_PROG_TYPE_CGROUP_SOCK:
2255 case BPF_PROG_TYPE_LWT_IN:
2256 case BPF_PROG_TYPE_LWT_OUT:
2257 case BPF_PROG_TYPE_LWT_XMIT:
2258 case BPF_PROG_TYPE_LWT_SEG6LOCAL:
2259 case BPF_PROG_TYPE_SOCK_OPS:
2260 case BPF_PROG_TYPE_SK_SKB:
2261 case BPF_PROG_TYPE_CGROUP_DEVICE:
2262 case BPF_PROG_TYPE_SK_MSG:
2263 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2264 case BPF_PROG_TYPE_LIRC_MODE2:
2265 case BPF_PROG_TYPE_SK_REUSEPORT:
2266 case BPF_PROG_TYPE_FLOW_DISSECTOR:
2267 case BPF_PROG_TYPE_UNSPEC:
2268 case BPF_PROG_TYPE_TRACEPOINT:
2269 case BPF_PROG_TYPE_RAW_TRACEPOINT:
2270 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
2271 case BPF_PROG_TYPE_PERF_EVENT:
2272 case BPF_PROG_TYPE_CGROUP_SYSCTL:
2273 return false;
2274 case BPF_PROG_TYPE_KPROBE:
2275 default:
2276 return true;
2277 }
2278}
2279
2280static int bpf_object__validate(struct bpf_object *obj, bool needs_kver)
2281{
2282 if (needs_kver && obj->kern_version == 0) {
2283 pr_warning("%s doesn't provide kernel version\n",
2284 obj->path);
2285 return -LIBBPF_ERRNO__KVERSION;
2286 }
2287 return 0;
2288}
2289
2290static struct bpf_object *
2291__bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz,
2292 bool needs_kver, int flags)
2293{
2294 struct bpf_object *obj;
2295 int err;
2296
2297 if (elf_version(EV_CURRENT) == EV_NONE) {
2298 pr_warning("failed to init libelf for %s\n", path);
2299 return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
2300 }
2301
2302 obj = bpf_object__new(path, obj_buf, obj_buf_sz);
2303 if (IS_ERR(obj))
2304 return obj;
2305
2306 CHECK_ERR(bpf_object__elf_init(obj), err, out);
2307 CHECK_ERR(bpf_object__check_endianness(obj), err, out);
2308 CHECK_ERR(bpf_object__probe_caps(obj), err, out);
2309 CHECK_ERR(bpf_object__elf_collect(obj, flags), err, out);
2310 CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
2311 CHECK_ERR(bpf_object__validate(obj, needs_kver), err, out);
2312
2313 bpf_object__elf_finish(obj);
2314 return obj;
2315out:
2316 bpf_object__close(obj);
2317 return ERR_PTR(err);
2318}
2319
2320struct bpf_object *__bpf_object__open_xattr(struct bpf_object_open_attr *attr,
2321 int flags)
2322{
2323 /* param validation */
2324 if (!attr->file)
2325 return NULL;
2326
2327 pr_debug("loading %s\n", attr->file);
2328
2329 return __bpf_object__open(attr->file, NULL, 0,
2330 bpf_prog_type__needs_kver(attr->prog_type),
2331 flags);
2332}
2333
2334struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
2335{
2336 return __bpf_object__open_xattr(attr, 0);
2337}
2338
2339struct bpf_object *bpf_object__open(const char *path)
2340{
2341 struct bpf_object_open_attr attr = {
2342 .file = path,
2343 .prog_type = BPF_PROG_TYPE_UNSPEC,
2344 };
2345
2346 return bpf_object__open_xattr(&attr);
2347}
2348
2349struct bpf_object *bpf_object__open_buffer(void *obj_buf,
2350 size_t obj_buf_sz,
2351 const char *name)
2352{
2353 char tmp_name[64];
2354
2355 /* param validation */
2356 if (!obj_buf || obj_buf_sz <= 0)
2357 return NULL;
2358
2359 if (!name) {
2360 snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
2361 (unsigned long)obj_buf,
2362 (unsigned long)obj_buf_sz);
2363 tmp_name[sizeof(tmp_name) - 1] = '\0';
2364 name = tmp_name;
2365 }
2366 pr_debug("loading object '%s' from buffer\n",
2367 name);
2368
2369 return __bpf_object__open(name, obj_buf, obj_buf_sz, true, true);
2370}
2371
2372int bpf_object__unload(struct bpf_object *obj)
2373{
2374 size_t i;
2375
2376 if (!obj)
2377 return -EINVAL;
2378
2379 for (i = 0; i < obj->nr_maps; i++)
2380 zclose(obj->maps[i].fd);
2381
2382 for (i = 0; i < obj->nr_programs; i++)
2383 bpf_program__unload(&obj->programs[i]);
2384
2385 return 0;
2386}
2387
2388int bpf_object__load(struct bpf_object *obj)
2389{
2390 int err;
2391
2392 if (!obj)
2393 return -EINVAL;
2394
2395 if (obj->loaded) {
2396 pr_warning("object should not be loaded twice\n");
2397 return -EINVAL;
2398 }
2399
2400 obj->loaded = true;
2401
2402 CHECK_ERR(bpf_object__create_maps(obj), err, out);
2403 CHECK_ERR(bpf_object__relocate(obj), err, out);
2404 CHECK_ERR(bpf_object__load_progs(obj), err, out);
2405
2406 return 0;
2407out:
2408 bpf_object__unload(obj);
2409 pr_warning("failed to load object '%s'\n", obj->path);
2410 return err;
2411}
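
/*
 * Usage sketch (not part of the library): the typical open-then-load flow a
 * caller goes through before attaching programs. The object path "prog.o" is
 * a placeholder.
 *
 *	struct bpf_object *obj;
 *	int err;
 *
 *	obj = bpf_object__open("prog.o");
 *	if (!obj || libbpf_get_error(obj))
 *		return -EINVAL;
 *
 *	err = bpf_object__load(obj);
 *	if (err) {
 *		bpf_object__close(obj);
 *		return err;
 *	}
 *	// ... use the loaded programs and maps, then:
 *	bpf_object__close(obj);
 */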
2412
2413static int check_path(const char *path)
2414{
2415 char *cp, errmsg[STRERR_BUFSIZE];
2416 struct statfs st_fs;
2417 char *dname, *dir;
2418 int err = 0;
2419
2420 if (path == NULL)
2421 return -EINVAL;
2422
2423 dname = strdup(path);
2424 if (dname == NULL)
2425 return -ENOMEM;
2426
2427 dir = dirname(dname);
2428 if (statfs(dir, &st_fs)) {
2429 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
2430 pr_warning("failed to statfs %s: %s\n", dir, cp);
2431 err = -errno;
2432 }
2433 free(dname);
2434
2435 if (!err && st_fs.f_type != BPF_FS_MAGIC) {
2436 pr_warning("specified path %s is not on BPF FS\n", path);
2437 err = -EINVAL;
2438 }
2439
2440 return err;
2441}
2442
2443int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
2444 int instance)
2445{
2446 char *cp, errmsg[STRERR_BUFSIZE];
2447 int err;
2448
2449 err = check_path(path);
2450 if (err)
2451 return err;
2452
2453 if (prog == NULL) {
2454 pr_warning("invalid program pointer\n");
2455 return -EINVAL;
2456 }
2457
2458 if (instance < 0 || instance >= prog->instances.nr) {
2459 pr_warning("invalid prog instance %d of prog %s (max %d)\n",
2460 instance, prog->section_name, prog->instances.nr);
2461 return -EINVAL;
2462 }
2463
2464 if (bpf_obj_pin(prog->instances.fds[instance], path)) {
2465 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
2466 pr_warning("failed to pin program: %s\n", cp);
2467 return -errno;
2468 }
2469 pr_debug("pinned program '%s'\n", path);
2470
2471 return 0;
2472}
2473
2474int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
2475 int instance)
2476{
2477 int err;
2478
2479 err = check_path(path);
2480 if (err)
2481 return err;
2482
2483 if (prog == NULL) {
2484 pr_warning("invalid program pointer\n");
2485 return -EINVAL;
2486 }
2487
2488 if (instance < 0 || instance >= prog->instances.nr) {
2489 pr_warning("invalid prog instance %d of prog %s (max %d)\n",
2490 instance, prog->section_name, prog->instances.nr);
2491 return -EINVAL;
2492 }
2493
2494 err = unlink(path);
2495 if (err != 0)
2496 return -errno;
2497 pr_debug("unpinned program '%s'\n", path);
2498
2499 return 0;
2500}
2501
2502static int make_dir(const char *path)
2503{
2504 char *cp, errmsg[STRERR_BUFSIZE];
2505 int err = 0;
2506
2507 if (mkdir(path, 0700) && errno != EEXIST)
2508 err = -errno;
2509
2510 if (err) {
2511 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
2512 pr_warning("failed to mkdir %s: %s\n", path, cp);
2513 }
2514 return err;
2515}
2516
2517int bpf_program__pin(struct bpf_program *prog, const char *path)
2518{
2519 int i, err;
2520
2521 err = check_path(path);
2522 if (err)
2523 return err;
2524
2525 if (prog == NULL) {
2526 pr_warning("invalid program pointer\n");
2527 return -EINVAL;
2528 }
2529
2530 if (prog->instances.nr <= 0) {
2531 pr_warning("no instances of prog %s to pin\n",
2532 prog->section_name);
2533 return -EINVAL;
2534 }
2535
2536 if (prog->instances.nr == 1) {
2537 /* don't create subdirs when pinning a single instance */
2538 return bpf_program__pin_instance(prog, path, 0);
2539 }
2540
2541 err = make_dir(path);
2542 if (err)
2543 return err;
2544
2545 for (i = 0; i < prog->instances.nr; i++) {
2546 char buf[PATH_MAX];
2547 int len;
2548
2549 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
2550 if (len < 0) {
2551 err = -EINVAL;
2552 goto err_unpin;
2553 } else if (len >= PATH_MAX) {
2554 err = -ENAMETOOLONG;
2555 goto err_unpin;
2556 }
2557
2558 err = bpf_program__pin_instance(prog, buf, i);
2559 if (err)
2560 goto err_unpin;
2561 }
2562
2563 return 0;
2564
2565err_unpin:
2566 for (i = i - 1; i >= 0; i--) {
2567 char buf[PATH_MAX];
2568 int len;
2569
2570 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
2571 if (len < 0)
2572 continue;
2573 else if (len >= PATH_MAX)
2574 continue;
2575
2576 bpf_program__unpin_instance(prog, buf, i);
2577 }
2578
2579 rmdir(path);
2580
2581 return err;
2582}
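
/*
 * Usage sketch: pinning assumes the target directory lives on a mounted BPF
 * filesystem (conventionally /sys/fs/bpf); check_path() above enforces that.
 * The pin path below is a placeholder.
 *
 *	err = bpf_program__pin(prog, "/sys/fs/bpf/my_prog");
 *	...
 *	err = bpf_program__unpin(prog, "/sys/fs/bpf/my_prog");
 */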
2583
2584int bpf_program__unpin(struct bpf_program *prog, const char *path)
2585{
2586 int i, err;
2587
2588 err = check_path(path);
2589 if (err)
2590 return err;
2591
2592 if (prog == NULL) {
2593 pr_warning("invalid program pointer\n");
2594 return -EINVAL;
2595 }
2596
2597 if (prog->instances.nr <= 0) {
2598 pr_warning("no instances of prog %s to unpin\n",
2599 prog->section_name);
2600 return -EINVAL;
2601 }
2602
2603 if (prog->instances.nr == 1) {
2604 /* don't create subdirs when unpinning a single instance */
2605 return bpf_program__unpin_instance(prog, path, 0);
2606 }
2607
2608 for (i = 0; i < prog->instances.nr; i++) {
2609 char buf[PATH_MAX];
2610 int len;
2611
2612 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
2613 if (len < 0)
2614 return -EINVAL;
2615 else if (len >= PATH_MAX)
2616 return -ENAMETOOLONG;
2617
2618 err = bpf_program__unpin_instance(prog, buf, i);
2619 if (err)
2620 return err;
2621 }
2622
2623 err = rmdir(path);
2624 if (err)
2625 return -errno;
2626
2627 return 0;
2628}
2629
2630int bpf_map__pin(struct bpf_map *map, const char *path)
2631{
2632 char *cp, errmsg[STRERR_BUFSIZE];
2633 int err;
2634
2635 err = check_path(path);
2636 if (err)
2637 return err;
2638
2639 if (map == NULL) {
2640 pr_warning("invalid map pointer\n");
2641 return -EINVAL;
2642 }
2643
2644 if (bpf_obj_pin(map->fd, path)) {
2645 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
2646 pr_warning("failed to pin map: %s\n", cp);
2647 return -errno;
2648 }
2649
2650 pr_debug("pinned map '%s'\n", path);
2651
2652 return 0;
2653}
2654
2655int bpf_map__unpin(struct bpf_map *map, const char *path)
2656{
2657 int err;
2658
2659 err = check_path(path);
2660 if (err)
2661 return err;
2662
2663 if (map == NULL) {
2664 pr_warning("invalid map pointer\n");
2665 return -EINVAL;
2666 }
2667
2668 err = unlink(path);
2669 if (err != 0)
2670 return -errno;
2671 pr_debug("unpinned map '%s'\n", path);
2672
2673 return 0;
2674}
2675
2676int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
2677{
2678 struct bpf_map *map;
2679 int err;
2680
2681 if (!obj)
2682 return -ENOENT;
2683
2684 if (!obj->loaded) {
2685 pr_warning("object not yet loaded; load it first\n");
2686 return -ENOENT;
2687 }
2688
2689 err = make_dir(path);
2690 if (err)
2691 return err;
2692
2693 bpf_object__for_each_map(map, obj) {
2694 char buf[PATH_MAX];
2695 int len;
2696
2697 len = snprintf(buf, PATH_MAX, "%s/%s", path,
2698 bpf_map__name(map));
2699 if (len < 0) {
2700 err = -EINVAL;
2701 goto err_unpin_maps;
2702 } else if (len >= PATH_MAX) {
2703 err = -ENAMETOOLONG;
2704 goto err_unpin_maps;
2705 }
2706
2707 err = bpf_map__pin(map, buf);
2708 if (err)
2709 goto err_unpin_maps;
2710 }
2711
2712 return 0;
2713
2714err_unpin_maps:
2715 while ((map = bpf_map__prev(map, obj))) {
2716 char buf[PATH_MAX];
2717 int len;
2718
2719 len = snprintf(buf, PATH_MAX, "%s/%s", path,
2720 bpf_map__name(map));
2721 if (len < 0)
2722 continue;
2723 else if (len >= PATH_MAX)
2724 continue;
2725
2726 bpf_map__unpin(map, buf);
2727 }
2728
2729 return err;
2730}
2731
2732int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
2733{
2734 struct bpf_map *map;
2735 int err;
2736
2737 if (!obj)
2738 return -ENOENT;
2739
2740 bpf_object__for_each_map(map, obj) {
2741 char buf[PATH_MAX];
2742 int len;
2743
2744 len = snprintf(buf, PATH_MAX, "%s/%s", path,
2745 bpf_map__name(map));
2746 if (len < 0)
2747 return -EINVAL;
2748 else if (len >= PATH_MAX)
2749 return -ENAMETOOLONG;
2750
2751 err = bpf_map__unpin(map, buf);
2752 if (err)
2753 return err;
2754 }
2755
2756 return 0;
2757}
2758
2759int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
2760{
2761 struct bpf_program *prog;
2762 int err;
2763
2764 if (!obj)
2765 return -ENOENT;
2766
2767 if (!obj->loaded) {
2768 pr_warning("object not yet loaded; load it first\n");
2769 return -ENOENT;
2770 }
2771
2772 err = make_dir(path);
2773 if (err)
2774 return err;
2775
2776 bpf_object__for_each_program(prog, obj) {
2777 char buf[PATH_MAX];
2778 int len;
2779
2780 len = snprintf(buf, PATH_MAX, "%s/%s", path,
2781 prog->pin_name);
2782 if (len < 0) {
2783 err = -EINVAL;
2784 goto err_unpin_programs;
2785 } else if (len >= PATH_MAX) {
2786 err = -ENAMETOOLONG;
2787 goto err_unpin_programs;
2788 }
2789
2790 err = bpf_program__pin(prog, buf);
2791 if (err)
2792 goto err_unpin_programs;
2793 }
2794
2795 return 0;
2796
2797err_unpin_programs:
2798 while ((prog = bpf_program__prev(prog, obj))) {
2799 char buf[PATH_MAX];
2800 int len;
2801
2802 len = snprintf(buf, PATH_MAX, "%s/%s", path,
2803 prog->pin_name);
2804 if (len < 0)
2805 continue;
2806 else if (len >= PATH_MAX)
2807 continue;
2808
2809 bpf_program__unpin(prog, buf);
2810 }
2811
2812 return err;
2813}
2814
2815int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
2816{
2817 struct bpf_program *prog;
2818 int err;
2819
2820 if (!obj)
2821 return -ENOENT;
2822
2823 bpf_object__for_each_program(prog, obj) {
2824 char buf[PATH_MAX];
2825 int len;
2826
2827 len = snprintf(buf, PATH_MAX, "%s/%s", path,
2828 prog->pin_name);
2829 if (len < 0)
2830 return -EINVAL;
2831 else if (len >= PATH_MAX)
2832 return -ENAMETOOLONG;
2833
2834 err = bpf_program__unpin(prog, buf);
2835 if (err)
2836 return err;
2837 }
2838
2839 return 0;
2840}
2841
2842int bpf_object__pin(struct bpf_object *obj, const char *path)
2843{
2844 int err;
2845
2846 err = bpf_object__pin_maps(obj, path);
2847 if (err)
2848 return err;
2849
2850 err = bpf_object__pin_programs(obj, path);
2851 if (err) {
2852 bpf_object__unpin_maps(obj, path);
2853 return err;
2854 }
2855
2856 return 0;
2857}
2858
2859void bpf_object__close(struct bpf_object *obj)
2860{
2861 size_t i;
2862
2863 if (!obj)
2864 return;
2865
2866 if (obj->clear_priv)
2867 obj->clear_priv(obj, obj->priv);
2868
2869 bpf_object__elf_finish(obj);
2870 bpf_object__unload(obj);
2871 btf__free(obj->btf);
2872 btf_ext__free(obj->btf_ext);
2873
2874 for (i = 0; i < obj->nr_maps; i++) {
2875 zfree(&obj->maps[i].name);
2876 if (obj->maps[i].clear_priv)
2877 obj->maps[i].clear_priv(&obj->maps[i],
2878 obj->maps[i].priv);
2879 obj->maps[i].priv = NULL;
2880 obj->maps[i].clear_priv = NULL;
2881 }
2882
2883 zfree(&obj->sections.rodata);
2884 zfree(&obj->sections.data);
2885 zfree(&obj->maps);
2886 obj->nr_maps = 0;
2887
2888 if (obj->programs && obj->nr_programs) {
2889 for (i = 0; i < obj->nr_programs; i++)
2890 bpf_program__exit(&obj->programs[i]);
2891 }
2892 zfree(&obj->programs);
2893
2894 list_del(&obj->list);
2895 free(obj);
2896}
2897
2898struct bpf_object *
2899bpf_object__next(struct bpf_object *prev)
2900{
2901 struct bpf_object *next;
2902
2903 if (!prev)
2904 next = list_first_entry(&bpf_objects_list,
2905 struct bpf_object,
2906 list);
2907 else
2908 next = list_next_entry(prev, list);
2909
2910 /* wrapped back to the list head: end of iteration (or empty list) */
2911 if (&next->list == &bpf_objects_list)
2912 return NULL;
2913
2914 return next;
2915}
2916
2917const char *bpf_object__name(struct bpf_object *obj)
2918{
2919 return obj ? obj->path : ERR_PTR(-EINVAL);
2920}
2921
2922unsigned int bpf_object__kversion(struct bpf_object *obj)
2923{
2924 return obj ? obj->kern_version : 0;
2925}
2926
2927struct btf *bpf_object__btf(struct bpf_object *obj)
2928{
2929 return obj ? obj->btf : NULL;
2930}
2931
2932int bpf_object__btf_fd(const struct bpf_object *obj)
2933{
2934 return obj->btf ? btf__fd(obj->btf) : -1;
2935}
2936
2937int bpf_object__set_priv(struct bpf_object *obj, void *priv,
2938 bpf_object_clear_priv_t clear_priv)
2939{
2940 if (obj->priv && obj->clear_priv)
2941 obj->clear_priv(obj, obj->priv);
2942
2943 obj->priv = priv;
2944 obj->clear_priv = clear_priv;
2945 return 0;
2946}
2947
2948void *bpf_object__priv(struct bpf_object *obj)
2949{
2950 return obj ? obj->priv : ERR_PTR(-EINVAL);
2951}
2952
2953static struct bpf_program *
2954__bpf_program__iter(struct bpf_program *p, struct bpf_object *obj, bool forward)
2955{
2956 size_t nr_programs = obj->nr_programs;
2957 ssize_t idx;
2958
2959 if (!nr_programs)
2960 return NULL;
2961
2962 if (!p)
2963 /* start from the first (or, going backwards, the last) program */
2964 return forward ? &obj->programs[0] :
2965 &obj->programs[nr_programs - 1];
2966
2967 if (p->obj != obj) {
2968 pr_warning("error: program handle doesn't match object\n");
2969 return NULL;
2970 }
2971
2972 idx = (p - obj->programs) + (forward ? 1 : -1);
2973 if (idx >= obj->nr_programs || idx < 0)
2974 return NULL;
2975 return &obj->programs[idx];
2976}
2977
2978struct bpf_program *
2979bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
2980{
2981 struct bpf_program *prog = prev;
2982
2983 do {
2984 prog = __bpf_program__iter(prog, obj, true);
2985 } while (prog && bpf_program__is_function_storage(prog, obj));
2986
2987 return prog;
2988}
2989
2990struct bpf_program *
2991bpf_program__prev(struct bpf_program *next, struct bpf_object *obj)
2992{
2993 struct bpf_program *prog = next;
2994
2995 do {
2996 prog = __bpf_program__iter(prog, obj, false);
2997 } while (prog && bpf_program__is_function_storage(prog, obj));
2998
2999 return prog;
3000}
3001
3002int bpf_program__set_priv(struct bpf_program *prog, void *priv,
3003 bpf_program_clear_priv_t clear_priv)
3004{
3005 if (prog->priv && prog->clear_priv)
3006 prog->clear_priv(prog, prog->priv);
3007
3008 prog->priv = priv;
3009 prog->clear_priv = clear_priv;
3010 return 0;
3011}
3012
3013void *bpf_program__priv(struct bpf_program *prog)
3014{
3015 return prog ? prog->priv : ERR_PTR(-EINVAL);
3016}
3017
3018void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
3019{
3020 prog->prog_ifindex = ifindex;
3021}
3022
3023const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
3024{
3025 const char *title;
3026
3027 title = prog->section_name;
3028 if (needs_copy) {
3029 title = strdup(title);
3030 if (!title) {
3031 pr_warning("failed to strdup program title\n");
3032 return ERR_PTR(-ENOMEM);
3033 }
3034 }
3035
3036 return title;
3037}
3038
3039int bpf_program__fd(struct bpf_program *prog)
3040{
3041 return bpf_program__nth_fd(prog, 0);
3042}
3043
3044int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
3045 bpf_program_prep_t prep)
3046{
3047 int *instances_fds;
3048
3049 if (nr_instances <= 0 || !prep)
3050 return -EINVAL;
3051
3052 if (prog->instances.nr > 0 || prog->instances.fds) {
3053 pr_warning("Can't set pre-processor after loading\n");
3054 return -EINVAL;
3055 }
3056
3057 instances_fds = malloc(sizeof(int) * nr_instances);
3058 if (!instances_fds) {
3059 pr_warning("failed to allocate memory for instance fds\n");
3060 return -ENOMEM;
3061 }
3062
3063 /* fill all instance fds with -1 */
3064 memset(instances_fds, -1, sizeof(int) * nr_instances);
3065
3066 prog->instances.nr = nr_instances;
3067 prog->instances.fds = instances_fds;
3068 prog->preprocessor = prep;
3069 return 0;
3070}
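
/*
 * Usage sketch for bpf_program__set_prep(): a pass-through preprocessor that
 * loads every requested instance with the unmodified instructions. A real
 * callback would typically patch the instruction array per instance before
 * returning. "my_prep" and the instance count of 4 are placeholders, and the
 * call must happen before bpf_object__load().
 *
 *	static int my_prep(struct bpf_program *prog, int n,
 *			   struct bpf_insn *insns, int insns_cnt,
 *			   struct bpf_prog_prep_result *res)
 *	{
 *		res->new_insn_ptr = insns;
 *		res->new_insn_cnt = insns_cnt;
 *		res->pfd = NULL;
 *		return 0;
 *	}
 *
 *	err = bpf_program__set_prep(prog, 4, my_prep);
 */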
3071
3072int bpf_program__nth_fd(struct bpf_program *prog, int n)
3073{
3074 int fd;
3075
3076 if (!prog)
3077 return -EINVAL;
3078
3079 if (n >= prog->instances.nr || n < 0) {
3080 pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
3081 n, prog->section_name, prog->instances.nr);
3082 return -EINVAL;
3083 }
3084
3085 fd = prog->instances.fds[n];
3086 if (fd < 0) {
3087 pr_warning("%dth instance of program '%s' is invalid\n",
3088 n, prog->section_name);
3089 return -ENOENT;
3090 }
3091
3092 return fd;
3093}
3094
3095void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
3096{
3097 prog->type = type;
3098}
3099
3100static bool bpf_program__is_type(struct bpf_program *prog,
3101 enum bpf_prog_type type)
3102{
3103 return prog ? (prog->type == type) : false;
3104}
3105
3106#define BPF_PROG_TYPE_FNS(NAME, TYPE) \
3107int bpf_program__set_##NAME(struct bpf_program *prog) \
3108{ \
3109 if (!prog) \
3110 return -EINVAL; \
3111 bpf_program__set_type(prog, TYPE); \
3112 return 0; \
3113} \
3114 \
3115bool bpf_program__is_##NAME(struct bpf_program *prog) \
3116{ \
3117 return bpf_program__is_type(prog, TYPE); \
3118} \
3119
3120BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
3121BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
3122BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
3123BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
3124BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
3125BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
3126BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
3127BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
3128
3129void bpf_program__set_expected_attach_type(struct bpf_program *prog,
3130 enum bpf_attach_type type)
3131{
3132 prog->expected_attach_type = type;
3133}
3134
3135#define BPF_PROG_SEC_IMPL(string, ptype, eatype, is_attachable, atype) \
3136 { string, sizeof(string) - 1, ptype, eatype, is_attachable, atype }
3137
3138 /* Programs that can not be attached. */
3139#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0)
3140
3141 /* Programs that can be attached. */
3142#define BPF_APROG_SEC(string, ptype, atype) \
3143 BPF_PROG_SEC_IMPL(string, ptype, 0, 1, atype)
3144
3145 /* Programs that must specify expected attach type at load time. */
3146#define BPF_EAPROG_SEC(string, ptype, eatype) \
3147 BPF_PROG_SEC_IMPL(string, ptype, eatype, 1, eatype)
3148
3149 /* Programs that can be attached but whose attach type can't be
3150  * identified by section name. Kept for backward compatibility.
3151  */
3152#define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)
3153
3154static const struct {
3155 const char *sec;
3156 size_t len;
3157 enum bpf_prog_type prog_type;
3158 enum bpf_attach_type expected_attach_type;
3159 int is_attachable;
3160 enum bpf_attach_type attach_type;
3161} section_names[] = {
3162 BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER),
3163 BPF_PROG_SEC("kprobe/", BPF_PROG_TYPE_KPROBE),
3164 BPF_PROG_SEC("kretprobe/", BPF_PROG_TYPE_KPROBE),
3165 BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS),
3166 BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT),
3167 BPF_PROG_SEC("tracepoint/", BPF_PROG_TYPE_TRACEPOINT),
3168 BPF_PROG_SEC("raw_tracepoint/", BPF_PROG_TYPE_RAW_TRACEPOINT),
3169 BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP),
3170 BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
3171 BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN),
3172 BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT),
3173 BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT),
3174 BPF_PROG_SEC("lwt_seg6local", BPF_PROG_TYPE_LWT_SEG6LOCAL),
3175 BPF_APROG_SEC("cgroup_skb/ingress", BPF_PROG_TYPE_CGROUP_SKB,
3176 BPF_CGROUP_INET_INGRESS),
3177 BPF_APROG_SEC("cgroup_skb/egress", BPF_PROG_TYPE_CGROUP_SKB,
3178 BPF_CGROUP_INET_EGRESS),
3179 BPF_APROG_COMPAT("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB),
3180 BPF_APROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK,
3181 BPF_CGROUP_INET_SOCK_CREATE),
3182 BPF_EAPROG_SEC("cgroup/post_bind4", BPF_PROG_TYPE_CGROUP_SOCK,
3183 BPF_CGROUP_INET4_POST_BIND),
3184 BPF_EAPROG_SEC("cgroup/post_bind6", BPF_PROG_TYPE_CGROUP_SOCK,
3185 BPF_CGROUP_INET6_POST_BIND),
3186 BPF_APROG_SEC("cgroup/dev", BPF_PROG_TYPE_CGROUP_DEVICE,
3187 BPF_CGROUP_DEVICE),
3188 BPF_APROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS,
3189 BPF_CGROUP_SOCK_OPS),
3190 BPF_APROG_SEC("sk_skb/stream_parser", BPF_PROG_TYPE_SK_SKB,
3191 BPF_SK_SKB_STREAM_PARSER),
3192 BPF_APROG_SEC("sk_skb/stream_verdict", BPF_PROG_TYPE_SK_SKB,
3193 BPF_SK_SKB_STREAM_VERDICT),
3194 BPF_APROG_COMPAT("sk_skb", BPF_PROG_TYPE_SK_SKB),
3195 BPF_APROG_SEC("sk_msg", BPF_PROG_TYPE_SK_MSG,
3196 BPF_SK_MSG_VERDICT),
3197 BPF_APROG_SEC("lirc_mode2", BPF_PROG_TYPE_LIRC_MODE2,
3198 BPF_LIRC_MODE2),
3199 BPF_APROG_SEC("flow_dissector", BPF_PROG_TYPE_FLOW_DISSECTOR,
3200 BPF_FLOW_DISSECTOR),
3201 BPF_EAPROG_SEC("cgroup/bind4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
3202 BPF_CGROUP_INET4_BIND),
3203 BPF_EAPROG_SEC("cgroup/bind6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
3204 BPF_CGROUP_INET6_BIND),
3205 BPF_EAPROG_SEC("cgroup/connect4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
3206 BPF_CGROUP_INET4_CONNECT),
3207 BPF_EAPROG_SEC("cgroup/connect6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
3208 BPF_CGROUP_INET6_CONNECT),
3209 BPF_EAPROG_SEC("cgroup/sendmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
3210 BPF_CGROUP_UDP4_SENDMSG),
3211 BPF_EAPROG_SEC("cgroup/sendmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
3212 BPF_CGROUP_UDP6_SENDMSG),
3213 BPF_EAPROG_SEC("cgroup/recvmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
3214 BPF_CGROUP_UDP4_RECVMSG),
3215 BPF_EAPROG_SEC("cgroup/recvmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
3216 BPF_CGROUP_UDP6_RECVMSG),
3217 BPF_EAPROG_SEC("cgroup/sysctl", BPF_PROG_TYPE_CGROUP_SYSCTL,
3218 BPF_CGROUP_SYSCTL),
3219};
3220
3221#undef BPF_PROG_SEC_IMPL
3222#undef BPF_PROG_SEC
3223#undef BPF_APROG_SEC
3224#undef BPF_EAPROG_SEC
3225#undef BPF_APROG_COMPAT
3226
3227#define MAX_TYPE_NAME_SIZE 32
3228
3229static char *libbpf_get_type_names(bool attach_type)
3230{
3231 int i, len = ARRAY_SIZE(section_names) * MAX_TYPE_NAME_SIZE;
3232 char *buf;
3233
3234 buf = malloc(len);
3235 if (!buf)
3236 return NULL;
3237
3238 buf[0] = '\0';
3239
3240 for (i = 0; i < ARRAY_SIZE(section_names); i++) {
3241 if (attach_type && !section_names[i].is_attachable)
3242 continue;
3243
3244 if (strlen(buf) + strlen(section_names[i].sec) + 2 > len) {
3245 free(buf);
3246 return NULL;
3247 }
3248 strcat(buf, " ");
3249 strcat(buf, section_names[i].sec);
3250 }
3251
3252 return buf;
3253}
3254
3255int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
3256 enum bpf_attach_type *expected_attach_type)
3257{
3258 char *type_names;
3259 int i;
3260
3261 if (!name)
3262 return -EINVAL;
3263
3264 for (i = 0; i < ARRAY_SIZE(section_names); i++) {
3265 if (strncmp(name, section_names[i].sec, section_names[i].len))
3266 continue;
3267 *prog_type = section_names[i].prog_type;
3268 *expected_attach_type = section_names[i].expected_attach_type;
3269 return 0;
3270 }
3271 pr_warning("failed to guess program type based on ELF section name '%s'\n", name);
3272 type_names = libbpf_get_type_names(false);
3273 if (type_names != NULL) {
3274 pr_info("supported section(type) names are:%s\n", type_names);
3275 free(type_names);
3276 }
3277
3278 return -EINVAL;
3279}
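
/*
 * Usage sketch: derive program and expected attach types from an ELF section
 * name. "cgroup/connect4" is one of the entries in section_names[] above, so
 * on success prog_type is BPF_PROG_TYPE_CGROUP_SOCK_ADDR and
 * expected_attach_type is BPF_CGROUP_INET4_CONNECT.
 *
 *	enum bpf_prog_type prog_type;
 *	enum bpf_attach_type attach_type;
 *	int err;
 *
 *	err = libbpf_prog_type_by_name("cgroup/connect4", &prog_type,
 *				       &attach_type);
 */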
3280
3281int libbpf_attach_type_by_name(const char *name,
3282 enum bpf_attach_type *attach_type)
3283{
3284 char *type_names;
3285 int i;
3286
3287 if (!name)
3288 return -EINVAL;
3289
3290 for (i = 0; i < ARRAY_SIZE(section_names); i++) {
3291 if (strncmp(name, section_names[i].sec, section_names[i].len))
3292 continue;
3293 if (!section_names[i].is_attachable)
3294 return -EINVAL;
3295 *attach_type = section_names[i].attach_type;
3296 return 0;
3297 }
3298 pr_warning("failed to guess attach type based on ELF section name '%s'\n", name);
3299 type_names = libbpf_get_type_names(true);
3300 if (type_names != NULL) {
3301 pr_info("attachable section(type) names are:%s\n", type_names);
3302 free(type_names);
3303 }
3304
3305 return -EINVAL;
3306}
3307
3308static int
3309bpf_program__identify_section(struct bpf_program *prog,
3310 enum bpf_prog_type *prog_type,
3311 enum bpf_attach_type *expected_attach_type)
3312{
3313 return libbpf_prog_type_by_name(prog->section_name, prog_type,
3314 expected_attach_type);
3315}
3316
3317int bpf_map__fd(struct bpf_map *map)
3318{
3319 return map ? map->fd : -EINVAL;
3320}
3321
3322const struct bpf_map_def *bpf_map__def(struct bpf_map *map)
3323{
3324 return map ? &map->def : ERR_PTR(-EINVAL);
3325}
3326
3327const char *bpf_map__name(struct bpf_map *map)
3328{
3329 return map ? map->name : NULL;
3330}
3331
3332__u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
3333{
3334 return map ? map->btf_key_type_id : 0;
3335}
3336
3337__u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
3338{
3339 return map ? map->btf_value_type_id : 0;
3340}
3341
3342int bpf_map__set_priv(struct bpf_map *map, void *priv,
3343 bpf_map_clear_priv_t clear_priv)
3344{
3345 if (!map)
3346 return -EINVAL;
3347
3348 if (map->priv) {
3349 if (map->clear_priv)
3350 map->clear_priv(map, map->priv);
3351 }
3352
3353 map->priv = priv;
3354 map->clear_priv = clear_priv;
3355 return 0;
3356}
3357
3358void *bpf_map__priv(struct bpf_map *map)
3359{
3360 return map ? map->priv : ERR_PTR(-EINVAL);
3361}
3362
3363bool bpf_map__is_offload_neutral(struct bpf_map *map)
3364{
3365 return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
3366}
3367
3368bool bpf_map__is_internal(struct bpf_map *map)
3369{
3370 return map->libbpf_type != LIBBPF_MAP_UNSPEC;
3371}
3372
3373void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
3374{
3375 map->map_ifindex = ifindex;
3376}
3377
3378int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
3379{
3380 if (!bpf_map_type__is_map_in_map(map->def.type)) {
3381 pr_warning("error: unsupported map type\n");
3382 return -EINVAL;
3383 }
3384 if (map->inner_map_fd != -1) {
3385 pr_warning("error: inner_map_fd already specified\n");
3386 return -EINVAL;
3387 }
3388 map->inner_map_fd = fd;
3389 return 0;
3390}
3391
3392static struct bpf_map *
3393__bpf_map__iter(struct bpf_map *m, struct bpf_object *obj, int i)
3394{
3395 ssize_t idx;
3396 struct bpf_map *s, *e;
3397
3398 if (!obj || !obj->maps)
3399 return NULL;
3400
3401 s = obj->maps;
3402 e = obj->maps + obj->nr_maps;
3403
3404 if ((m < s) || (m >= e)) {
3405 pr_warning("error in %s: map handle doesn't belong to object\n",
3406 __func__);
3407 return NULL;
3408 }
3409
3410 idx = (m - obj->maps) + i;
3411 if (idx >= obj->nr_maps || idx < 0)
3412 return NULL;
3413 return &obj->maps[idx];
3414}
3415
3416struct bpf_map *
3417bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
3418{
3419 if (prev == NULL)
3420 return obj->maps;
3421
3422 return __bpf_map__iter(prev, obj, 1);
3423}
3424
3425struct bpf_map *
3426bpf_map__prev(struct bpf_map *next, struct bpf_object *obj)
3427{
3428 if (next == NULL) {
3429 if (!obj->nr_maps)
3430 return NULL;
3431 return obj->maps + obj->nr_maps - 1;
3432 }
3433
3434 return __bpf_map__iter(next, obj, -1);
3435}
3436
3437struct bpf_map *
3438bpf_object__find_map_by_name(struct bpf_object *obj, const char *name)
3439{
3440 struct bpf_map *pos;
3441
3442 bpf_object__for_each_map(pos, obj) {
3443 if (pos->name && !strcmp(pos->name, name))
3444 return pos;
3445 }
3446 return NULL;
3447}
3448
3449int
3450bpf_object__find_map_fd_by_name(struct bpf_object *obj, const char *name)
3451{
3452 return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
3453}
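
/*
 * Usage sketch: look up a map of a loaded object by name and update one
 * element via the raw bpf_map_update_elem() wrapper from bpf.h. The map name
 * "my_map" and the key/value layout are placeholders.
 *
 *	__u32 key = 0;
 *	__u64 value = 1;
 *	int map_fd;
 *
 *	map_fd = bpf_object__find_map_fd_by_name(obj, "my_map");
 *	if (map_fd >= 0)
 *		bpf_map_update_elem(map_fd, &key, &value, BPF_ANY);
 */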
3454
3455struct bpf_map *
3456bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
3457{
3458 int i;
3459
3460 for (i = 0; i < obj->nr_maps; i++) {
3461 if (obj->maps[i].offset == offset)
3462 return &obj->maps[i];
3463 }
3464 return ERR_PTR(-ENOENT);
3465}
3466
3467long libbpf_get_error(const void *ptr)
3468{
3469 if (IS_ERR(ptr))
3470 return PTR_ERR(ptr);
3471 return 0;
3472}
3473
3474int bpf_prog_load(const char *file, enum bpf_prog_type type,
3475 struct bpf_object **pobj, int *prog_fd)
3476{
3477 struct bpf_prog_load_attr attr;
3478
3479 memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
3480 attr.file = file;
3481 attr.prog_type = type;
3482 attr.expected_attach_type = 0;
3483
3484 return bpf_prog_load_xattr(&attr, pobj, prog_fd);
3485}
3486
3487int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
3488 struct bpf_object **pobj, int *prog_fd)
3489{
3490 struct bpf_object_open_attr open_attr = {
3491 .file = attr->file,
3492 .prog_type = attr->prog_type,
3493 };
3494 struct bpf_program *prog, *first_prog = NULL;
3495 enum bpf_attach_type expected_attach_type;
3496 enum bpf_prog_type prog_type;
3497 struct bpf_object *obj;
3498 struct bpf_map *map;
3499 int err;
3500
3501 if (!attr)
3502 return -EINVAL;
3503 if (!attr->file)
3504 return -EINVAL;
3505
3506 obj = bpf_object__open_xattr(&open_attr);
3507 if (IS_ERR_OR_NULL(obj))
3508 return -ENOENT;
3509
3510 bpf_object__for_each_program(prog, obj) {
3511 /*
3512  * If type is not specified, try to guess the program type based
3513  * on the ELF section name.
3514  */
3515 prog_type = attr->prog_type;
3516 prog->prog_ifindex = attr->ifindex;
3517 expected_attach_type = attr->expected_attach_type;
3518 if (prog_type == BPF_PROG_TYPE_UNSPEC) {
3519 err = bpf_program__identify_section(prog, &prog_type,
3520 &expected_attach_type);
3521 if (err < 0) {
3522 bpf_object__close(obj);
3523 return -EINVAL;
3524 }
3525 }
3526
3527 bpf_program__set_type(prog, prog_type);
3528 bpf_program__set_expected_attach_type(prog,
3529 expected_attach_type);
3530
3531 prog->log_level = attr->log_level;
3532 if (!first_prog)
3533 first_prog = prog;
3534 }
3535
3536 bpf_object__for_each_map(map, obj) {
3537 if (!bpf_map__is_offload_neutral(map))
3538 map->map_ifindex = attr->ifindex;
3539 }
3540
3541 if (!first_prog) {
3542 pr_warning("object file doesn't contain bpf program\n");
3543 bpf_object__close(obj);
3544 return -ENOENT;
3545 }
3546
3547 err = bpf_object__load(obj);
3548 if (err) {
3549 bpf_object__close(obj);
3550 return -EINVAL;
3551 }
3552
3553 *pobj = obj;
3554 *prog_fd = bpf_program__fd(first_prog);
3555 return 0;
3556}
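
/*
 * Usage sketch: the one-shot open+load helper. "xdp_prog.o" is a placeholder
 * object file; prog_fd refers to the first program found in the object and
 * can then be attached (for XDP, e.g., with bpf_set_link_xdp_fd()).
 *
 *	struct bpf_object *obj;
 *	int prog_fd;
 *
 *	if (bpf_prog_load("xdp_prog.o", BPF_PROG_TYPE_XDP, &obj, &prog_fd))
 *		return -1;
 */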
3557
3558enum bpf_perf_event_ret
3559bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
3560 void **copy_mem, size_t *copy_size,
3561 bpf_perf_event_print_t fn, void *private_data)
3562{
3563 struct perf_event_mmap_page *header = mmap_mem;
3564 __u64 data_head = ring_buffer_read_head(header);
3565 __u64 data_tail = header->data_tail;
3566 void *base = ((__u8 *)header) + page_size;
3567 int ret = LIBBPF_PERF_EVENT_CONT;
3568 struct perf_event_header *ehdr;
3569 size_t ehdr_size;
3570
3571 while (data_head != data_tail) {
3572 ehdr = base + (data_tail & (mmap_size - 1));
3573 ehdr_size = ehdr->size;
3574
3575 if (((void *)ehdr) + ehdr_size > base + mmap_size) {
3576 void *copy_start = ehdr;
3577 size_t len_first = base + mmap_size - copy_start;
3578 size_t len_secnd = ehdr_size - len_first;
3579
3580 if (*copy_size < ehdr_size) {
3581 free(*copy_mem);
3582 *copy_mem = malloc(ehdr_size);
3583 if (!*copy_mem) {
3584 *copy_size = 0;
3585 ret = LIBBPF_PERF_EVENT_ERROR;
3586 break;
3587 }
3588 *copy_size = ehdr_size;
3589 }
3590
3591 memcpy(*copy_mem, copy_start, len_first);
3592 memcpy(*copy_mem + len_first, base, len_secnd);
3593 ehdr = *copy_mem;
3594 }
3595
3596 ret = fn(ehdr, private_data);
3597 data_tail += ehdr_size;
3598 if (ret != LIBBPF_PERF_EVENT_CONT)
3599 break;
3600 }
3601
3602 ring_buffer_write_tail(header, data_tail);
3603 return ret;
3604}
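
/*
 * Usage sketch for bpf_perf_event_read_simple(): the caller mmap()s a perf
 * ring buffer itself and passes its base, size and the system page size; the
 * copy_mem/copy_size pair starts as NULL/0 and is grown by the helper for
 * records that wrap around the ring. "handle_event" is a placeholder callback
 * of type bpf_perf_event_print_t.
 *
 *	static enum bpf_perf_event_ret
 *	handle_event(struct perf_event_header *hdr, void *private_data)
 *	{
 *		// consume one record here
 *		return LIBBPF_PERF_EVENT_CONT;
 *	}
 *
 *	void *copy_mem = NULL;
 *	size_t copy_size = 0;
 *	enum bpf_perf_event_ret ret;
 *
 *	ret = bpf_perf_event_read_simple(mmap_mem, mmap_size, page_size,
 *					 &copy_mem, &copy_size,
 *					 handle_event, NULL);
 */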
3605
3606struct bpf_prog_info_array_desc {
3607 int array_offset; /* e.g. offset of jited_prog_insns */
3608 int count_offset; /* e.g. offset of jited_prog_len */
3609 int size_offset; /* > 0: offset of the record size field,
3610  * < 0: fixed record size of -size_offset
3611  */
3612};
3613
3614static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = {
3615 [BPF_PROG_INFO_JITED_INSNS] = {
3616 offsetof(struct bpf_prog_info, jited_prog_insns),
3617 offsetof(struct bpf_prog_info, jited_prog_len),
3618 -1,
3619 },
3620 [BPF_PROG_INFO_XLATED_INSNS] = {
3621 offsetof(struct bpf_prog_info, xlated_prog_insns),
3622 offsetof(struct bpf_prog_info, xlated_prog_len),
3623 -1,
3624 },
3625 [BPF_PROG_INFO_MAP_IDS] = {
3626 offsetof(struct bpf_prog_info, map_ids),
3627 offsetof(struct bpf_prog_info, nr_map_ids),
3628 -(int)sizeof(__u32),
3629 },
3630 [BPF_PROG_INFO_JITED_KSYMS] = {
3631 offsetof(struct bpf_prog_info, jited_ksyms),
3632 offsetof(struct bpf_prog_info, nr_jited_ksyms),
3633 -(int)sizeof(__u64),
3634 },
3635 [BPF_PROG_INFO_JITED_FUNC_LENS] = {
3636 offsetof(struct bpf_prog_info, jited_func_lens),
3637 offsetof(struct bpf_prog_info, nr_jited_func_lens),
3638 -(int)sizeof(__u32),
3639 },
3640 [BPF_PROG_INFO_FUNC_INFO] = {
3641 offsetof(struct bpf_prog_info, func_info),
3642 offsetof(struct bpf_prog_info, nr_func_info),
3643 offsetof(struct bpf_prog_info, func_info_rec_size),
3644 },
3645 [BPF_PROG_INFO_LINE_INFO] = {
3646 offsetof(struct bpf_prog_info, line_info),
3647 offsetof(struct bpf_prog_info, nr_line_info),
3648 offsetof(struct bpf_prog_info, line_info_rec_size),
3649 },
3650 [BPF_PROG_INFO_JITED_LINE_INFO] = {
3651 offsetof(struct bpf_prog_info, jited_line_info),
3652 offsetof(struct bpf_prog_info, nr_jited_line_info),
3653 offsetof(struct bpf_prog_info, jited_line_info_rec_size),
3654 },
3655 [BPF_PROG_INFO_PROG_TAGS] = {
3656 offsetof(struct bpf_prog_info, prog_tags),
3657 offsetof(struct bpf_prog_info, nr_prog_tags),
3658 -(int)sizeof(__u8) * BPF_TAG_SIZE,
3659 },
3660
3661};
3662
3663static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info, int offset)
3664{
3665 __u32 *array = (__u32 *)info;
3666
3667 if (offset >= 0)
3668 return array[offset / sizeof(__u32)];
3669 return -(int)offset;
3670}
3671
3672static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info, int offset)
3673{
3674 __u64 *array = (__u64 *)info;
3675
3676 if (offset >= 0)
3677 return array[offset / sizeof(__u64)];
3678 return -(int)offset;
3679}
3680
3681static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset,
3682 __u32 val)
3683{
3684 __u32 *array = (__u32 *)info;
3685
3686 if (offset >= 0)
3687 array[offset / sizeof(__u32)] = val;
3688}
3689
3690static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset,
3691 __u64 val)
3692{
3693 __u64 *array = (__u64 *)info;
3694
3695 if (offset >= 0)
3696 array[offset / sizeof(__u64)] = val;
3697}
3698
3699struct bpf_prog_info_linear *
3700bpf_program__get_prog_info_linear(int fd, __u64 arrays)
3701{
3702 struct bpf_prog_info_linear *info_linear;
3703 struct bpf_prog_info info = {};
3704 __u32 info_len = sizeof(info);
3705 __u32 data_len = 0;
3706 int i, err;
3707 void *ptr;
3708
3709 if (arrays >> BPF_PROG_INFO_LAST_ARRAY)
3710 return ERR_PTR(-EINVAL);
3711
3712 /* step 1: get array dimensions */
3713 err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
3714 if (err) {
3715 pr_debug("can't get prog info: %s\n", strerror(errno));
3716 return ERR_PTR(-EFAULT);
3717 }
3718
3719 /* step 2: calculate total size of all arrays */
3720 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
3721 bool include_array = (arrays & (1UL << i)) > 0;
3722 struct bpf_prog_info_array_desc *desc;
3723 __u32 count, size;
3724
3725 desc = bpf_prog_info_array_desc + i;
3726
3727 /* kernel is too old to support this field */
3728 if (info_len < desc->array_offset + sizeof(__u32) ||
3729 info_len < desc->count_offset + sizeof(__u32) ||
3730 (desc->size_offset > 0 && info_len < desc->size_offset))
3731 include_array = false;
3732
3733 if (!include_array) {
3734 arrays &= ~(1UL << i);
3735 continue;
3736 }
3737
3738 count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
3739 size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
3740
3741 data_len += count * size;
3742 }
3743
3744 /* step 3: allocate one contiguous chunk for info plus all arrays */
3745 data_len = roundup(data_len, sizeof(__u64));
3746 info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len);
3747 if (!info_linear)
3748 return ERR_PTR(-ENOMEM);
3749
3750 /* step 4: fill in counts/sizes and point arrays into info_linear->data */
3751 info_linear->arrays = arrays;
3752 memset(&info_linear->info, 0, sizeof(info));
3753 ptr = info_linear->data;
3754
3755 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
3756 struct bpf_prog_info_array_desc *desc;
3757 __u32 count, size;
3758
3759 if ((arrays & (1UL << i)) == 0)
3760 continue;
3761
3762 desc = bpf_prog_info_array_desc + i;
3763 count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
3764 size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
3765 bpf_prog_info_set_offset_u32(&info_linear->info,
3766 desc->count_offset, count);
3767 bpf_prog_info_set_offset_u32(&info_linear->info,
3768 desc->size_offset, size);
3769 bpf_prog_info_set_offset_u64(&info_linear->info,
3770 desc->array_offset,
3771 ptr_to_u64(ptr));
3772 ptr += count * size;
3773 }
3774
3775 /* step 5: call the syscall again to fetch the requested arrays */
3776 err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len);
3777 if (err) {
3778 pr_debug("can't get prog info: %s\n", strerror(errno));
3779 free(info_linear);
3780 return ERR_PTR(-EFAULT);
3781 }
3782
3783 /* step 6: verify the counts/sizes did not change between the two calls */
3784 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
3785 struct bpf_prog_info_array_desc *desc;
3786 __u32 v1, v2;
3787
3788 if ((arrays & (1UL << i)) == 0)
3789 continue;
3790
3791 desc = bpf_prog_info_array_desc + i;
3792 v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
3793 v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
3794 desc->count_offset);
3795 if (v1 != v2)
3796 pr_warning("%s: mismatch in element count\n", __func__);
3797
3798 v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
3799 v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
3800 desc->size_offset);
3801 if (v1 != v2)
3802 pr_warning("%s: mismatch in rec size\n", __func__);
3803 }
3804
3805 /* step 7: update info_len and data_len */
3806 info_linear->info_len = sizeof(struct bpf_prog_info);
3807 info_linear->data_len = data_len;
3808
3809 return info_linear;
3810}
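
/*
 * Usage sketch: fetch prog info plus the jited ksyms array for a loaded
 * program fd in one contiguous allocation, then release it. "prog_fd" is a
 * placeholder file descriptor.
 *
 *	struct bpf_prog_info_linear *info_linear;
 *
 *	info_linear = bpf_program__get_prog_info_linear(prog_fd,
 *				1UL << BPF_PROG_INFO_JITED_KSYMS);
 *	if (!libbpf_get_error(info_linear)) {
 *		// info_linear->info.nr_jited_ksyms entries are now valid
 *		free(info_linear);
 *	}
 */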
3811
3812void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear)
3813{
3814 int i;
3815
3816 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
3817 struct bpf_prog_info_array_desc *desc;
3818 __u64 addr, offs;
3819
3820 if ((info_linear->arrays & (1UL << i)) == 0)
3821 continue;
3822
3823 desc = bpf_prog_info_array_desc + i;
3824 addr = bpf_prog_info_read_offset_u64(&info_linear->info,
3825 desc->array_offset);
3826 offs = addr - ptr_to_u64(info_linear->data);
3827 bpf_prog_info_set_offset_u64(&info_linear->info,
3828 desc->array_offset, offs);
3829 }
3830}
3831
3832void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
3833{
3834 int i;
3835
3836 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
3837 struct bpf_prog_info_array_desc *desc;
3838 __u64 addr, offs;
3839
3840 if ((info_linear->arrays & (1UL << i)) == 0)
3841 continue;
3842
3843 desc = bpf_prog_info_array_desc + i;
3844 offs = bpf_prog_info_read_offset_u64(&info_linear->info,
3845 desc->array_offset);
3846 addr = offs + ptr_to_u64(info_linear->data);
3847 bpf_prog_info_set_offset_u64(&info_linear->info,
3848 desc->array_offset, addr);
3849 }
3850}
3851