#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <stdbool.h>
#include <stdint.h>
#include <errno.h>
#include <fcntl.h>
#include <stdarg.h>
#include <limits.h>
#include <assert.h>

#ifdef HAVE_ELF
#include <libelf.h>
#include <gelf.h>
#endif

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/un.h>
#include <sys/vfs.h>
#include <sys/mount.h>
#include <sys/syscall.h>
#include <sys/sendfile.h>
#include <sys/resource.h>

#include <arpa/inet.h>

#include "utils.h"
#include "json_print.h"

#include "bpf_util.h"
#include "bpf_elf.h"
#include "bpf_scm.h"

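/* Per-program-type attributes used throughout this file: the textual type
 * name given on the command line, the bpf fs subdirectory used for pinning,
 * the default ELF section to load from, and whether map fds may be exported
 * over a Unix domain socket.
 */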
struct bpf_prog_meta {
	const char *type;
	const char *subdir;
	const char *section;
	bool may_uds_export;
};

static const enum bpf_prog_type __bpf_types[] = {
	BPF_PROG_TYPE_SCHED_CLS,
	BPF_PROG_TYPE_SCHED_ACT,
	BPF_PROG_TYPE_XDP,
	BPF_PROG_TYPE_LWT_IN,
	BPF_PROG_TYPE_LWT_OUT,
	BPF_PROG_TYPE_LWT_XMIT,
};

static const struct bpf_prog_meta __bpf_prog_meta[] = {
	[BPF_PROG_TYPE_SCHED_CLS] = {
		.type = "cls",
		.subdir = "tc",
		.section = ELF_SECTION_CLASSIFIER,
		.may_uds_export = true,
	},
	[BPF_PROG_TYPE_SCHED_ACT] = {
		.type = "act",
		.subdir = "tc",
		.section = ELF_SECTION_ACTION,
		.may_uds_export = true,
	},
	[BPF_PROG_TYPE_XDP] = {
		.type = "xdp",
		.subdir = "xdp",
		.section = ELF_SECTION_PROG,
	},
	[BPF_PROG_TYPE_LWT_IN] = {
		.type = "lwt_in",
		.subdir = "ip",
		.section = ELF_SECTION_PROG,
	},
	[BPF_PROG_TYPE_LWT_OUT] = {
		.type = "lwt_out",
		.subdir = "ip",
		.section = ELF_SECTION_PROG,
	},
	[BPF_PROG_TYPE_LWT_XMIT] = {
		.type = "lwt_xmit",
		.subdir = "ip",
		.section = ELF_SECTION_PROG,
	},
	[BPF_PROG_TYPE_LWT_SEG6LOCAL] = {
		.type = "lwt_seg6local",
		.subdir = "ip",
		.section = ELF_SECTION_PROG,
	},
};

static const char *bpf_prog_to_subdir(enum bpf_prog_type type)
{
	assert(type < ARRAY_SIZE(__bpf_prog_meta) &&
	       __bpf_prog_meta[type].subdir);
	return __bpf_prog_meta[type].subdir;
}

const char *bpf_prog_to_default_section(enum bpf_prog_type type)
{
	assert(type < ARRAY_SIZE(__bpf_prog_meta) &&
	       __bpf_prog_meta[type].section);
	return __bpf_prog_meta[type].section;
}

#ifdef HAVE_ELF
static int bpf_obj_open(const char *path, enum bpf_prog_type type,
			const char *sec, __u32 ifindex, bool verbose);
#else
static int bpf_obj_open(const char *path, enum bpf_prog_type type,
			const char *sec, __u32 ifindex, bool verbose)
{
	fprintf(stderr, "No ELF library support compiled in.\n");
	errno = ENOSYS;
	return -1;
}
#endif

static inline __u64 bpf_ptr_to_u64(const void *ptr)
{
	return (__u64)(unsigned long)ptr;
}

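/* Thin wrapper around the bpf(2) syscall; every eBPF command issued in this
 * file goes through here. On kernels whose headers lack __NR_bpf it fails
 * with ENOSYS.
 */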
static int bpf(int cmd, union bpf_attr *attr, unsigned int size)
{
#ifdef __NR_bpf
	return syscall(__NR_bpf, cmd, attr, size);
#else
	fprintf(stderr, "No bpf syscall, kernel headers too old?\n");
	errno = ENOSYS;
	return -1;
#endif
}

static int bpf_map_update(int fd, const void *key, const void *value,
			  uint64_t flags)
{
	union bpf_attr attr = {};

	attr.map_fd = fd;
	attr.key = bpf_ptr_to_u64(key);
	attr.value = bpf_ptr_to_u64(value);
	attr.flags = flags;

	return bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}

static int bpf_prog_fd_by_id(uint32_t id)
{
	union bpf_attr attr = {};

	attr.prog_id = id;

	return bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
}

static int bpf_prog_info_by_fd(int fd, struct bpf_prog_info *info,
			       uint32_t *info_len)
{
	union bpf_attr attr = {};
	int ret;

	attr.info.bpf_fd = fd;
	attr.info.info = bpf_ptr_to_u64(info);
	attr.info.info_len = *info_len;

	*info_len = 0;
	ret = bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
	if (!ret)
		*info_len = attr.info.info_len;

	return ret;
}

int bpf_dump_prog_info(FILE *f, uint32_t id)
{
	struct bpf_prog_info info = {};
	uint32_t len = sizeof(info);
	int fd, ret, dump_ok = 0;
	SPRINT_BUF(tmp);

	open_json_object("prog");
	print_uint(PRINT_ANY, "id", "id %u ", id);

	fd = bpf_prog_fd_by_id(id);
	if (fd < 0)
		goto out;

	ret = bpf_prog_info_by_fd(fd, &info, &len);
	if (!ret && len) {
		int jited = !!info.jited_prog_len;

		print_string(PRINT_ANY, "tag", "tag %s ",
			     hexstring_n2a(info.tag, sizeof(info.tag),
					   tmp, sizeof(tmp)));
		print_uint(PRINT_JSON, "jited", NULL, jited);
		if (jited && !is_json_context())
			fprintf(f, "jited ");
		dump_ok = 1;
	}

	close(fd);
out:
	close_json_object();
	return dump_ok;
}

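/* Classic BPF programs are passed either inline or via a file as
 * "<insn count>,<code> <jt> <jf> <k>,..." where each comma-separated
 * quadruple is one struct sock_filter. bpf_parse_string() normalizes file
 * contents into that single-line form; bpf_ops_parse() then converts it
 * into an array of opcodes.
 */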
static int bpf_parse_string(char *arg, bool from_file, __u16 *bpf_len,
			    char **bpf_string, bool *need_release,
			    const char separator)
{
	char sp;

	if (from_file) {
		size_t tmp_len, op_len = sizeof("65535 255 255 4294967295,");
		char *tmp_string, *pos, c_prev = ' ';
		FILE *fp;
		int c;

		tmp_len = sizeof("4096,") + BPF_MAXINSNS * op_len;
		tmp_string = pos = calloc(1, tmp_len);
		if (tmp_string == NULL)
			return -ENOMEM;

		fp = fopen(arg, "r");
		if (fp == NULL) {
			perror("Cannot fopen");
			free(tmp_string);
			return -ENOENT;
		}

		while ((c = fgetc(fp)) != EOF) {
			switch (c) {
			case '\n':
				if (c_prev != ',')
					*(pos++) = ',';
				c_prev = ',';
				break;
			case ' ':
			case '\t':
				if (c_prev != ' ')
					*(pos++) = c;
				c_prev = ' ';
				break;
			default:
				*(pos++) = c;
				c_prev = c;
			}
			if (pos - tmp_string == tmp_len)
				break;
		}

		if (!feof(fp)) {
			free(tmp_string);
			fclose(fp);
			return -E2BIG;
		}

		fclose(fp);
		*pos = 0;

		*need_release = true;
		*bpf_string = tmp_string;
	} else {
		*need_release = false;
		*bpf_string = arg;
	}

	if (sscanf(*bpf_string, "%hu%c", bpf_len, &sp) != 2 ||
	    sp != separator) {
		if (*need_release)
			free(*bpf_string);
		return -EINVAL;
	}

	return 0;
}

static int bpf_ops_parse(int argc, char **argv, struct sock_filter *bpf_ops,
			 bool from_file)
{
	char *bpf_string, *token, separator = ',';
	int ret = 0, i = 0;
	bool need_release;
	__u16 bpf_len = 0;

	if (argc < 1)
		return -EINVAL;
	if (bpf_parse_string(argv[0], from_file, &bpf_len, &bpf_string,
			     &need_release, separator))
		return -EINVAL;
	if (bpf_len == 0 || bpf_len > BPF_MAXINSNS) {
		ret = -EINVAL;
		goto out;
	}

	token = bpf_string;
	while ((token = strchr(token, separator)) && (++token)[0]) {
		if (i >= bpf_len) {
			fprintf(stderr, "Real program length exceeds encoded length parameter!\n");
			ret = -EINVAL;
			goto out;
		}

		if (sscanf(token, "%hu %hhu %hhu %u,",
			   &bpf_ops[i].code, &bpf_ops[i].jt,
			   &bpf_ops[i].jf, &bpf_ops[i].k) != 4) {
			fprintf(stderr, "Error at instruction %d!\n", i);
			ret = -EINVAL;
			goto out;
		}

		i++;
	}

	if (i != bpf_len) {
		fprintf(stderr, "Parsed program length is less than encoded length parameter!\n");
		ret = -EINVAL;
		goto out;
	}
	ret = bpf_len;
out:
	if (need_release)
		free(bpf_string);

	return ret;
}

void bpf_print_ops(struct rtattr *bpf_ops, __u16 len)
{
	struct sock_filter *ops = RTA_DATA(bpf_ops);
	int i;

	if (len == 0)
		return;

	open_json_object("bytecode");
	print_uint(PRINT_ANY, "length", "bytecode '%u,", len);
	open_json_array(PRINT_JSON, "insns");

	for (i = 0; i < len; i++) {
		open_json_object(NULL);
		print_hu(PRINT_ANY, "code", "%hu ", ops[i].code);
		print_hhu(PRINT_ANY, "jt", "%hhu ", ops[i].jt);
		print_hhu(PRINT_ANY, "jf", "%hhu ", ops[i].jf);
		if (i == len - 1)
			print_uint(PRINT_ANY, "k", "%u'", ops[i].k);
		else
			print_uint(PRINT_ANY, "k", "%u,", ops[i].k);
		close_json_object();
	}

	close_json_array(PRINT_JSON, NULL);
	close_json_object();
}

static void bpf_map_pin_report(const struct bpf_elf_map *pin,
			       const struct bpf_elf_map *obj)
{
	fprintf(stderr, "Map specification differs from pinned file!\n");

	if (obj->type != pin->type)
		fprintf(stderr, " - Type: %u (obj) != %u (pin)\n",
			obj->type, pin->type);
	if (obj->size_key != pin->size_key)
		fprintf(stderr, " - Size key: %u (obj) != %u (pin)\n",
			obj->size_key, pin->size_key);
	if (obj->size_value != pin->size_value)
		fprintf(stderr, " - Size value: %u (obj) != %u (pin)\n",
			obj->size_value, pin->size_value);
	if (obj->max_elem != pin->max_elem)
		fprintf(stderr, " - Max elems: %u (obj) != %u (pin)\n",
			obj->max_elem, pin->max_elem);
	if (obj->flags != pin->flags)
		fprintf(stderr, " - Flags: %#x (obj) != %#x (pin)\n",
			obj->flags, pin->flags);

	fprintf(stderr, "\n");
}

struct bpf_prog_data {
	unsigned int type;
	unsigned int jited;
};

struct bpf_map_ext {
	struct bpf_prog_data owner;
	unsigned int btf_id_key;
	unsigned int btf_id_val;
};

static int bpf_derive_elf_map_from_fdinfo(int fd, struct bpf_elf_map *map,
					  struct bpf_map_ext *ext)
{
	unsigned int val, owner_type = 0, owner_jited = 0;
	char file[PATH_MAX], buff[4096];
	FILE *fp;

	snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd);
	memset(map, 0, sizeof(*map));

	fp = fopen(file, "r");
	if (!fp) {
		fprintf(stderr, "No procfs support?!\n");
		return -EIO;
	}

	while (fgets(buff, sizeof(buff), fp)) {
		if (sscanf(buff, "map_type:\t%u", &val) == 1)
			map->type = val;
		else if (sscanf(buff, "key_size:\t%u", &val) == 1)
			map->size_key = val;
		else if (sscanf(buff, "value_size:\t%u", &val) == 1)
			map->size_value = val;
		else if (sscanf(buff, "max_entries:\t%u", &val) == 1)
			map->max_elem = val;
		else if (sscanf(buff, "map_flags:\t%i", &val) == 1)
			map->flags = val;
		else if (sscanf(buff, "owner_prog_type:\t%i", &val) == 1)
			owner_type = val;
		else if (sscanf(buff, "owner_jited:\t%i", &val) == 1)
			owner_jited = val;
	}

	fclose(fp);
	if (ext) {
		memset(ext, 0, sizeof(*ext));
		ext->owner.type = owner_type;
		ext->owner.jited = owner_jited;
	}

	return 0;
}

static int bpf_map_selfcheck_pinned(int fd, const struct bpf_elf_map *map,
				    struct bpf_map_ext *ext, int length,
				    enum bpf_prog_type type)
{
	struct bpf_elf_map tmp, zero = {};
	int ret;

	ret = bpf_derive_elf_map_from_fdinfo(fd, &tmp, ext);
	if (ret < 0)
		return ret;

	/* The kernel ultimately decides whether to reject a mismatch, but
	 * warn here so the user can tell what differs.
	 */
	if (ext->owner.type && ext->owner.type != type)
		fprintf(stderr, "Program array map owner types differ: %u (obj) != %u (pin)\n",
			type, ext->owner.type);

	if (!memcmp(&tmp, map, length)) {
		return 0;
	} else {
		/* If the kernel does not expose eBPF details in fdinfo,
		 * everything read back above is zero. In that case there is
		 * nothing meaningful to compare against, so accept the
		 * pinned object as-is.
		 */
		if (!memcmp(&tmp, &zero, length))
			return 0;

		bpf_map_pin_report(&tmp, map);
		return -EINVAL;
	}
}

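/* Mount a bpf filesystem at the given target. The mount point is first made
 * private (bind-mounting it onto itself once if MS_PRIVATE fails with
 * EINVAL) so pinned objects do not propagate to other mount namespaces,
 * before bpffs itself is mounted with mode 0700.
 */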
static int bpf_mnt_fs(const char *target)
{
	bool bind_done = false;

	while (mount("", target, "none", MS_PRIVATE | MS_REC, NULL)) {
		if (errno != EINVAL || bind_done) {
			fprintf(stderr, "mount --make-private %s failed: %s\n",
				target, strerror(errno));
			return -1;
		}

		if (mount(target, target, "none", MS_BIND, NULL)) {
			fprintf(stderr, "mount --bind %s %s failed: %s\n",
				target, target, strerror(errno));
			return -1;
		}

		bind_done = true;
	}

	if (mount("bpf", target, "bpf", 0, "mode=0700")) {
		fprintf(stderr, "mount -t bpf bpf %s failed: %s\n",
			target, strerror(errno));
		return -1;
	}

	return 0;
}

static int bpf_mnt_check_target(const char *target)
{
	struct stat sb = {};
	int ret;

	ret = stat(target, &sb);
	if (ret) {
		ret = mkdir(target, S_IRWXU);
		if (ret) {
			fprintf(stderr, "mkdir %s failed: %s\n", target,
				strerror(errno));
			return ret;
		}
	}

	return 0;
}

static int bpf_valid_mntpt(const char *mnt, unsigned long magic)
{
	struct statfs st_fs;

	if (statfs(mnt, &st_fs) < 0)
		return -ENOENT;
	if ((unsigned long)st_fs.f_type != magic)
		return -ENOENT;

	return 0;
}

static const char *bpf_find_mntpt_single(unsigned long magic, char *mnt,
					 int len, const char *mntpt)
{
	int ret;

	ret = bpf_valid_mntpt(mntpt, magic);
	if (!ret) {
		strlcpy(mnt, mntpt, len);
		return mnt;
	}

	return NULL;
}

static const char *bpf_find_mntpt(const char *fstype, unsigned long magic,
				  char *mnt, int len,
				  const char * const *known_mnts)
{
	const char * const *ptr;
	char type[100];
	FILE *fp;

	if (known_mnts) {
		ptr = known_mnts;
		while (*ptr) {
			if (bpf_find_mntpt_single(magic, mnt, len, *ptr))
				return mnt;
			ptr++;
		}
	}

	if (len != PATH_MAX)
		return NULL;

	fp = fopen("/proc/mounts", "r");
	if (fp == NULL)
		return NULL;

	while (fscanf(fp, "%*s %" textify(PATH_MAX) "s %99s %*s %*d %*d\n",
		      mnt, type) == 2) {
		if (strcmp(type, fstype) == 0)
			break;
	}

	fclose(fp);
	if (strcmp(type, fstype) != 0)
		return NULL;

	return mnt;
}

int bpf_trace_pipe(void)
{
	char tracefs_mnt[PATH_MAX] = TRACE_DIR_MNT;
	static const char * const tracefs_known_mnts[] = {
		TRACE_DIR_MNT,
		"/sys/kernel/debug/tracing",
		"/tracing",
		"/trace",
		0,
	};
	int fd_in, fd_out = STDERR_FILENO;
	char tpipe[PATH_MAX];
	const char *mnt;

	mnt = bpf_find_mntpt("tracefs", TRACEFS_MAGIC, tracefs_mnt,
			     sizeof(tracefs_mnt), tracefs_known_mnts);
	if (!mnt) {
		fprintf(stderr, "tracefs not mounted?\n");
		return -1;
	}

	snprintf(tpipe, sizeof(tpipe), "%s/trace_pipe", mnt);

	fd_in = open(tpipe, O_RDONLY);
	if (fd_in < 0)
		return -1;

	fprintf(stderr, "Running! Hang up with ^C!\n\n");
	while (1) {
		static char buff[4096];
		ssize_t ret;

		ret = read(fd_in, buff, sizeof(buff));
		if (ret > 0 && write(fd_out, buff, ret) == ret)
			continue;
		break;
	}

	close(fd_in);
	return -1;
}

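/* Helpers for laying out the working directory under the bpf fs mount: a
 * master subdirectory (the subdir of the first entry in __bpf_types, i.e.
 * "tc") gets a BPF_DIR_GLOBALS directory, and the remaining subdirectories
 * ("xdp", "ip") are created as symlinks to the master, falling back to a
 * bind mount when symlinking is not permitted.
 */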
static int bpf_gen_global(const char *bpf_sub_dir)
{
	char bpf_glo_dir[PATH_MAX];
	int ret;

	snprintf(bpf_glo_dir, sizeof(bpf_glo_dir), "%s/%s/",
		 bpf_sub_dir, BPF_DIR_GLOBALS);

	ret = mkdir(bpf_glo_dir, S_IRWXU);
	if (ret && errno != EEXIST) {
		fprintf(stderr, "mkdir %s failed: %s\n", bpf_glo_dir,
			strerror(errno));
		return ret;
	}

	return 0;
}

static int bpf_gen_master(const char *base, const char *name)
{
	char bpf_sub_dir[PATH_MAX + NAME_MAX + 1];
	int ret;

	snprintf(bpf_sub_dir, sizeof(bpf_sub_dir), "%s%s/", base, name);

	ret = mkdir(bpf_sub_dir, S_IRWXU);
	if (ret && errno != EEXIST) {
		fprintf(stderr, "mkdir %s failed: %s\n", bpf_sub_dir,
			strerror(errno));
		return ret;
	}

	return bpf_gen_global(bpf_sub_dir);
}

static int bpf_slave_via_bind_mnt(const char *full_name,
				  const char *full_link)
{
	int ret;

	ret = mkdir(full_name, S_IRWXU);
	if (ret) {
		assert(errno != EEXIST);
		fprintf(stderr, "mkdir %s failed: %s\n", full_name,
			strerror(errno));
		return ret;
	}

	ret = mount(full_link, full_name, "none", MS_BIND, NULL);
	if (ret) {
		rmdir(full_name);
		fprintf(stderr, "mount --bind %s %s failed: %s\n",
			full_link, full_name, strerror(errno));
	}

	return ret;
}

static int bpf_gen_slave(const char *base, const char *name,
			 const char *link)
{
	char bpf_lnk_dir[PATH_MAX + NAME_MAX + 1];
	char bpf_sub_dir[PATH_MAX + NAME_MAX];
	struct stat sb = {};
	int ret;

	snprintf(bpf_lnk_dir, sizeof(bpf_lnk_dir), "%s%s/", base, link);
	snprintf(bpf_sub_dir, sizeof(bpf_sub_dir), "%s%s", base, name);

	ret = symlink(bpf_lnk_dir, bpf_sub_dir);
	if (ret) {
		if (errno != EEXIST) {
			if (errno != EPERM) {
				fprintf(stderr, "symlink %s failed: %s\n",
					bpf_sub_dir, strerror(errno));
				return ret;
			}

			return bpf_slave_via_bind_mnt(bpf_sub_dir,
						      bpf_lnk_dir);
		}

		ret = lstat(bpf_sub_dir, &sb);
		if (ret) {
			fprintf(stderr, "lstat %s failed: %s\n",
				bpf_sub_dir, strerror(errno));
			return ret;
		}

		if ((sb.st_mode & S_IFMT) != S_IFLNK)
			return bpf_gen_global(bpf_sub_dir);
	}

	return 0;
}

static int bpf_gen_hierarchy(const char *base)
{
	int ret, i;

	ret = bpf_gen_master(base, bpf_prog_to_subdir(__bpf_types[0]));
	for (i = 1; i < ARRAY_SIZE(__bpf_types) && !ret; i++)
		ret = bpf_gen_slave(base,
				    bpf_prog_to_subdir(__bpf_types[i]),
				    bpf_prog_to_subdir(__bpf_types[0]));
	return ret;
}

static const char *bpf_get_work_dir(enum bpf_prog_type type)
{
	static char bpf_tmp[PATH_MAX] = BPF_DIR_MNT;
	static char bpf_wrk_dir[PATH_MAX];
	static const char *mnt;
	static bool bpf_mnt_cached;
	const char *mnt_env = getenv(BPF_ENV_MNT);
	static const char * const bpf_known_mnts[] = {
		BPF_DIR_MNT,
		"/bpf",
		0,
	};
	int ret;

	if (bpf_mnt_cached) {
		const char *out = mnt;

		if (out && type) {
			snprintf(bpf_tmp, sizeof(bpf_tmp), "%s%s/",
				 out, bpf_prog_to_subdir(type));
			out = bpf_tmp;
		}
		return out;
	}

	if (mnt_env)
		mnt = bpf_find_mntpt_single(BPF_FS_MAGIC, bpf_tmp,
					    sizeof(bpf_tmp), mnt_env);
	else
		mnt = bpf_find_mntpt("bpf", BPF_FS_MAGIC, bpf_tmp,
				     sizeof(bpf_tmp), bpf_known_mnts);
	if (!mnt) {
		mnt = mnt_env ? : BPF_DIR_MNT;
		ret = bpf_mnt_check_target(mnt);
		if (!ret)
			ret = bpf_mnt_fs(mnt);
		if (ret) {
			mnt = NULL;
			goto out;
		}
	}

	ret = snprintf(bpf_wrk_dir, sizeof(bpf_wrk_dir), "%s/", mnt);
	if (ret < 0 || ret >= sizeof(bpf_wrk_dir)) {
		mnt = NULL;
		goto out;
	}

	ret = bpf_gen_hierarchy(bpf_wrk_dir);
	if (ret) {
		mnt = NULL;
		goto out;
	}

	mnt = bpf_wrk_dir;
out:
	bpf_mnt_cached = true;
	return mnt;
}

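/* Retrieve a pinned object. Paths starting with "m:" are interpreted as
 * relative to the bpf fs working directory of the given program type.
 */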
static int bpf_obj_get(const char *pathname, enum bpf_prog_type type)
{
	union bpf_attr attr = {};
	char tmp[PATH_MAX];

	if (strlen(pathname) > 2 && pathname[0] == 'm' &&
	    pathname[1] == ':' && bpf_get_work_dir(type)) {
		snprintf(tmp, sizeof(tmp), "%s/%s",
			 bpf_get_work_dir(type), pathname + 2);
		pathname = tmp;
	}

	attr.pathname = bpf_ptr_to_u64(pathname);

	return bpf(BPF_OBJ_GET, &attr, sizeof(attr));
}

static int bpf_obj_pinned(const char *pathname, enum bpf_prog_type type)
{
	int prog_fd = bpf_obj_get(pathname, type);

	if (prog_fd < 0)
		fprintf(stderr, "Couldn't retrieve pinned program '%s': %s\n",
			pathname, strerror(errno));
	return prog_fd;
}

static int bpf_do_parse(struct bpf_cfg_in *cfg, const bool *opt_tbl)
{
	const char *file, *section, *uds_name;
	bool verbose = false;
	int i, ret, argc;
	char **argv;

	argv = cfg->argv;
	argc = cfg->argc;

	if (opt_tbl[CBPF_BYTECODE] &&
	    (matches(*argv, "bytecode") == 0 ||
	     strcmp(*argv, "bc") == 0)) {
		cfg->mode = CBPF_BYTECODE;
	} else if (opt_tbl[CBPF_FILE] &&
		   (matches(*argv, "bytecode-file") == 0 ||
		    strcmp(*argv, "bcf") == 0)) {
		cfg->mode = CBPF_FILE;
	} else if (opt_tbl[EBPF_OBJECT] &&
		   (matches(*argv, "object-file") == 0 ||
		    strcmp(*argv, "obj") == 0)) {
		cfg->mode = EBPF_OBJECT;
	} else if (opt_tbl[EBPF_PINNED] &&
		   (matches(*argv, "object-pinned") == 0 ||
		    matches(*argv, "pinned") == 0 ||
		    matches(*argv, "fd") == 0)) {
		cfg->mode = EBPF_PINNED;
	} else {
		fprintf(stderr, "What mode is \"%s\"?\n", *argv);
		return -1;
	}

	NEXT_ARG();
	file = section = uds_name = NULL;
	if (cfg->mode == EBPF_OBJECT || cfg->mode == EBPF_PINNED) {
		file = *argv;
		NEXT_ARG_FWD();

		if (cfg->type == BPF_PROG_TYPE_UNSPEC) {
			if (argc > 0 && matches(*argv, "type") == 0) {
				NEXT_ARG();
				for (i = 0; i < ARRAY_SIZE(__bpf_prog_meta);
				     i++) {
					if (!__bpf_prog_meta[i].type)
						continue;
					if (!matches(*argv,
						     __bpf_prog_meta[i].type)) {
						cfg->type = i;
						break;
					}
				}

				if (cfg->type == BPF_PROG_TYPE_UNSPEC) {
					fprintf(stderr, "What type is \"%s\"?\n",
						*argv);
					return -1;
				}
				NEXT_ARG_FWD();
			} else {
				cfg->type = BPF_PROG_TYPE_SCHED_CLS;
			}
		}

		section = bpf_prog_to_default_section(cfg->type);
		if (argc > 0 && matches(*argv, "section") == 0) {
			NEXT_ARG();
			section = *argv;
			NEXT_ARG_FWD();
		}

		if (__bpf_prog_meta[cfg->type].may_uds_export) {
			uds_name = getenv(BPF_ENV_UDS);
			if (argc > 0 && !uds_name &&
			    matches(*argv, "export") == 0) {
				NEXT_ARG();
				uds_name = *argv;
				NEXT_ARG_FWD();
			}
		}

		if (argc > 0 && matches(*argv, "verbose") == 0) {
			verbose = true;
			NEXT_ARG_FWD();
		}

		PREV_ARG();
	}

	if (cfg->mode == CBPF_BYTECODE || cfg->mode == CBPF_FILE) {
		ret = bpf_ops_parse(argc, argv, cfg->opcodes,
				    cfg->mode == CBPF_FILE);
		cfg->n_opcodes = ret;
	} else if (cfg->mode == EBPF_OBJECT) {
		ret = 0;
	} else if (cfg->mode == EBPF_PINNED) {
		ret = bpf_obj_pinned(file, cfg->type);
		cfg->prog_fd = ret;
	} else {
		return -1;
	}

	cfg->object = file;
	cfg->section = section;
	cfg->uds = uds_name;
	cfg->argc = argc;
	cfg->argv = argv;
	cfg->verbose = verbose;

	return ret;
}

static int bpf_do_load(struct bpf_cfg_in *cfg)
{
	if (cfg->mode == EBPF_OBJECT) {
		cfg->prog_fd = bpf_obj_open(cfg->object, cfg->type,
					    cfg->section, cfg->ifindex,
					    cfg->verbose);
		return cfg->prog_fd;
	}
	return 0;
}

int bpf_load_common(struct bpf_cfg_in *cfg, const struct bpf_cfg_ops *ops,
		    void *nl)
{
	char annotation[256];
	int ret;

	ret = bpf_do_load(cfg);
	if (ret < 0)
		return ret;

	if (cfg->mode == CBPF_BYTECODE || cfg->mode == CBPF_FILE)
		ops->cbpf_cb(nl, cfg->opcodes, cfg->n_opcodes);
	if (cfg->mode == EBPF_OBJECT || cfg->mode == EBPF_PINNED) {
		snprintf(annotation, sizeof(annotation), "%s:[%s]",
			 basename(cfg->object), cfg->mode == EBPF_PINNED ?
			 "*fsobj" : cfg->section);
		ops->ebpf_cb(nl, cfg->prog_fd, annotation);
	}

	return 0;
}

int bpf_parse_common(struct bpf_cfg_in *cfg, const struct bpf_cfg_ops *ops)
{
	bool opt_tbl[BPF_MODE_MAX] = {};

	if (ops->cbpf_cb) {
		opt_tbl[CBPF_BYTECODE] = true;
		opt_tbl[CBPF_FILE] = true;
	}

	if (ops->ebpf_cb) {
		opt_tbl[EBPF_OBJECT] = true;
		opt_tbl[EBPF_PINNED] = true;
	}

	return bpf_do_parse(cfg, opt_tbl);
}

int bpf_parse_and_load_common(struct bpf_cfg_in *cfg,
			      const struct bpf_cfg_ops *ops, void *nl)
{
	int ret;

	ret = bpf_parse_common(cfg, ops);
	if (ret < 0)
		return ret;

	return bpf_load_common(cfg, ops, nl);
}

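/* Parse and load a program, then graft its fd into a pinned
 * BPF_MAP_TYPE_PROG_ARRAY map at the given key. If no key is passed, the
 * key is inferred from the trailing "/<key>" part of the section name.
 */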
int bpf_graft_map(const char *map_path, uint32_t *key, int argc, char **argv)
{
	const bool opt_tbl[BPF_MODE_MAX] = {
		[EBPF_OBJECT] = true,
		[EBPF_PINNED] = true,
	};
	const struct bpf_elf_map test = {
		.type = BPF_MAP_TYPE_PROG_ARRAY,
		.size_key = sizeof(int),
		.size_value = sizeof(int),
	};
	struct bpf_cfg_in cfg = {
		.type = BPF_PROG_TYPE_UNSPEC,
		.argc = argc,
		.argv = argv,
	};
	struct bpf_map_ext ext = {};
	int ret, prog_fd, map_fd;
	uint32_t map_key;

	ret = bpf_do_parse(&cfg, opt_tbl);
	if (ret < 0)
		return ret;

	ret = bpf_do_load(&cfg);
	if (ret < 0)
		return ret;

	prog_fd = cfg.prog_fd;

	if (key) {
		map_key = *key;
	} else {
		ret = sscanf(cfg.section, "%*i/%i", &map_key);
		if (ret != 1) {
			fprintf(stderr, "Couldn't infer map key from section name! Please provide 'key' argument!\n");
			ret = -EINVAL;
			goto out_prog;
		}
	}

	map_fd = bpf_obj_get(map_path, cfg.type);
	if (map_fd < 0) {
		fprintf(stderr, "Couldn't retrieve pinned map '%s': %s\n",
			map_path, strerror(errno));
		ret = map_fd;
		goto out_prog;
	}

	ret = bpf_map_selfcheck_pinned(map_fd, &test, &ext,
				       offsetof(struct bpf_elf_map, max_elem),
				       cfg.type);
	if (ret < 0) {
		fprintf(stderr, "Map '%s' self-check failed!\n", map_path);
		goto out_map;
	}

	ret = bpf_map_update(map_fd, &map_key, &prog_fd, BPF_ANY);
	if (ret < 0)
		fprintf(stderr, "Map update failed: %s\n", strerror(errno));
out_map:
	close(map_fd);
out_prog:
	close(prog_fd);
	return ret;
}

int bpf_prog_attach_fd(int prog_fd, int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr = {};

	attr.target_fd = target_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type = type;

	return bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
}

int bpf_prog_detach_fd(int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr = {};

	attr.target_fd = target_fd;
	attr.attach_type = type;

	return bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
}

static int bpf_prog_load_dev(enum bpf_prog_type type,
			     const struct bpf_insn *insns, size_t size_insns,
			     const char *license, __u32 ifindex,
			     char *log, size_t size_log)
{
	union bpf_attr attr = {};

	attr.prog_type = type;
	attr.insns = bpf_ptr_to_u64(insns);
	attr.insn_cnt = size_insns / sizeof(struct bpf_insn);
	attr.license = bpf_ptr_to_u64(license);
	attr.prog_ifindex = ifindex;

	if (size_log > 0) {
		attr.log_buf = bpf_ptr_to_u64(log);
		attr.log_size = size_log;
		attr.log_level = 1;
	}

	return bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
}

int bpf_prog_load(enum bpf_prog_type type, const struct bpf_insn *insns,
		  size_t size_insns, const char *license, char *log,
		  size_t size_log)
{
	return bpf_prog_load_dev(type, insns, size_insns, license, 0,
				 log, size_log);
}

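/* Everything below implements the ELF object loader: parsing map, license,
 * BTF and program sections, creating and pinning maps, and loading the
 * programs themselves. It is only compiled in when libelf is available
 * (HAVE_ELF).
 */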
#ifdef HAVE_ELF
struct bpf_elf_prog {
	enum bpf_prog_type type;
	struct bpf_insn *insns;
	unsigned int insns_num;
	size_t size;
	const char *license;
};

struct bpf_hash_entry {
	unsigned int pinning;
	const char *subpath;
	struct bpf_hash_entry *next;
};

struct bpf_config {
	unsigned int jit_enabled;
};

struct bpf_btf {
	const struct btf_header *hdr;
	const void *raw;
	const char *strings;
	const struct btf_type **types;
	int types_num;
};

struct bpf_elf_ctx {
	struct bpf_config cfg;
	Elf *elf_fd;
	GElf_Ehdr elf_hdr;
	Elf_Data *sym_tab;
	Elf_Data *str_tab;
	Elf_Data *btf_data;
	char obj_uid[64];
	int obj_fd;
	int btf_fd;
	int map_fds[ELF_MAX_MAPS];
	struct bpf_elf_map maps[ELF_MAX_MAPS];
	struct bpf_map_ext maps_ext[ELF_MAX_MAPS];
	struct bpf_elf_prog prog_text;
	struct bpf_btf btf;
	int sym_num;
	int map_num;
	int map_len;
	bool *sec_done;
	int sec_maps;
	int sec_text;
	int sec_btf;
	char license[ELF_MAX_LICENSE_LEN];
	enum bpf_prog_type type;
	__u32 ifindex;
	bool verbose;
	bool noafalg;
	struct bpf_elf_st stat;
	struct bpf_hash_entry *ht[256];
	char *log;
	size_t log_size;
};

struct bpf_elf_sec_data {
	GElf_Shdr sec_hdr;
	Elf_Data *sec_data;
	const char *sec_name;
};

struct bpf_map_data {
	int *fds;
	const char *obj;
	struct bpf_elf_st *st;
	struct bpf_elf_map *ent;
};

static bool bpf_log_has_data(struct bpf_elf_ctx *ctx)
{
	return ctx->log && ctx->log[0];
}

static __check_format_string(2, 3) void
bpf_dump_error(struct bpf_elf_ctx *ctx, const char *format, ...)
{
	va_list vl;

	va_start(vl, format);
	vfprintf(stderr, format, vl);
	va_end(vl);

	if (bpf_log_has_data(ctx)) {
		if (ctx->verbose) {
			fprintf(stderr, "%s\n", ctx->log);
		} else {
			unsigned int off = 0, len = strlen(ctx->log);

			if (len > BPF_MAX_LOG) {
				off = len - BPF_MAX_LOG;
				fprintf(stderr, "Skipped %u bytes, use 'verb' option for the full verbose log.\n[...]\n",
					off);
			}
			fprintf(stderr, "%s\n", ctx->log + off);
		}

		memset(ctx->log, 0, ctx->log_size);
	}
}

static int bpf_log_realloc(struct bpf_elf_ctx *ctx)
{
	const size_t log_max = UINT_MAX >> 8;
	size_t log_size = ctx->log_size;
	char *ptr;

	if (!ctx->log) {
		log_size = 65536;
	} else if (log_size < log_max) {
		log_size <<= 1;
		if (log_size > log_max)
			log_size = log_max;
	} else {
		return -EINVAL;
	}

	ptr = realloc(ctx->log, log_size);
	if (!ptr)
		return -ENOMEM;

	ptr[0] = 0;
	ctx->log = ptr;
	ctx->log_size = log_size;

	return 0;
}

static int bpf_map_create(enum bpf_map_type type, uint32_t size_key,
			  uint32_t size_value, uint32_t max_elem,
			  uint32_t flags, int inner_fd, int btf_fd,
			  uint32_t ifindex, uint32_t btf_id_key,
			  uint32_t btf_id_val)
{
	union bpf_attr attr = {};

	attr.map_type = type;
	attr.key_size = size_key;
	attr.value_size = inner_fd ? sizeof(int) : size_value;
	attr.max_entries = max_elem;
	attr.map_flags = flags;
	attr.inner_map_fd = inner_fd;
	attr.map_ifindex = ifindex;
	attr.btf_fd = btf_fd;
	attr.btf_key_type_id = btf_id_key;
	attr.btf_value_type_id = btf_id_val;

	return bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
}

static int bpf_btf_load(void *btf, size_t size_btf,
			char *log, size_t size_log)
{
	union bpf_attr attr = {};

	attr.btf = bpf_ptr_to_u64(btf);
	attr.btf_size = size_btf;

	if (size_log > 0) {
		attr.btf_log_buf = bpf_ptr_to_u64(log);
		attr.btf_log_size = size_log;
		attr.btf_log_level = 1;
	}

	return bpf(BPF_BTF_LOAD, &attr, sizeof(attr));
}

static int bpf_obj_pin(int fd, const char *pathname)
{
	union bpf_attr attr = {};

	attr.pathname = bpf_ptr_to_u64(pathname);
	attr.bpf_fd = fd;

	return bpf(BPF_OBJ_PIN, &attr, sizeof(attr));
}

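/* Hash the ELF object file with SHA-1 via the kernel's AF_ALG socket
 * interface; the 20-byte digest serves as the object's unique identifier
 * (obj_uid) for PIN_OBJECT_NS pinning.
 */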
static int bpf_obj_hash(const char *object, uint8_t *out, size_t len)
{
	struct sockaddr_alg alg = {
		.salg_family = AF_ALG,
		.salg_type = "hash",
		.salg_name = "sha1",
	};
	int ret, cfd, ofd, ffd;
	struct stat stbuff;
	ssize_t size;

	if (!object || len != 20)
		return -EINVAL;

	cfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	if (cfd < 0)
		return cfd;

	ret = bind(cfd, (struct sockaddr *)&alg, sizeof(alg));
	if (ret < 0)
		goto out_cfd;

	ofd = accept(cfd, NULL, 0);
	if (ofd < 0) {
		ret = ofd;
		goto out_cfd;
	}

	ffd = open(object, O_RDONLY);
	if (ffd < 0) {
		fprintf(stderr, "Error opening object %s: %s\n",
			object, strerror(errno));
		ret = ffd;
		goto out_ofd;
	}

	ret = fstat(ffd, &stbuff);
	if (ret < 0) {
		fprintf(stderr, "Error doing fstat: %s\n",
			strerror(errno));
		goto out_ffd;
	}

	size = sendfile(ofd, ffd, NULL, stbuff.st_size);
	if (size != stbuff.st_size) {
		fprintf(stderr, "Error from sendfile (%zd vs %zu bytes): %s\n",
			size, stbuff.st_size, strerror(errno));
		ret = -1;
		goto out_ffd;
	}

	size = read(ofd, out, len);
	if (size != len) {
		fprintf(stderr, "Error from read (%zd vs %zu bytes): %s\n",
			size, len, strerror(errno));
		ret = -1;
	} else {
		ret = 0;
	}
out_ffd:
	close(ffd);
out_ofd:
	close(ofd);
out_cfd:
	close(cfd);
	return ret;
}

static void bpf_init_env(void)
{
	struct rlimit limit = {
		.rlim_cur = RLIM_INFINITY,
		.rlim_max = RLIM_INFINITY,
	};

	/* Best effort only; ignore failures here. */
	setrlimit(RLIMIT_MEMLOCK, &limit);

	if (!bpf_get_work_dir(BPF_PROG_TYPE_UNSPEC))
		fprintf(stderr, "Continuing without mounted eBPF fs. Too old kernel?\n");
}

static const char *bpf_custom_pinning(const struct bpf_elf_ctx *ctx,
				      uint32_t pinning)
{
	struct bpf_hash_entry *entry;

	entry = ctx->ht[pinning & (ARRAY_SIZE(ctx->ht) - 1)];
	while (entry && entry->pinning != pinning)
		entry = entry->next;

	return entry ? entry->subpath : NULL;
}

static bool bpf_no_pinning(const struct bpf_elf_ctx *ctx,
			   uint32_t pinning)
{
	switch (pinning) {
	case PIN_OBJECT_NS:
	case PIN_GLOBAL_NS:
		return false;
	case PIN_NONE:
		return true;
	default:
		return !bpf_custom_pinning(ctx, pinning);
	}
}

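/* Build the pin pathname for a map: PIN_OBJECT_NS places it under a
 * per-object directory named by obj_uid, PIN_GLOBAL_NS under the shared
 * BPF_DIR_GLOBALS directory, and any other pinning value is resolved as a
 * custom subpath relative to the bpf fs mount.
 */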
static void bpf_make_pathname(char *pathname, size_t len, const char *name,
			      const struct bpf_elf_ctx *ctx, uint32_t pinning)
{
	switch (pinning) {
	case PIN_OBJECT_NS:
		snprintf(pathname, len, "%s/%s/%s",
			 bpf_get_work_dir(ctx->type),
			 ctx->obj_uid, name);
		break;
	case PIN_GLOBAL_NS:
		snprintf(pathname, len, "%s/%s/%s",
			 bpf_get_work_dir(ctx->type),
			 BPF_DIR_GLOBALS, name);
		break;
	default:
		snprintf(pathname, len, "%s/../%s/%s",
			 bpf_get_work_dir(ctx->type),
			 bpf_custom_pinning(ctx, pinning), name);
		break;
	}
}

static int bpf_probe_pinned(const char *name, const struct bpf_elf_ctx *ctx,
			    uint32_t pinning)
{
	char pathname[PATH_MAX];

	if (bpf_no_pinning(ctx, pinning) || !bpf_get_work_dir(ctx->type))
		return 0;

	bpf_make_pathname(pathname, sizeof(pathname), name, ctx, pinning);
	return bpf_obj_get(pathname, ctx->type);
}

static int bpf_make_obj_path(const struct bpf_elf_ctx *ctx)
{
	char tmp[PATH_MAX];
	int ret;

	snprintf(tmp, sizeof(tmp), "%s/%s", bpf_get_work_dir(ctx->type),
		 ctx->obj_uid);

	ret = mkdir(tmp, S_IRWXU);
	if (ret && errno != EEXIST) {
		fprintf(stderr, "mkdir %s failed: %s\n", tmp, strerror(errno));
		return ret;
	}

	return 0;
}

static int bpf_make_custom_path(const struct bpf_elf_ctx *ctx,
				const char *todo)
{
	char tmp[PATH_MAX], rem[PATH_MAX], *sub;
	int ret;

	snprintf(tmp, sizeof(tmp), "%s/../", bpf_get_work_dir(ctx->type));
	snprintf(rem, sizeof(rem), "%s/", todo);
	sub = strtok(rem, "/");

	while (sub) {
		if (strlen(tmp) + strlen(sub) + 2 > PATH_MAX)
			return -EINVAL;

		strcat(tmp, sub);
		strcat(tmp, "/");

		ret = mkdir(tmp, S_IRWXU);
		if (ret && errno != EEXIST) {
			fprintf(stderr, "mkdir %s failed: %s\n", tmp,
				strerror(errno));
			return ret;
		}

		sub = strtok(NULL, "/");
	}

	return 0;
}

static int bpf_place_pinned(int fd, const char *name,
			    const struct bpf_elf_ctx *ctx, uint32_t pinning)
{
	char pathname[PATH_MAX];
	const char *tmp;
	int ret = 0;

	if (bpf_no_pinning(ctx, pinning) || !bpf_get_work_dir(ctx->type))
		return 0;

	if (pinning == PIN_OBJECT_NS)
		ret = bpf_make_obj_path(ctx);
	else if ((tmp = bpf_custom_pinning(ctx, pinning)))
		ret = bpf_make_custom_path(ctx, tmp);
	if (ret < 0)
		return ret;

	bpf_make_pathname(pathname, sizeof(pathname), name, ctx, pinning);
	return bpf_obj_pin(fd, pathname);
}

static void bpf_prog_report(int fd, const char *section,
			    const struct bpf_elf_prog *prog,
			    struct bpf_elf_ctx *ctx)
{
	unsigned int insns = prog->size / sizeof(struct bpf_insn);

	fprintf(stderr, "\nProg section '%s' %s%s (%d)!\n", section,
		fd < 0 ? "rejected: " : "loaded",
		fd < 0 ? strerror(errno) : "",
		fd < 0 ? errno : fd);

	fprintf(stderr, " - Type: %u\n", prog->type);
	fprintf(stderr, " - Instructions: %u (%u over limit)\n",
		insns, insns > BPF_MAXINSNS ? insns - BPF_MAXINSNS : 0);
	fprintf(stderr, " - License: %s\n\n", prog->license);

	bpf_dump_error(ctx, "Verifier analysis:\n\n");
}

static int bpf_prog_attach(const char *section,
			   const struct bpf_elf_prog *prog,
			   struct bpf_elf_ctx *ctx)
{
	int tries = 0, fd;
retry:
	errno = 0;
	fd = bpf_prog_load_dev(prog->type, prog->insns, prog->size,
			       prog->license, ctx->ifindex,
			       ctx->log, ctx->log_size);
	if (fd < 0 || ctx->verbose) {
		/* The verifier log can be very large. If loading failed with
		 * ENOSPC (or we have no log buffer yet), grow the buffer and
		 * retry a few times before giving up, so that as much of the
		 * verifier output as possible can be shown.
		 */
		if (fd < 0 && (errno == ENOSPC || !ctx->log_size)) {
			if (tries++ < 10 && !bpf_log_realloc(ctx))
				goto retry;

			fprintf(stderr, "Log buffer too small to dump verifier log %zu bytes (%d tries)!\n",
				ctx->log_size, tries);
			return fd;
		}

		bpf_prog_report(fd, section, prog, ctx);
	}

	return fd;
}

static void bpf_map_report(int fd, const char *name,
			   const struct bpf_elf_map *map,
			   struct bpf_elf_ctx *ctx, int inner_fd)
{
	fprintf(stderr, "Map object '%s' %s%s (%d)!\n", name,
		fd < 0 ? "rejected: " : "loaded",
		fd < 0 ? strerror(errno) : "",
		fd < 0 ? errno : fd);

	fprintf(stderr, " - Type: %u\n", map->type);
	fprintf(stderr, " - Identifier: %u\n", map->id);
	fprintf(stderr, " - Pinning: %u\n", map->pinning);
	fprintf(stderr, " - Size key: %u\n", map->size_key);
	fprintf(stderr, " - Size value: %u\n",
		inner_fd ? (int)sizeof(int) : map->size_value);
	fprintf(stderr, " - Max elems: %u\n", map->max_elem);
	fprintf(stderr, " - Flags: %#x\n\n", map->flags);
}

static int bpf_find_map_id(const struct bpf_elf_ctx *ctx, uint32_t id)
{
	int i;

	for (i = 0; i < ctx->map_num; i++) {
		if (ctx->maps[i].id != id)
			continue;
		if (ctx->map_fds[i] < 0)
			return -EINVAL;

		return ctx->map_fds[i];
	}

	return -ENOENT;
}

static void bpf_report_map_in_map(int outer_fd, uint32_t idx)
{
	struct bpf_elf_map outer_map;
	int ret;

	fprintf(stderr, "Cannot insert map into map! ");

	ret = bpf_derive_elf_map_from_fdinfo(outer_fd, &outer_map, NULL);
	if (!ret) {
		if (idx >= outer_map.max_elem &&
		    outer_map.type == BPF_MAP_TYPE_ARRAY_OF_MAPS) {
			fprintf(stderr, "Outer map has %u elements, index %u is invalid!\n",
				outer_map.max_elem, idx);
			return;
		}
	}

	fprintf(stderr, "Different map specs used for outer and inner map?\n");
}

static bool bpf_is_map_in_map_type(const struct bpf_elf_map *map)
{
	return map->type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
	       map->type == BPF_MAP_TYPE_HASH_OF_MAPS;
}

static bool bpf_map_offload_neutral(enum bpf_map_type type)
{
	return type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

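/* Create or reuse a single map: first probe for an already pinned map and
 * self-check it against the ELF spec; otherwise create it (resolving the
 * inner map fd for map-in-map types), report the result, and pin it
 * according to its pinning attribute. On an EEXIST race while pinning, the
 * probe is retried once.
 */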
static int bpf_map_attach(const char *name, struct bpf_elf_ctx *ctx,
			  const struct bpf_elf_map *map, struct bpf_map_ext *ext,
			  int *have_map_in_map)
{
	int fd, ifindex, ret, map_inner_fd = 0;
	bool retried = false;

probe:
	fd = bpf_probe_pinned(name, ctx, map->pinning);
	if (fd > 0) {
		ret = bpf_map_selfcheck_pinned(fd, map, ext,
					       offsetof(struct bpf_elf_map,
							id), ctx->type);
		if (ret < 0) {
			close(fd);
			fprintf(stderr, "Map '%s' self-check failed!\n",
				name);
			return ret;
		}
		if (ctx->verbose)
			fprintf(stderr, "Map '%s' loaded as pinned!\n",
				name);
		return fd;
	}

	if (have_map_in_map && bpf_is_map_in_map_type(map)) {
		(*have_map_in_map)++;
		if (map->inner_id)
			return 0;
		fprintf(stderr, "Map '%s' cannot be created since no inner map ID defined!\n",
			name);
		return -EINVAL;
	}

	if (!have_map_in_map && bpf_is_map_in_map_type(map)) {
		map_inner_fd = bpf_find_map_id(ctx, map->inner_id);
		if (map_inner_fd < 0) {
			fprintf(stderr, "Map '%s' cannot be loaded. Inner map with ID %u not found!\n",
				name, map->inner_id);
			return -EINVAL;
		}
	}

	ifindex = bpf_map_offload_neutral(map->type) ? 0 : ctx->ifindex;
	errno = 0;
	fd = bpf_map_create(map->type, map->size_key, map->size_value,
			    map->max_elem, map->flags, map_inner_fd, ctx->btf_fd,
			    ifindex, ext->btf_id_key, ext->btf_id_val);

	if (fd < 0 || ctx->verbose) {
		bpf_map_report(fd, name, map, ctx, map_inner_fd);
		if (fd < 0)
			return fd;
	}

	ret = bpf_place_pinned(fd, name, ctx, map->pinning);
	if (ret < 0) {
		close(fd);
		if (!retried && errno == EEXIST) {
			retried = true;
			goto probe;
		}
		fprintf(stderr, "Could not pin %s map: %s\n", name,
			strerror(errno));
		return ret;
	}

	return fd;
}

static const char *bpf_str_tab_name(const struct bpf_elf_ctx *ctx,
				    const GElf_Sym *sym)
{
	return ctx->str_tab->d_buf + sym->st_name;
}

static int bpf_btf_find(struct bpf_elf_ctx *ctx, const char *name)
{
	const struct btf_type *type;
	const char *res;
	int id;

	for (id = 1; id < ctx->btf.types_num; id++) {
		type = ctx->btf.types[id];
		if (type->name_off >= ctx->btf.hdr->str_len)
			continue;
		res = &ctx->btf.strings[type->name_off];
		if (!strcmp(res, name))
			return id;
	}

	return -ENOENT;
}

static int bpf_btf_find_kv(struct bpf_elf_ctx *ctx, const struct bpf_elf_map *map,
			   const char *name, uint32_t *id_key, uint32_t *id_val)
{
	const struct btf_member *key, *val;
	const struct btf_type *type;
	char btf_name[512];
	const char *res;
	int id;

	snprintf(btf_name, sizeof(btf_name), "____btf_map_%s", name);
	id = bpf_btf_find(ctx, btf_name);
	if (id < 0)
		return id;

	type = ctx->btf.types[id];
	if (BTF_INFO_KIND(type->info) != BTF_KIND_STRUCT)
		return -EINVAL;
	if (BTF_INFO_VLEN(type->info) != 2)
		return -EINVAL;

	key = ((void *) type) + sizeof(*type);
	val = key + 1;
	if (!key->type || key->type >= ctx->btf.types_num ||
	    !val->type || val->type >= ctx->btf.types_num)
		return -EINVAL;

	if (key->name_off >= ctx->btf.hdr->str_len ||
	    val->name_off >= ctx->btf.hdr->str_len)
		return -EINVAL;

	res = &ctx->btf.strings[key->name_off];
	if (strcmp(res, "key"))
		return -EINVAL;

	res = &ctx->btf.strings[val->name_off];
	if (strcmp(res, "value"))
		return -EINVAL;

	*id_key = key->type;
	*id_val = val->type;
	return 0;
}

static void bpf_btf_annotate(struct bpf_elf_ctx *ctx, int which, const char *name)
{
	uint32_t id_key = 0, id_val = 0;

	if (!bpf_btf_find_kv(ctx, &ctx->maps[which], name, &id_key, &id_val)) {
		ctx->maps_ext[which].btf_id_key = id_key;
		ctx->maps_ext[which].btf_id_val = id_val;
	}
}

static const char *bpf_map_fetch_name(struct bpf_elf_ctx *ctx, int which)
{
	const char *name;
	GElf_Sym sym;
	int i;

	for (i = 0; i < ctx->sym_num; i++) {
		int type;

		if (gelf_getsym(ctx->sym_tab, i, &sym) != &sym)
			continue;

		type = GELF_ST_TYPE(sym.st_info);
		if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
		    (type != STT_NOTYPE && type != STT_OBJECT) ||
		    sym.st_shndx != ctx->sec_maps ||
		    sym.st_value / ctx->map_len != which)
			continue;

		name = bpf_str_tab_name(ctx, &sym);
		bpf_btf_annotate(ctx, which, name);
		return name;
	}

	return NULL;
}

static int bpf_maps_attach_all(struct bpf_elf_ctx *ctx)
{
	int i, j, ret, fd, inner_fd, inner_idx, have_map_in_map = 0;
	const char *map_name;

	for (i = 0; i < ctx->map_num; i++) {
		if (ctx->maps[i].pinning == PIN_OBJECT_NS &&
		    ctx->noafalg) {
			fprintf(stderr, "Missing kernel AF_ALG support for PIN_OBJECT_NS!\n");
			return -ENOTSUP;
		}

		map_name = bpf_map_fetch_name(ctx, i);
		if (!map_name)
			return -EIO;

		fd = bpf_map_attach(map_name, ctx, &ctx->maps[i],
				    &ctx->maps_ext[i], &have_map_in_map);
		if (fd < 0)
			return fd;

		ctx->map_fds[i] = !fd ? -1 : fd;
	}

	for (i = 0; have_map_in_map && i < ctx->map_num; i++) {
		if (ctx->map_fds[i] >= 0)
			continue;

		map_name = bpf_map_fetch_name(ctx, i);
		if (!map_name)
			return -EIO;

		fd = bpf_map_attach(map_name, ctx, &ctx->maps[i],
				    &ctx->maps_ext[i], NULL);
		if (fd < 0)
			return fd;

		ctx->map_fds[i] = fd;
	}

	for (i = 0; have_map_in_map && i < ctx->map_num; i++) {
		if (!ctx->maps[i].id ||
		    ctx->maps[i].inner_id ||
		    ctx->maps[i].inner_idx == -1)
			continue;

		inner_fd = ctx->map_fds[i];
		inner_idx = ctx->maps[i].inner_idx;

		for (j = 0; j < ctx->map_num; j++) {
			if (!bpf_is_map_in_map_type(&ctx->maps[j]))
				continue;
			if (ctx->maps[j].inner_id != ctx->maps[i].id)
				continue;

			ret = bpf_map_update(ctx->map_fds[j], &inner_idx,
					     &inner_fd, BPF_ANY);
			if (ret < 0) {
				bpf_report_map_in_map(ctx->map_fds[j],
						      inner_idx);
				return ret;
			}
		}
	}

	return 0;
}

static int bpf_map_num_sym(struct bpf_elf_ctx *ctx)
{
	int i, num = 0;
	GElf_Sym sym;

	for (i = 0; i < ctx->sym_num; i++) {
		int type;

		if (gelf_getsym(ctx->sym_tab, i, &sym) != &sym)
			continue;

		type = GELF_ST_TYPE(sym.st_info);
		if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
		    (type != STT_NOTYPE && type != STT_OBJECT) ||
		    sym.st_shndx != ctx->sec_maps)
			continue;
		num++;
	}

	return num;
}

static int bpf_fill_section_data(struct bpf_elf_ctx *ctx, int section,
				 struct bpf_elf_sec_data *data)
{
	Elf_Data *sec_edata;
	GElf_Shdr sec_hdr;
	Elf_Scn *sec_fd;
	char *sec_name;

	memset(data, 0, sizeof(*data));

	sec_fd = elf_getscn(ctx->elf_fd, section);
	if (!sec_fd)
		return -EINVAL;
	if (gelf_getshdr(sec_fd, &sec_hdr) != &sec_hdr)
		return -EIO;

	sec_name = elf_strptr(ctx->elf_fd, ctx->elf_hdr.e_shstrndx,
			      sec_hdr.sh_name);
	if (!sec_name || !sec_hdr.sh_size)
		return -ENOENT;

	sec_edata = elf_getdata(sec_fd, NULL);
	if (!sec_edata || elf_getdata(sec_fd, sec_edata))
		return -EIO;

	memcpy(&data->sec_hdr, &sec_hdr, sizeof(sec_hdr));

	data->sec_name = sec_name;
	data->sec_data = sec_edata;
	return 0;
}

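/* Minimal, oldest supported layout of struct bpf_elf_map. Objects built
 * against an older, smaller struct are accepted as long as they carry at
 * least these members; bpf_fetch_maps_end() pads the difference with
 * zeroes.
 */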
struct bpf_elf_map_min {
	__u32 type;
	__u32 size_key;
	__u32 size_value;
	__u32 max_elem;
};

static int bpf_fetch_maps_begin(struct bpf_elf_ctx *ctx, int section,
				struct bpf_elf_sec_data *data)
{
	ctx->map_num = data->sec_data->d_size;
	ctx->sec_maps = section;
	ctx->sec_done[section] = true;

	if (ctx->map_num > sizeof(ctx->maps)) {
		fprintf(stderr, "Too many BPF maps in ELF section!\n");
		return -ENOMEM;
	}

	memcpy(ctx->maps, data->sec_data->d_buf, ctx->map_num);
	return 0;
}

static int bpf_map_verify_all_offs(struct bpf_elf_ctx *ctx, int end)
{
	GElf_Sym sym;
	int off, i;

	for (off = 0; off < end; off += ctx->map_len) {
		/* Symbols are not necessarily ordered by offset, so scan the
		 * whole symbol table for one that starts at this offset.
		 */
		for (i = 0; i < ctx->sym_num; i++) {
			int type;

			if (gelf_getsym(ctx->sym_tab, i, &sym) != &sym)
				continue;

			type = GELF_ST_TYPE(sym.st_info);
			if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
			    (type != STT_NOTYPE && type != STT_OBJECT) ||
			    sym.st_shndx != ctx->sec_maps)
				continue;
			if (sym.st_value == off)
				break;
			if (i == ctx->sym_num - 1)
				return -1;
		}
	}

	return off == end ? 0 : -1;
}

static int bpf_fetch_maps_end(struct bpf_elf_ctx *ctx)
{
	struct bpf_elf_map fixup[ARRAY_SIZE(ctx->maps)] = {};
	int i, sym_num = bpf_map_num_sym(ctx);
	__u8 *buff;

	if (sym_num == 0 || sym_num > ARRAY_SIZE(ctx->maps)) {
		fprintf(stderr, "%u maps not supported in current map section!\n",
			sym_num);
		return -EINVAL;
	}

	if (ctx->map_num % sym_num != 0 ||
	    ctx->map_num % sizeof(__u32) != 0) {
		fprintf(stderr, "Number BPF map symbols are not multiple of struct bpf_elf_map!\n");
		return -EINVAL;
	}

	ctx->map_len = ctx->map_num / sym_num;
	if (bpf_map_verify_all_offs(ctx, ctx->map_num)) {
		fprintf(stderr, "Different struct bpf_elf_map in use!\n");
		return -EINVAL;
	}

	if (ctx->map_len == sizeof(struct bpf_elf_map)) {
		ctx->map_num = sym_num;
		return 0;
	} else if (ctx->map_len > sizeof(struct bpf_elf_map)) {
		fprintf(stderr, "struct bpf_elf_map not supported, coming from future version?\n");
		return -EINVAL;
	} else if (ctx->map_len < sizeof(struct bpf_elf_map_min)) {
		fprintf(stderr, "struct bpf_elf_map too small, not supported!\n");
		return -EINVAL;
	}

	ctx->map_num = sym_num;
	for (i = 0, buff = (void *)ctx->maps; i < ctx->map_num;
	     i++, buff += ctx->map_len) {
		/* Copy only the smaller, older struct layout into the fixup
		 * array; members beyond it remain zero-initialized, which is
		 * a sane default for the newer fields.
		 */
2018 memcpy(&fixup[i], buff, ctx->map_len);
2019 }
2020
2021 memcpy(ctx->maps, fixup, sizeof(fixup));
2022 if (ctx->verbose)
2023 printf("%zu bytes struct bpf_elf_map fixup performed due to size mismatch!\n",
2024 sizeof(struct bpf_elf_map) - ctx->map_len);
2025 return 0;
2026}
2027
2028static int bpf_fetch_license(struct bpf_elf_ctx *ctx, int section,
2029 struct bpf_elf_sec_data *data)
2030{
2031 if (data->sec_data->d_size > sizeof(ctx->license))
2032 return -ENOMEM;
2033
2034 memcpy(ctx->license, data->sec_data->d_buf, data->sec_data->d_size);
2035 ctx->sec_done[section] = true;
2036 return 0;
2037}
2038
2039static int bpf_fetch_symtab(struct bpf_elf_ctx *ctx, int section,
2040 struct bpf_elf_sec_data *data)
2041{
2042 ctx->sym_tab = data->sec_data;
2043 ctx->sym_num = data->sec_hdr.sh_size / data->sec_hdr.sh_entsize;
2044 ctx->sec_done[section] = true;
2045 return 0;
2046}
2047
2048static int bpf_fetch_strtab(struct bpf_elf_ctx *ctx, int section,
2049 struct bpf_elf_sec_data *data)
2050{
2051 ctx->str_tab = data->sec_data;
2052 ctx->sec_done[section] = true;
2053 return 0;
2054}
2055
2056static int bpf_fetch_text(struct bpf_elf_ctx *ctx, int section,
2057 struct bpf_elf_sec_data *data)
2058{
2059 ctx->sec_text = section;
2060 ctx->sec_done[section] = true;
2061 return 0;
2062}
2063
2064static void bpf_btf_report(int fd, struct bpf_elf_ctx *ctx)
2065{
2066 fprintf(stderr, "\nBTF debug data section \'.BTF\' %s%s (%d)!\n",
2067 fd < 0 ? "rejected: " : "loaded",
2068 fd < 0 ? strerror(errno) : "",
2069 fd < 0 ? errno : fd);
2070
2071 fprintf(stderr, " - Length: %zu\n", ctx->btf_data->d_size);
2072
2073 bpf_dump_error(ctx, "Verifier analysis:\n\n");
2074}
2075
2076static int bpf_btf_attach(struct bpf_elf_ctx *ctx)
2077{
2078 int tries = 0, fd;
2079retry:
2080 errno = 0;
2081 fd = bpf_btf_load(ctx->btf_data->d_buf, ctx->btf_data->d_size,
2082 ctx->log, ctx->log_size);
2083 if (fd < 0 || ctx->verbose) {
2084 if (fd < 0 && (errno == ENOSPC || !ctx->log_size)) {
2085 if (tries++ < 10 && !bpf_log_realloc(ctx))
2086 goto retry;
2087
2088 fprintf(stderr, "Log buffer too small to dump verifier log %zu bytes (%d tries)!\n",
2089 ctx->log_size, tries);
2090 return fd;
2091 }
2092
2093 if (bpf_log_has_data(ctx))
2094 bpf_btf_report(fd, ctx);
2095 }
2096
2097 return fd;
2098}
2099
2100static int bpf_fetch_btf_begin(struct bpf_elf_ctx *ctx, int section,
2101 struct bpf_elf_sec_data *data)
2102{
2103 ctx->btf_data = data->sec_data;
2104 ctx->sec_btf = section;
2105 ctx->sec_done[section] = true;
2106 return 0;
2107}
2108
2109static int bpf_btf_check_header(struct bpf_elf_ctx *ctx)
2110{
2111 const struct btf_header *hdr = ctx->btf_data->d_buf;
2112 const char *str_start, *str_end;
2113 unsigned int data_len;
2114
2115 if (hdr->magic != BTF_MAGIC) {
2116 fprintf(stderr, "Object has wrong BTF magic: %x, expected: %x!\n",
2117 hdr->magic, BTF_MAGIC);
2118 return -EINVAL;
2119 }
2120
2121 if (hdr->version != BTF_VERSION) {
2122 fprintf(stderr, "Object has wrong BTF version: %u, expected: %u!\n",
2123 hdr->version, BTF_VERSION);
2124 return -EINVAL;
2125 }
2126
2127 if (hdr->flags) {
2128 fprintf(stderr, "Object has unsupported BTF flags %x!\n",
2129 hdr->flags);
2130 return -EINVAL;
2131 }
2132
2133 data_len = ctx->btf_data->d_size - sizeof(*hdr);
2134 if (data_len < hdr->type_off ||
2135 data_len < hdr->str_off ||
2136 data_len < hdr->type_len + hdr->str_len ||
2137 hdr->type_off >= hdr->str_off ||
2138 hdr->type_off + hdr->type_len != hdr->str_off ||
2139 hdr->str_off + hdr->str_len != data_len ||
2140 (hdr->type_off & (sizeof(uint32_t) - 1))) {
2141 fprintf(stderr, "Object has malformed BTF data!\n");
2142 return -EINVAL;
2143 }
2144
2145 ctx->btf.hdr = hdr;
2146 ctx->btf.raw = hdr + 1;
2147
2148 str_start = ctx->btf.raw + hdr->str_off;
2149 str_end = str_start + hdr->str_len;
2150 if (!hdr->str_len ||
2151 hdr->str_len - 1 > BTF_MAX_NAME_OFFSET ||
2152 str_start[0] || str_end[-1]) {
2153 fprintf(stderr, "Object has malformed BTF string data!\n");
2154 return -EINVAL;
2155 }
2156
2157 ctx->btf.strings = str_start;
2158 return 0;
2159}
2160
2161static int bpf_btf_register_type(struct bpf_elf_ctx *ctx,
2162 const struct btf_type *type)
2163{
2164 int cur = ctx->btf.types_num, num = cur + 1;
2165 const struct btf_type **types;
2166
2167 types = realloc(ctx->btf.types, num * sizeof(type));
2168 if (!types) {
2169 free(ctx->btf.types);
2170 ctx->btf.types = NULL;
2171 ctx->btf.types_num = 0;
2172 return -ENOMEM;
2173 }
2174
2175 ctx->btf.types = types;
2176 ctx->btf.types[cur] = type;
2177 ctx->btf.types_num = num;
2178 return 0;
2179}
2180
2181static struct btf_type btf_type_void;
2182
2183static int bpf_btf_prep_type_data(struct bpf_elf_ctx *ctx)
2184{
2185 const void *type_cur = ctx->btf.raw + ctx->btf.hdr->type_off;
2186 const void *type_end = ctx->btf.raw + ctx->btf.hdr->str_off;
2187 const struct btf_type *type;
2188 uint16_t var_len;
2189 int ret, kind;
2190
2191 ret = bpf_btf_register_type(ctx, &btf_type_void);
2192 if (ret < 0)
2193 return ret;
2194
2195 while (type_cur < type_end) {
2196 type = type_cur;
2197 type_cur += sizeof(*type);
2198
2199 var_len = BTF_INFO_VLEN(type->info);
2200 kind = BTF_INFO_KIND(type->info);
2201
2202 switch (kind) {
2203 case BTF_KIND_INT:
2204 type_cur += sizeof(int);
2205 break;
2206 case BTF_KIND_ARRAY:
2207 type_cur += sizeof(struct btf_array);
2208 break;
2209 case BTF_KIND_STRUCT:
2210 case BTF_KIND_UNION:
2211 type_cur += var_len * sizeof(struct btf_member);
2212 break;
2213 case BTF_KIND_ENUM:
2214 type_cur += var_len * sizeof(struct btf_enum);
2215 break;
2216 case BTF_KIND_FUNC_PROTO:
2217 type_cur += var_len * sizeof(struct btf_param);
2218 break;
2219 case BTF_KIND_TYPEDEF:
2220 case BTF_KIND_PTR:
2221 case BTF_KIND_FWD:
2222 case BTF_KIND_VOLATILE:
2223 case BTF_KIND_CONST:
2224 case BTF_KIND_RESTRICT:
2225 case BTF_KIND_FUNC:
2226 break;
2227 default:
2228 fprintf(stderr, "Object has unknown BTF type: %u!\n", kind);
2229 return -EINVAL;
2230 }
2231
2232 ret = bpf_btf_register_type(ctx, type);
2233 if (ret < 0)
2234 return ret;
2235 }
2236
2237 return 0;
2238}
2239
2240static int bpf_btf_prep_data(struct bpf_elf_ctx *ctx)
2241{
2242 int ret = bpf_btf_check_header(ctx);
2243
2244 if (!ret)
2245 return bpf_btf_prep_type_data(ctx);
2246 return ret;
2247}
2248
2249static void bpf_fetch_btf_end(struct bpf_elf_ctx *ctx)
2250{
2251 int fd = bpf_btf_attach(ctx);
2252
2253 if (fd < 0)
2254 return;
2255 ctx->btf_fd = fd;
2256 if (bpf_btf_prep_data(ctx) < 0) {
2257 close(ctx->btf_fd);
2258 ctx->btf_fd = 0;
2259 }
2260}
2261
2262static bool bpf_has_map_data(const struct bpf_elf_ctx *ctx)
2263{
2264 return ctx->sym_tab && ctx->str_tab && ctx->sec_maps;
2265}
2266
2267static bool bpf_has_btf_data(const struct bpf_elf_ctx *ctx)
2268{
2269 return ctx->sec_btf;
2270}
2271
2272static bool bpf_has_call_data(const struct bpf_elf_ctx *ctx)
2273{
2274 return ctx->sec_text;
2275}
2276
2277static int bpf_fetch_ancillary(struct bpf_elf_ctx *ctx, bool check_text_sec)
2278{
2279 struct bpf_elf_sec_data data;
2280 int i, ret = -1;
2281
2282 for (i = 1; i < ctx->elf_hdr.e_shnum; i++) {
2283 ret = bpf_fill_section_data(ctx, i, &data);
2284 if (ret < 0)
2285 continue;
2286
2287 if (data.sec_hdr.sh_type == SHT_PROGBITS &&
2288 !strcmp(data.sec_name, ELF_SECTION_MAPS))
2289 ret = bpf_fetch_maps_begin(ctx, i, &data);
2290 else if (data.sec_hdr.sh_type == SHT_PROGBITS &&
2291 !strcmp(data.sec_name, ELF_SECTION_LICENSE))
2292 ret = bpf_fetch_license(ctx, i, &data);
2293 else if (data.sec_hdr.sh_type == SHT_PROGBITS &&
2294 (data.sec_hdr.sh_flags & SHF_EXECINSTR) &&
2295 !strcmp(data.sec_name, ".text") &&
2296 check_text_sec)
2297 ret = bpf_fetch_text(ctx, i, &data);
2298 else if (data.sec_hdr.sh_type == SHT_SYMTAB &&
2299 !strcmp(data.sec_name, ".symtab"))
2300 ret = bpf_fetch_symtab(ctx, i, &data);
2301 else if (data.sec_hdr.sh_type == SHT_STRTAB &&
2302 !strcmp(data.sec_name, ".strtab"))
2303 ret = bpf_fetch_strtab(ctx, i, &data);
2304 else if (data.sec_hdr.sh_type == SHT_PROGBITS &&
2305 !strcmp(data.sec_name, ".BTF"))
2306 ret = bpf_fetch_btf_begin(ctx, i, &data);
2307 if (ret < 0) {
2308 fprintf(stderr, "Error parsing section %d! Perhaps check with readelf -a?\n",
2309 i);
2310 return ret;
2311 }
2312 }
2313
2314 if (bpf_has_btf_data(ctx))
2315 bpf_fetch_btf_end(ctx);
2316 if (bpf_has_map_data(ctx)) {
2317 ret = bpf_fetch_maps_end(ctx);
2318 if (ret < 0) {
2319 fprintf(stderr, "Error fixing up map structure, incompatible struct bpf_elf_map used?\n");
2320 return ret;
2321 }
2322
2323 ret = bpf_maps_attach_all(ctx);
2324 if (ret < 0) {
2325 fprintf(stderr, "Error loading maps into kernel!\n");
2326 return ret;
2327 }
2328 }
2329
2330 return ret;
2331}
2332
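/* Load a program section that has no relocation entries; the raw
 * instructions can be handed to the kernel as-is.
 */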
2333static int bpf_fetch_prog(struct bpf_elf_ctx *ctx, const char *section,
2334 bool *sseen)
2335{
2336 struct bpf_elf_sec_data data;
2337 struct bpf_elf_prog prog;
2338 int ret, i, fd = -1;
2339
2340 for (i = 1; i < ctx->elf_hdr.e_shnum; i++) {
2341 if (ctx->sec_done[i])
2342 continue;
2343
2344 ret = bpf_fill_section_data(ctx, i, &data);
2345 if (ret < 0 ||
2346 !(data.sec_hdr.sh_type == SHT_PROGBITS &&
2347 (data.sec_hdr.sh_flags & SHF_EXECINSTR) &&
2348 !strcmp(data.sec_name, section)))
2349 continue;
2350
2351 *sseen = true;
2352
2353 memset(&prog, 0, sizeof(prog));
2354 prog.type = ctx->type;
2355 prog.license = ctx->license;
2356 prog.size = data.sec_data->d_size;
2357 prog.insns_num = prog.size / sizeof(struct bpf_insn);
2358 prog.insns = data.sec_data->d_buf;
2359
2360 fd = bpf_prog_attach(section, &prog, ctx);
2361 if (fd < 0)
2362 return fd;
2363
2364 ctx->sec_done[i] = true;
2365 break;
2366 }
2367
2368 return fd;
2369}
2370
2371struct bpf_relo_props {
2372 struct bpf_tail_call {
2373 unsigned int total;
2374 unsigned int jited;
2375 } tc;
2376 int main_num;
2377};
2378
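/* Resolve a map relocation: the ld64 (BPF_LD | BPF_IMM | BPF_DW)
 * instruction at the relocation offset is rewritten to carry the map fd
 * with src_reg set to BPF_PSEUDO_MAP_FD. For tail call maps, the JIT
 * state of the map owner is tracked so mismatches can be reported if
 * the program later fails to load.
 */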
2379static int bpf_apply_relo_map(struct bpf_elf_ctx *ctx, struct bpf_elf_prog *prog,
2380 GElf_Rel *relo, GElf_Sym *sym,
2381 struct bpf_relo_props *props)
2382{
2383 unsigned int insn_off = relo->r_offset / sizeof(struct bpf_insn);
2384 unsigned int map_idx = sym->st_value / ctx->map_len;
2385
2386 if (insn_off >= prog->insns_num)
2387 return -EINVAL;
2388 if (prog->insns[insn_off].code != (BPF_LD | BPF_IMM | BPF_DW)) {
2389		fprintf(stderr, "ELF contains relo data for non-ld64 instruction at offset %u! Compiler bug?!\n",
2390 insn_off);
2391 return -EINVAL;
2392 }
2393
2394 if (map_idx >= ARRAY_SIZE(ctx->map_fds))
2395 return -EINVAL;
2396 if (!ctx->map_fds[map_idx])
2397 return -EINVAL;
2398 if (ctx->maps[map_idx].type == BPF_MAP_TYPE_PROG_ARRAY) {
2399 props->tc.total++;
2400 if (ctx->maps_ext[map_idx].owner.jited ||
2401 (ctx->maps_ext[map_idx].owner.type == 0 &&
2402 ctx->cfg.jit_enabled))
2403 props->tc.jited++;
2404 }
2405
2406 prog->insns[insn_off].src_reg = BPF_PSEUDO_MAP_FD;
2407 prog->insns[insn_off].imm = ctx->map_fds[map_idx];
2408 return 0;
2409}
2410
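/* Resolve a bpf-to-bpf call relocation: the relocated .text subprograms
 * are appended once behind the main program, and the call's imm field
 * is rebased relative to the call site so it lands on the callee inside
 * the appended block.
 */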
2411static int bpf_apply_relo_call(struct bpf_elf_ctx *ctx, struct bpf_elf_prog *prog,
2412 GElf_Rel *relo, GElf_Sym *sym,
2413 struct bpf_relo_props *props)
2414{
2415 unsigned int insn_off = relo->r_offset / sizeof(struct bpf_insn);
2416 struct bpf_elf_prog *prog_text = &ctx->prog_text;
2417
2418 if (insn_off >= prog->insns_num)
2419 return -EINVAL;
2420	if (prog->insns[insn_off].code != (BPF_JMP | BPF_CALL) ||
2421	    prog->insns[insn_off].src_reg != BPF_PSEUDO_CALL) {
2422		fprintf(stderr, "ELF contains relo data for non-call instruction at offset %u! Compiler bug?!\n",
2423 insn_off);
2424 return -EINVAL;
2425 }
2426
2427 if (!props->main_num) {
2428 struct bpf_insn *insns = realloc(prog->insns,
2429 prog->size + prog_text->size);
2430 if (!insns)
2431 return -ENOMEM;
2432
2433 memcpy(insns + prog->insns_num, prog_text->insns,
2434 prog_text->size);
2435 props->main_num = prog->insns_num;
2436 prog->insns = insns;
2437 prog->insns_num += prog_text->insns_num;
2438 prog->size += prog_text->size;
2439 }
2440
2441 prog->insns[insn_off].imm += props->main_num - insn_off;
2442 return 0;
2443}
2444
2445static int bpf_apply_relo_data(struct bpf_elf_ctx *ctx,
2446 struct bpf_elf_sec_data *data_relo,
2447 struct bpf_elf_prog *prog,
2448 struct bpf_relo_props *props)
2449{
2450 GElf_Shdr *rhdr = &data_relo->sec_hdr;
2451 int relo_ent, relo_num = rhdr->sh_size / rhdr->sh_entsize;
2452
2453 for (relo_ent = 0; relo_ent < relo_num; relo_ent++) {
2454 GElf_Rel relo;
2455 GElf_Sym sym;
2456 int ret = -EIO;
2457
2458 if (gelf_getrel(data_relo->sec_data, relo_ent, &relo) != &relo)
2459 return -EIO;
2460 if (gelf_getsym(ctx->sym_tab, GELF_R_SYM(relo.r_info), &sym) != &sym)
2461 return -EIO;
2462
2463 if (sym.st_shndx == ctx->sec_maps)
2464 ret = bpf_apply_relo_map(ctx, prog, &relo, &sym, props);
2465 else if (sym.st_shndx == ctx->sec_text)
2466 ret = bpf_apply_relo_call(ctx, prog, &relo, &sym, props);
2467 else
2468 fprintf(stderr, "ELF contains non-{map,call} related relo data in entry %u pointing to section %u! Compiler bug?!\n",
2469 relo_ent, sym.st_shndx);
2470 if (ret < 0)
2471 return ret;
2472 }
2473
2474 return 0;
2475}
2476
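/* Load a program section that comes with an SHT_REL section: the
 * instructions are copied, map and call relocations are applied and the
 * result is handed to the kernel. For the .text section itself only the
 * relocated copy is kept so later call relocations can use it.
 */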
2477static int bpf_fetch_prog_relo(struct bpf_elf_ctx *ctx, const char *section,
2478 bool *lderr, bool *sseen, struct bpf_elf_prog *prog)
2479{
2480 struct bpf_elf_sec_data data_relo, data_insn;
2481 int ret, idx, i, fd = -1;
2482
2483 for (i = 1; i < ctx->elf_hdr.e_shnum; i++) {
2484 struct bpf_relo_props props = {};
2485
2486 ret = bpf_fill_section_data(ctx, i, &data_relo);
2487 if (ret < 0 || data_relo.sec_hdr.sh_type != SHT_REL)
2488 continue;
2489
2490 idx = data_relo.sec_hdr.sh_info;
2491
2492 ret = bpf_fill_section_data(ctx, idx, &data_insn);
2493 if (ret < 0 ||
2494 !(data_insn.sec_hdr.sh_type == SHT_PROGBITS &&
2495 (data_insn.sec_hdr.sh_flags & SHF_EXECINSTR) &&
2496 !strcmp(data_insn.sec_name, section)))
2497 continue;
2498 if (sseen)
2499 *sseen = true;
2500
2501 memset(prog, 0, sizeof(*prog));
2502 prog->type = ctx->type;
2503 prog->license = ctx->license;
2504 prog->size = data_insn.sec_data->d_size;
2505 prog->insns_num = prog->size / sizeof(struct bpf_insn);
2506 prog->insns = malloc(prog->size);
2507 if (!prog->insns) {
2508 *lderr = true;
2509 return -ENOMEM;
2510 }
2511
2512 memcpy(prog->insns, data_insn.sec_data->d_buf, prog->size);
2513
2514 ret = bpf_apply_relo_data(ctx, &data_relo, prog, &props);
2515 if (ret < 0) {
2516 *lderr = true;
2517 if (ctx->sec_text != idx)
2518 free(prog->insns);
2519 return ret;
2520 }
2521 if (ctx->sec_text == idx) {
2522 fd = 0;
2523 goto out;
2524 }
2525
2526 fd = bpf_prog_attach(section, prog, ctx);
2527 free(prog->insns);
2528 if (fd < 0) {
2529 *lderr = true;
2530 if (props.tc.total) {
2531 if (ctx->cfg.jit_enabled &&
2532 props.tc.total != props.tc.jited)
2533 fprintf(stderr, "JIT enabled, but only %u/%u tail call maps in the program have JITed owner!\n",
2534 props.tc.jited, props.tc.total);
2535 if (!ctx->cfg.jit_enabled &&
2536 props.tc.jited)
2537 fprintf(stderr, "JIT disabled, but %u/%u tail call maps in the program have JITed owner!\n",
2538 props.tc.jited, props.tc.total);
2539 }
2540 return fd;
2541 }
2542out:
2543 ctx->sec_done[i] = true;
2544 ctx->sec_done[idx] = true;
2545 break;
2546 }
2547
2548 return fd;
2549}
2550
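/* Load the requested program section, preferring the relocation-aware
 * path whenever map or call data is present and falling back to the
 * plain loader otherwise. The .text section is relocated first so that
 * calls into it can be resolved.
 */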
2551static int bpf_fetch_prog_sec(struct bpf_elf_ctx *ctx, const char *section)
2552{
2553 bool lderr = false, sseen = false;
2554 struct bpf_elf_prog prog;
2555 int ret = -1;
2556
2557 if (bpf_has_call_data(ctx)) {
2558 ret = bpf_fetch_prog_relo(ctx, ".text", &lderr, NULL,
2559 &ctx->prog_text);
2560 if (ret < 0)
2561 return ret;
2562 }
2563
2564 if (bpf_has_map_data(ctx) || bpf_has_call_data(ctx))
2565 ret = bpf_fetch_prog_relo(ctx, section, &lderr, &sseen, &prog);
2566 if (ret < 0 && !lderr)
2567 ret = bpf_fetch_prog(ctx, section, &sseen);
2568 if (ret < 0 && !sseen)
2569		fprintf(stderr, "Program section '%s' not found in ELF file!\n",
2570 section);
2571 return ret;
2572}
2573
2574static int bpf_find_map_by_id(struct bpf_elf_ctx *ctx, uint32_t id)
2575{
2576 int i;
2577
2578 for (i = 0; i < ARRAY_SIZE(ctx->map_fds); i++)
2579 if (ctx->map_fds[i] && ctx->maps[i].id == id &&
2580 ctx->maps[i].type == BPF_MAP_TYPE_PROG_ARRAY)
2581 return i;
2582 return -1;
2583}
2584
2585struct bpf_jited_aux {
2586 int prog_fd;
2587 int map_fd;
2588 struct bpf_prog_data prog;
2589 struct bpf_map_ext map;
2590};
2591
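/* Read prog_type and prog_jited of a loaded program back from its
 * /proc/<pid>/fdinfo entry.
 */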
2592static int bpf_derive_prog_from_fdinfo(int fd, struct bpf_prog_data *prog)
2593{
2594 char file[PATH_MAX], buff[4096];
2595 unsigned int val;
2596 FILE *fp;
2597
2598 snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd);
2599 memset(prog, 0, sizeof(*prog));
2600
2601 fp = fopen(file, "r");
2602 if (!fp) {
2603 fprintf(stderr, "No procfs support?!\n");
2604 return -EIO;
2605 }
2606
2607 while (fgets(buff, sizeof(buff), fp)) {
2608 if (sscanf(buff, "prog_type:\t%u", &val) == 1)
2609 prog->type = val;
2610 else if (sscanf(buff, "prog_jited:\t%u", &val) == 1)
2611 prog->jited = val;
2612 }
2613
2614 fclose(fp);
2615 return 0;
2616}
2617
2618static int bpf_tail_call_get_aux(struct bpf_jited_aux *aux)
2619{
2620 struct bpf_elf_map tmp;
2621 int ret;
2622
2623 ret = bpf_derive_elf_map_from_fdinfo(aux->map_fd, &tmp, &aux->map);
2624 if (!ret)
2625 ret = bpf_derive_prog_from_fdinfo(aux->prog_fd, &aux->prog);
2626
2627 return ret;
2628}
2629
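/* Populate tail call prog arrays. ELF sections named "<id>/<key>" refer
 * to the prog array map carrying that id; the program found in such a
 * section is loaded and its fd written to the given key. In the object
 * file this roughly corresponds to (illustrative sketch only, macro
 * names as used by the iproute2 example headers):
 *
 *	struct bpf_elf_map __section("maps") jmp_map = {
 *		.type	= BPF_MAP_TYPE_PROG_ARRAY,
 *		.id	= 1,
 *		...
 *	};
 *
 *	__section("1/0") int subprog(struct __sk_buff *skb)
 *	{
 *		...
 *	}
 */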
2630static int bpf_fill_prog_arrays(struct bpf_elf_ctx *ctx)
2631{
2632 struct bpf_elf_sec_data data;
2633 uint32_t map_id, key_id;
2634 int fd, i, ret, idx;
2635
2636 for (i = 1; i < ctx->elf_hdr.e_shnum; i++) {
2637 if (ctx->sec_done[i])
2638 continue;
2639
2640 ret = bpf_fill_section_data(ctx, i, &data);
2641 if (ret < 0)
2642 continue;
2643
2644 ret = sscanf(data.sec_name, "%i/%i", &map_id, &key_id);
2645 if (ret != 2)
2646 continue;
2647
2648 idx = bpf_find_map_by_id(ctx, map_id);
2649 if (idx < 0)
2650 continue;
2651
2652 fd = bpf_fetch_prog_sec(ctx, data.sec_name);
2653 if (fd < 0)
2654 return -EIO;
2655
2656 ret = bpf_map_update(ctx->map_fds[idx], &key_id,
2657 &fd, BPF_ANY);
2658 if (ret < 0) {
2659 struct bpf_jited_aux aux = {};
2660
2661 ret = -errno;
2662 if (errno == E2BIG) {
2663 fprintf(stderr, "Tail call key %u for map %u out of bounds?\n",
2664 key_id, map_id);
2665 return ret;
2666 }
2667
2668 aux.map_fd = ctx->map_fds[idx];
2669 aux.prog_fd = fd;
2670
2671 if (bpf_tail_call_get_aux(&aux))
2672 return ret;
2673 if (!aux.map.owner.type)
2674 return ret;
2675
2676 if (aux.prog.type != aux.map.owner.type)
2677 fprintf(stderr, "Tail call map owned by prog type %u, but prog type is %u!\n",
2678 aux.map.owner.type, aux.prog.type);
2679 if (aux.prog.jited != aux.map.owner.jited)
2680 fprintf(stderr, "Tail call map %s jited, but prog %s!\n",
2681 aux.map.owner.jited ? "is" : "not",
2682 aux.prog.jited ? "is" : "not");
2683 return ret;
2684 }
2685
2686 ctx->sec_done[i] = true;
2687 }
2688
2689 return 0;
2690}
2691
2692static void bpf_save_finfo(struct bpf_elf_ctx *ctx)
2693{
2694 struct stat st;
2695 int ret;
2696
2697 memset(&ctx->stat, 0, sizeof(ctx->stat));
2698
2699 ret = fstat(ctx->obj_fd, &st);
2700 if (ret < 0) {
2701		fprintf(stderr, "Stat of ELF file failed: %s\n",
2702 strerror(errno));
2703 return;
2704 }
2705
2706 ctx->stat.st_dev = st.st_dev;
2707 ctx->stat.st_ino = st.st_ino;
2708}
2709
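/* Parse one line of the pinning database. Each non-comment line maps a
 * numeric pinning id to a subdirectory, e.g. (illustrative contents):
 *
 *	# contents of CONFDIR "/bpf_pinning"
 *	3	tracing
 *	4	foo/bar
 *
 * Returns 1 for a parsed entry, 0 on EOF and -1 for a malformed line,
 * in which case 'path' holds the offending text.
 */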
2710static int bpf_read_pin_mapping(FILE *fp, uint32_t *id, char *path)
2711{
2712 char buff[PATH_MAX];
2713
2714 while (fgets(buff, sizeof(buff), fp)) {
2715 char *ptr = buff;
2716
2717 while (*ptr == ' ' || *ptr == '\t')
2718 ptr++;
2719
2720 if (*ptr == '#' || *ptr == '\n' || *ptr == 0)
2721 continue;
2722
2723 if (sscanf(ptr, "%i %s\n", id, path) != 2 &&
2724 sscanf(ptr, "%i %s #", id, path) != 2) {
2725 strcpy(path, ptr);
2726 return -1;
2727 }
2728
2729 return 1;
2730 }
2731
2732 return 0;
2733}
2734
2735static bool bpf_pinning_reserved(uint32_t pinning)
2736{
2737 switch (pinning) {
2738 case PIN_NONE:
2739 case PIN_OBJECT_NS:
2740 case PIN_GLOBAL_NS:
2741 return true;
2742 default:
2743 return false;
2744 }
2745}
2746
2747static void bpf_hash_init(struct bpf_elf_ctx *ctx, const char *db_file)
2748{
2749 struct bpf_hash_entry *entry;
2750 char subpath[PATH_MAX] = {};
2751 uint32_t pinning;
2752 FILE *fp;
2753 int ret;
2754
2755 fp = fopen(db_file, "r");
2756 if (!fp)
2757 return;
2758
2759 while ((ret = bpf_read_pin_mapping(fp, &pinning, subpath))) {
2760 if (ret == -1) {
2761 fprintf(stderr, "Database %s is corrupted at: %s\n",
2762 db_file, subpath);
2763 fclose(fp);
2764 return;
2765 }
2766
2767 if (bpf_pinning_reserved(pinning)) {
2768 fprintf(stderr, "Database %s, id %u is reserved - ignoring!\n",
2769 db_file, pinning);
2770 continue;
2771 }
2772
2773 entry = malloc(sizeof(*entry));
2774 if (!entry) {
2775 fprintf(stderr, "No memory left for db entry!\n");
2776 continue;
2777 }
2778
2779 entry->pinning = pinning;
2780 entry->subpath = strdup(subpath);
2781 if (!entry->subpath) {
2782 fprintf(stderr, "No memory left for db entry!\n");
2783 free(entry);
2784 continue;
2785 }
2786
2787 entry->next = ctx->ht[pinning & (ARRAY_SIZE(ctx->ht) - 1)];
2788 ctx->ht[pinning & (ARRAY_SIZE(ctx->ht) - 1)] = entry;
2789 }
2790
2791 fclose(fp);
2792}
2793
2794static void bpf_hash_destroy(struct bpf_elf_ctx *ctx)
2795{
2796 struct bpf_hash_entry *entry;
2797 int i;
2798
2799 for (i = 0; i < ARRAY_SIZE(ctx->ht); i++) {
2800 while ((entry = ctx->ht[i]) != NULL) {
2801 ctx->ht[i] = entry->next;
2802 free((char *)entry->subpath);
2803 free(entry);
2804 }
2805 }
2806}
2807
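/* Sanity check the ELF header: only relocatable objects for EM_NONE or
 * EM_BPF with the current ELF version and an endianness matching the
 * host are accepted.
 */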
2808static int bpf_elf_check_ehdr(const struct bpf_elf_ctx *ctx)
2809{
2810 if (ctx->elf_hdr.e_type != ET_REL ||
2811 (ctx->elf_hdr.e_machine != EM_NONE &&
2812 ctx->elf_hdr.e_machine != EM_BPF) ||
2813 ctx->elf_hdr.e_version != EV_CURRENT) {
2814 fprintf(stderr, "ELF format error, ELF file not for eBPF?\n");
2815 return -EINVAL;
2816 }
2817
2818 switch (ctx->elf_hdr.e_ident[EI_DATA]) {
2819 default:
2820 fprintf(stderr, "ELF format error, wrong endianness info?\n");
2821 return -EINVAL;
2822 case ELFDATA2LSB:
2823 if (htons(1) == 1) {
2824 fprintf(stderr,
2825 "We are big endian, eBPF object is little endian!\n");
2826 return -EIO;
2827 }
2828 break;
2829 case ELFDATA2MSB:
2830 if (htons(1) != 1) {
2831 fprintf(stderr,
2832 "We are little endian, eBPF object is big endian!\n");
2833 return -EIO;
2834 }
2835 break;
2836 }
2837
2838 return 0;
2839}
2840
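/* Cache whether the kernel's BPF JIT is enabled by reading
 * /proc/sys/net/core/bpf_jit_enable.
 */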
2841static void bpf_get_cfg(struct bpf_elf_ctx *ctx)
2842{
2843 static const char *path_jit = "/proc/sys/net/core/bpf_jit_enable";
2844 int fd;
2845
2846 fd = open(path_jit, O_RDONLY);
2847 if (fd > 0) {
2848 char tmp[16] = {};
2849
2850 if (read(fd, tmp, sizeof(tmp)) > 0)
2851 ctx->cfg.jit_enabled = atoi(tmp);
2852 close(fd);
2853 }
2854}
2855
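/* Set up the per-object load context: read the JIT sysctl, compute the
 * object hash used as its uid, open and validate the ELF file, and
 * prepare section bookkeeping, the verifier log buffer (when verbose)
 * and the pinning hash table.
 */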
2856static int bpf_elf_ctx_init(struct bpf_elf_ctx *ctx, const char *pathname,
2857 enum bpf_prog_type type, __u32 ifindex,
2858 bool verbose)
2859{
2860 uint8_t tmp[20];
2861 int ret;
2862
2863 if (elf_version(EV_CURRENT) == EV_NONE)
2864 return -EINVAL;
2865
2866 bpf_init_env();
2867
2868 memset(ctx, 0, sizeof(*ctx));
2869 bpf_get_cfg(ctx);
2870
2871 ret = bpf_obj_hash(pathname, tmp, sizeof(tmp));
2872 if (ret)
2873 ctx->noafalg = true;
2874 else
2875 hexstring_n2a(tmp, sizeof(tmp), ctx->obj_uid,
2876 sizeof(ctx->obj_uid));
2877
2878 ctx->verbose = verbose;
2879 ctx->type = type;
2880 ctx->ifindex = ifindex;
2881
2882 ctx->obj_fd = open(pathname, O_RDONLY);
2883 if (ctx->obj_fd < 0)
2884 return ctx->obj_fd;
2885
2886 ctx->elf_fd = elf_begin(ctx->obj_fd, ELF_C_READ, NULL);
2887 if (!ctx->elf_fd) {
2888 ret = -EINVAL;
2889 goto out_fd;
2890 }
2891
2892	if (elf_kind(ctx->elf_fd) != ELF_K_ELF) {
2893		ret = -EINVAL;
2894		goto out_elf;
2895	}
2896
2897 if (gelf_getehdr(ctx->elf_fd, &ctx->elf_hdr) !=
2898 &ctx->elf_hdr) {
2899 ret = -EIO;
2900 goto out_elf;
2901 }
2902
2903 ret = bpf_elf_check_ehdr(ctx);
2904 if (ret < 0)
2905 goto out_elf;
2906
2907 ctx->sec_done = calloc(ctx->elf_hdr.e_shnum,
2908 sizeof(*(ctx->sec_done)));
2909 if (!ctx->sec_done) {
2910 ret = -ENOMEM;
2911 goto out_elf;
2912 }
2913
2914 if (ctx->verbose && bpf_log_realloc(ctx)) {
2915 ret = -ENOMEM;
2916 goto out_free;
2917 }
2918
2919 bpf_save_finfo(ctx);
2920 bpf_hash_init(ctx, CONFDIR "/bpf_pinning");
2921
2922 return 0;
2923out_free:
2924 free(ctx->sec_done);
2925out_elf:
2926 elf_end(ctx->elf_fd);
2927out_fd:
2928 close(ctx->obj_fd);
2929 return ret;
2930}
2931
2932static int bpf_maps_count(struct bpf_elf_ctx *ctx)
2933{
2934 int i, count = 0;
2935
2936 for (i = 0; i < ARRAY_SIZE(ctx->map_fds); i++) {
2937 if (!ctx->map_fds[i])
2938 break;
2939 count++;
2940 }
2941
2942 return count;
2943}
2944
2945static void bpf_maps_teardown(struct bpf_elf_ctx *ctx)
2946{
2947 int i;
2948
2949 for (i = 0; i < ARRAY_SIZE(ctx->map_fds); i++) {
2950 if (ctx->map_fds[i])
2951 close(ctx->map_fds[i]);
2952 }
2953
2954 if (ctx->btf_fd)
2955 close(ctx->btf_fd);
2956 free(ctx->btf.types);
2957}
2958
2959static void bpf_elf_ctx_destroy(struct bpf_elf_ctx *ctx, bool failure)
2960{
2961 if (failure)
2962 bpf_maps_teardown(ctx);
2963
2964 bpf_hash_destroy(ctx);
2965
2966 free(ctx->prog_text.insns);
2967 free(ctx->sec_done);
2968 free(ctx->log);
2969
2970 elf_end(ctx->elf_fd);
2971 close(ctx->obj_fd);
2972}
2973
2974static struct bpf_elf_ctx __ctx;
2975
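/* Main entry point for loading a section from an eBPF ELF object:
 * initialize the context, fetch ancillary data (maps, license, BTF,
 * ...), load the requested program section and fill any tail call prog
 * arrays. On success the program fd is returned and the map fds are
 * kept in the context so they can still be exported over a unix socket.
 */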
2976static int bpf_obj_open(const char *pathname, enum bpf_prog_type type,
2977 const char *section, __u32 ifindex, bool verbose)
2978{
2979 struct bpf_elf_ctx *ctx = &__ctx;
2980 int fd = 0, ret;
2981
2982 ret = bpf_elf_ctx_init(ctx, pathname, type, ifindex, verbose);
2983 if (ret < 0) {
2984 fprintf(stderr, "Cannot initialize ELF context!\n");
2985 return ret;
2986 }
2987
2988 ret = bpf_fetch_ancillary(ctx, strcmp(section, ".text"));
2989 if (ret < 0) {
2990 fprintf(stderr, "Error fetching ELF ancillary data!\n");
2991 goto out;
2992 }
2993
2994 fd = bpf_fetch_prog_sec(ctx, section);
2995 if (fd < 0) {
2996 fprintf(stderr, "Error fetching program/map!\n");
2997 ret = fd;
2998 goto out;
2999 }
3000
3001 ret = bpf_fill_prog_arrays(ctx);
3002 if (ret < 0)
3003 fprintf(stderr, "Error filling program arrays!\n");
3004out:
3005 bpf_elf_ctx_destroy(ctx, ret < 0);
3006 if (ret < 0) {
3007 if (fd)
3008 close(fd);
3009 return ret;
3010 }
3011
3012 return fd;
3013}
3014
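/* Send all map fds together with their bpf_elf_map descriptions to an
 * agent listening on an AF_UNIX datagram socket, in chunks of at most
 * BPF_SCM_MAX_FDS fds per SCM_RIGHTS message.
 */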
3015static int
3016bpf_map_set_send(int fd, struct sockaddr_un *addr, unsigned int addr_len,
3017 const struct bpf_map_data *aux, unsigned int entries)
3018{
3019 struct bpf_map_set_msg msg = {
3020 .aux.uds_ver = BPF_SCM_AUX_VER,
3021 .aux.num_ent = entries,
3022 };
3023 int *cmsg_buf, min_fd;
3024 char *amsg_buf;
3025 int i;
3026
3027 strlcpy(msg.aux.obj_name, aux->obj, sizeof(msg.aux.obj_name));
3028 memcpy(&msg.aux.obj_st, aux->st, sizeof(msg.aux.obj_st));
3029
3030 cmsg_buf = bpf_map_set_init(&msg, addr, addr_len);
3031 amsg_buf = (char *)msg.aux.ent;
3032
3033 for (i = 0; i < entries; i += min_fd) {
3034 int ret;
3035
3036 min_fd = min(BPF_SCM_MAX_FDS * 1U, entries - i);
3037 bpf_map_set_init_single(&msg, min_fd);
3038
3039 memcpy(cmsg_buf, &aux->fds[i], sizeof(aux->fds[0]) * min_fd);
3040 memcpy(amsg_buf, &aux->ent[i], sizeof(aux->ent[0]) * min_fd);
3041
3042 ret = sendmsg(fd, &msg.hdr, 0);
3043 if (ret <= 0)
3044 return ret ? : -1;
3045 }
3046
3047 return 0;
3048}
3049
3050static int
3051bpf_map_set_recv(int fd, int *fds, struct bpf_map_aux *aux,
3052 unsigned int entries)
3053{
3054 struct bpf_map_set_msg msg;
3055 int *cmsg_buf, min_fd;
3056 char *amsg_buf, *mmsg_buf;
3057 unsigned int needed = 1;
3058 int i;
3059
3060 cmsg_buf = bpf_map_set_init(&msg, NULL, 0);
3061 amsg_buf = (char *)msg.aux.ent;
3062 mmsg_buf = (char *)&msg.aux;
3063
3064 for (i = 0; i < min(entries, needed); i += min_fd) {
3065 struct cmsghdr *cmsg;
3066 int ret;
3067
3068 min_fd = min(entries, entries - i);
3069 bpf_map_set_init_single(&msg, min_fd);
3070
3071 ret = recvmsg(fd, &msg.hdr, 0);
3072 if (ret <= 0)
3073 return ret ? : -1;
3074
3075 cmsg = CMSG_FIRSTHDR(&msg.hdr);
3076 if (!cmsg || cmsg->cmsg_type != SCM_RIGHTS)
3077 return -EINVAL;
3078 if (msg.hdr.msg_flags & MSG_CTRUNC)
3079 return -EIO;
3080 if (msg.aux.uds_ver != BPF_SCM_AUX_VER)
3081 return -ENOSYS;
3082
3083 min_fd = (cmsg->cmsg_len - sizeof(*cmsg)) / sizeof(fd);
3084 if (min_fd > entries || min_fd <= 0)
3085 return -EINVAL;
3086
3087 memcpy(&fds[i], cmsg_buf, sizeof(fds[0]) * min_fd);
3088 memcpy(&aux->ent[i], amsg_buf, sizeof(aux->ent[0]) * min_fd);
3089 memcpy(aux, mmsg_buf, offsetof(struct bpf_map_aux, ent));
3090
3091 needed = aux->num_ent;
3092 }
3093
3094 return 0;
3095}
3096
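/* Exported UDS helpers: once an object has been loaded and export was
 * requested, bpf_send_map_fds() connects to the given unix socket path
 * and transfers the map fds of the most recently loaded object;
 * bpf_recv_map_fds() below is the matching receiver an agent binds on
 * the same path. Roughly (sketch only, error handling and buffer
 * sizing omitted):
 *
 *	loader:	bpf_send_map_fds("/tmp/bpf-uds", "my_obj.o");
 *	agent:	bpf_recv_map_fds("/tmp/bpf-uds", fds, aux, entries);
 */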
3097int bpf_send_map_fds(const char *path, const char *obj)
3098{
3099 struct bpf_elf_ctx *ctx = &__ctx;
3100 struct sockaddr_un addr = { .sun_family = AF_UNIX };
3101 struct bpf_map_data bpf_aux = {
3102 .fds = ctx->map_fds,
3103 .ent = ctx->maps,
3104 .st = &ctx->stat,
3105 .obj = obj,
3106 };
3107 int fd, ret;
3108
3109 fd = socket(AF_UNIX, SOCK_DGRAM, 0);
3110 if (fd < 0) {
3111 fprintf(stderr, "Cannot open socket: %s\n",
3112 strerror(errno));
3113 return -1;
3114 }
3115
3116 strlcpy(addr.sun_path, path, sizeof(addr.sun_path));
3117
3118	ret = connect(fd, (struct sockaddr *)&addr, sizeof(addr));
3119	if (ret < 0) {
3120		fprintf(stderr, "Cannot connect to %s: %s\n",
3121			path, strerror(errno));
		close(fd);
3122		return -1;
3123	}
3124
3125 ret = bpf_map_set_send(fd, &addr, sizeof(addr), &bpf_aux,
3126 bpf_maps_count(ctx));
3127 if (ret < 0)
3128 fprintf(stderr, "Cannot send fds to %s: %s\n",
3129 path, strerror(errno));
3130
3131 bpf_maps_teardown(ctx);
3132 close(fd);
3133 return ret;
3134}
3135
3136int bpf_recv_map_fds(const char *path, int *fds, struct bpf_map_aux *aux,
3137 unsigned int entries)
3138{
3139 struct sockaddr_un addr = { .sun_family = AF_UNIX };
3140 int fd, ret;
3141
3142 fd = socket(AF_UNIX, SOCK_DGRAM, 0);
3143 if (fd < 0) {
3144 fprintf(stderr, "Cannot open socket: %s\n",
3145 strerror(errno));
3146 return -1;
3147 }
3148
3149 strlcpy(addr.sun_path, path, sizeof(addr.sun_path));
3150
3151	ret = bind(fd, (struct sockaddr *)&addr, sizeof(addr));
3152	if (ret < 0) {
3153		fprintf(stderr, "Cannot bind to socket: %s\n",
3154			strerror(errno));
		close(fd);
3155		return -1;
3156	}
3157
3158 ret = bpf_map_set_recv(fd, fds, aux, entries);
3159 if (ret < 0)
3160 fprintf(stderr, "Cannot recv fds from %s: %s\n",
3161 path, strerror(errno));
3162
3163 unlink(addr.sun_path);
3164 close(fd);
3165 return ret;
3166}
3167#endif
3168