linux/tools/lib/bpf/libbpf.c
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * Common eBPF ELF object loading operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 * Copyright (C) 2017 Nicira, Inc.
 * Copyright (C) 2019 Isovalent, Inc.
 */

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <libgen.h>
#include <inttypes.h>
#include <limits.h>
#include <string.h>
#include <unistd.h>
#include <endian.h>
#include <fcntl.h>
#include <errno.h>
#include <ctype.h>
#include <asm/unistd.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/list.h>
#include <linux/limits.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>
#include <linux/version.h>
#include <sys/epoll.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/vfs.h>
#include <sys/utsname.h>
#include <sys/resource.h>
#include <libelf.h>
#include <gelf.h>
#include <zlib.h>

#include "libbpf.h"
#include "bpf.h"
#include "btf.h"
#include "str_error.h"
#include "libbpf_internal.h"
#include "hashmap.h"
#include "bpf_gen_internal.h"

#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC            0xcafe4a11
#endif

#define BPF_INSN_SZ (sizeof(struct bpf_insn))

/* vfprintf() in __base_pr() uses a nonliteral format string. It may break
 * compilation if the user enables the corresponding warning. Disable it
 * explicitly.
 */
#pragma GCC diagnostic ignored "-Wformat-nonliteral"

#define __printf(a, b)  __attribute__((format(printf, a, b)))

static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog);

static int __base_pr(enum libbpf_print_level level, const char *format,
                     va_list args)
{
        if (level == LIBBPF_DEBUG)
                return 0;

        return vfprintf(stderr, format, args);
}

static libbpf_print_fn_t __libbpf_pr = __base_pr;

libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn)
{
        libbpf_print_fn_t old_print_fn = __libbpf_pr;

        __libbpf_pr = fn;
        return old_print_fn;
}
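
/* Illustrative usage (not part of libbpf): a caller can install its own
 * print callback and later restore the previous one. my_print_fn below is
 * a hypothetical user-provided function matching libbpf_print_fn_t:
 *
 *      static int my_print_fn(enum libbpf_print_level level,
 *                             const char *format, va_list args)
 *      {
 *              if (level == LIBBPF_DEBUG)
 *                      return 0;
 *              return vfprintf(stderr, format, args);
 *      }
 *
 *      libbpf_print_fn_t old_fn = libbpf_set_print(my_print_fn);
 *      ...
 *      libbpf_set_print(old_fn);  // restore the previous callback
 */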

__printf(2, 3)
void libbpf_print(enum libbpf_print_level level, const char *format, ...)
{
        va_list args;

        if (!__libbpf_pr)
                return;

        va_start(args, format);
        __libbpf_pr(level, format, args);
        va_end(args);
}

static void pr_perm_msg(int err)
{
        struct rlimit limit;
        char buf[100];

        if (err != -EPERM || geteuid() != 0)
                return;

        err = getrlimit(RLIMIT_MEMLOCK, &limit);
        if (err)
                return;

        if (limit.rlim_cur == RLIM_INFINITY)
                return;

        if (limit.rlim_cur < 1024)
                snprintf(buf, sizeof(buf), "%zu bytes", (size_t)limit.rlim_cur);
        else if (limit.rlim_cur < 1024*1024)
                snprintf(buf, sizeof(buf), "%.1f KiB", (double)limit.rlim_cur / 1024);
        else
                snprintf(buf, sizeof(buf), "%.1f MiB", (double)limit.rlim_cur / (1024*1024));

        pr_warn("permission error while running as root; try raising 'ulimit -l'? current value: %s\n",
                buf);
}
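
/* Illustrative remedy for the EPERM case above (a sketch, not libbpf code):
 * on kernels before 5.11, BPF map and program memory is charged against
 * RLIMIT_MEMLOCK, so callers typically raise the limit before loading,
 * which is the programmatic equivalent of 'ulimit -l unlimited':
 *
 *      struct rlimit r = { RLIM_INFINITY, RLIM_INFINITY };
 *
 *      if (setrlimit(RLIMIT_MEMLOCK, &r))
 *              err = -errno;  // handle the failure
 */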

#define STRERR_BUFSIZE  128

/* Copied from tools/perf/util/util.h */
#ifndef zfree
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

#ifndef zclose
# define zclose(fd) ({                  \
        int ___err = 0;                 \
        if ((fd) >= 0)                  \
                ___err = close((fd));   \
        fd = -1;                        \
        ___err; })
#endif

static inline __u64 ptr_to_u64(const void *ptr)
{
        return (__u64) (unsigned long) ptr;
}

/* this goes away in libbpf 1.0 */
enum libbpf_strict_mode libbpf_mode = LIBBPF_STRICT_NONE;

int libbpf_set_strict_mode(enum libbpf_strict_mode mode)
{
        libbpf_mode = mode;
        return 0;
}

__u32 libbpf_major_version(void)
{
        return LIBBPF_MAJOR_VERSION;
}

__u32 libbpf_minor_version(void)
{
        return LIBBPF_MINOR_VERSION;
}

const char *libbpf_version_string(void)
{
#define __S(X) #X
#define _S(X) __S(X)
        return  "v" _S(LIBBPF_MAJOR_VERSION) "." _S(LIBBPF_MINOR_VERSION);
#undef _S
#undef __S
}
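
/* Illustrative expansion (assuming, e.g., LIBBPF_MAJOR_VERSION == 0 and
 * LIBBPF_MINOR_VERSION == 8): _S() expands its macro argument before __S()
 * stringizes it, so the return statement above becomes
 *
 *      return "v" "0" "." "8";  // i.e., "v0.8"
 *
 * A single-level #X would stringize the macro name itself instead.
 */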

enum reloc_type {
        RELO_LD64,
        RELO_CALL,
        RELO_DATA,
        RELO_EXTERN_VAR,
        RELO_EXTERN_FUNC,
        RELO_SUBPROG_ADDR,
        RELO_CORE,
};

struct reloc_desc {
        enum reloc_type type;
        int insn_idx;
        union {
                const struct bpf_core_relo *core_relo; /* used when type == RELO_CORE */
                struct {
                        int map_idx;
                        int sym_off;
                };
        };
};

/* stored as sec_def->cookie for all libbpf-supported SEC()s */
enum sec_def_flags {
        SEC_NONE = 0,
        /* expected_attach_type is optional if the kernel doesn't support it */
        SEC_EXP_ATTACH_OPT = 1,
        /* legacy, only used by libbpf_get_type_names() and
         * libbpf_attach_type_by_name(), not used by libbpf itself at all.
         * This used to be associated with cgroup (and a few other) BPF
         * programs that were attachable through the BPF_PROG_ATTACH command.
         * Pretty meaningless nowadays, though.
         */
        SEC_ATTACHABLE = 2,
        SEC_ATTACHABLE_OPT = SEC_ATTACHABLE | SEC_EXP_ATTACH_OPT,
        /* attachment target is specified through BTF ID in either kernel or
         * other BPF program's BTF object */
        SEC_ATTACH_BTF = 4,
        /* BPF program type allows sleeping/blocking in kernel */
        SEC_SLEEPABLE = 8,
        /* allow non-strict prefix matching */
        SEC_SLOPPY_PFX = 16,
        /* BPF program supports non-linear XDP buffers */
        SEC_XDP_FRAGS = 32,
        /* deprecated sec definitions, not supposed to be used */
        SEC_DEPRECATED = 64,
};

struct bpf_sec_def {
        char *sec;
        enum bpf_prog_type prog_type;
        enum bpf_attach_type expected_attach_type;
        long cookie;
        int handler_id;

        libbpf_prog_setup_fn_t prog_setup_fn;
        libbpf_prog_prepare_load_fn_t prog_prepare_load_fn;
        libbpf_prog_attach_fn_t prog_attach_fn;
};

/*
 * bpf_prog would be a better name, but it has been used in
 * linux/filter.h.
 */
struct bpf_program {
        const struct bpf_sec_def *sec_def;
        char *sec_name;
        size_t sec_idx;
        /* this program's instruction offset (in number of instructions)
         * within its containing ELF section
         */
        size_t sec_insn_off;
        /* number of original instructions in the ELF section belonging to
         * this program, not taking into account subprogram instructions
         * possibly appended later during relocation
         */
        size_t sec_insn_cnt;
        /* Offset (in number of instructions) of the start of instructions
         * belonging to this BPF program within its containing main BPF
         * program. For the entry-point (main) BPF program, this is always
         * zero. For a sub-program, this gets reset before each main BPF
         * program is processed and relocated, and is used to determine
         * whether the sub-program was already appended to the main program,
         * and if yes, at which instruction offset.
         */
        size_t sub_insn_off;

        char *name;
        /* name with / replaced by _; makes recursive pinning
         * in bpf_object__pin_programs easier
         */
        char *pin_name;

        /* instructions that belong to the BPF program; insns[0] is located
         * at the sec_insn_off instruction within its ELF section in the ELF
         * file, so when mapping an ELF file instruction index to the local
         * instruction, one needs to subtract sec_insn_off; and vice versa.
         */
        struct bpf_insn *insns;
        /* actual number of instructions in this BPF program's image; for
         * entry-point BPF programs this includes the size of the main
         * program itself plus all the used sub-programs, appended at the end
         */
        size_t insns_cnt;

        struct reloc_desc *reloc_desc;
        int nr_reloc;

        /* BPF verifier log settings */
        char *log_buf;
        size_t log_size;
        __u32 log_level;

        struct {
                int nr;
                int *fds;
        } instances;
        bpf_program_prep_t preprocessor;

        struct bpf_object *obj;
        void *priv;
        bpf_program_clear_priv_t clear_priv;

        bool autoload;
        bool mark_btf_static;
        enum bpf_prog_type type;
        enum bpf_attach_type expected_attach_type;
        int prog_ifindex;
        __u32 attach_btf_obj_fd;
        __u32 attach_btf_id;
        __u32 attach_prog_fd;
        void *func_info;
        __u32 func_info_rec_size;
        __u32 func_info_cnt;

        void *line_info;
        __u32 line_info_rec_size;
        __u32 line_info_cnt;
        __u32 prog_flags;
};

struct bpf_struct_ops {
        const char *tname;
        const struct btf_type *type;
        struct bpf_program **progs;
        __u32 *kern_func_off;
        /* e.g. struct tcp_congestion_ops in bpf_prog's btf format */
        void *data;
        /* e.g. struct bpf_struct_ops_tcp_congestion_ops in
         *      btf_vmlinux's format.
         * struct bpf_struct_ops_tcp_congestion_ops {
         *      [... some other kernel fields ...]
         *      struct tcp_congestion_ops data;
         * }
         * kern_vdata's size == sizeof(struct bpf_struct_ops_tcp_congestion_ops)
         * bpf_map__init_kern_struct_ops() will populate the "kern_vdata"
         * from "data".
         */
        void *kern_vdata;
        __u32 type_id;
};

#define DATA_SEC ".data"
#define BSS_SEC ".bss"
#define RODATA_SEC ".rodata"
#define KCONFIG_SEC ".kconfig"
#define KSYMS_SEC ".ksyms"
#define STRUCT_OPS_SEC ".struct_ops"

enum libbpf_map_type {
        LIBBPF_MAP_UNSPEC,
        LIBBPF_MAP_DATA,
        LIBBPF_MAP_BSS,
        LIBBPF_MAP_RODATA,
        LIBBPF_MAP_KCONFIG,
};

struct bpf_map {
        struct bpf_object *obj;
        char *name;
        /* real_name is defined for special internal maps (.rodata*,
         * .data*, .bss, .kconfig) and preserves their original ELF section
         * name. This is important to be able to find the corresponding BTF
         * DATASEC information.
         */
        char *real_name;
        int fd;
        int sec_idx;
        size_t sec_offset;
        int map_ifindex;
        int inner_map_fd;
        struct bpf_map_def def;
        __u32 numa_node;
        __u32 btf_var_idx;
        __u32 btf_key_type_id;
        __u32 btf_value_type_id;
        __u32 btf_vmlinux_value_type_id;
        void *priv;
        bpf_map_clear_priv_t clear_priv;
        enum libbpf_map_type libbpf_type;
        void *mmaped;
        struct bpf_struct_ops *st_ops;
        struct bpf_map *inner_map;
        void **init_slots;
        int init_slots_sz;
        char *pin_path;
        bool pinned;
        bool reused;
        bool autocreate;
        __u64 map_extra;
};

enum extern_type {
        EXT_UNKNOWN,
        EXT_KCFG,
        EXT_KSYM,
};

enum kcfg_type {
        KCFG_UNKNOWN,
        KCFG_CHAR,
        KCFG_BOOL,
        KCFG_INT,
        KCFG_TRISTATE,
        KCFG_CHAR_ARR,
};

struct extern_desc {
        enum extern_type type;
        int sym_idx;
        int btf_id;
        int sec_btf_id;
        const char *name;
        bool is_set;
        bool is_weak;
        union {
                struct {
                        enum kcfg_type type;
                        int sz;
                        int align;
                        int data_off;
                        bool is_signed;
                } kcfg;
                struct {
                        unsigned long long addr;

                        /* target btf_id of the corresponding kernel var. */
                        int kernel_btf_obj_fd;
                        int kernel_btf_id;

                        /* local btf_id of the ksym extern's type. */
                        __u32 type_id;
                        /* BTF fd index to be patched in for insn->off; this
                         * is 0 for vmlinux BTF, or the index in
                         * obj->fd_array for module BTF
                         */
                        __s16 btf_fd_idx;
                } ksym;
        };
};
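
/* Illustrative externs that end up described by extern_desc (a sketch):
 *
 *      extern int CONFIG_HZ __kconfig;                 // EXT_KCFG
 *      extern const struct rq runqueues __ksym;        // EXT_KSYM
 *
 * __kconfig externs are filled from the kernel config (e.g.,
 * /proc/config.gz), __ksym externs are resolved against kernel/module BTF
 * or /proc/kallsyms.
 */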

static LIST_HEAD(bpf_objects_list);

struct module_btf {
        struct btf *btf;
        char *name;
        __u32 id;
        int fd;
        int fd_array_idx;
};

enum sec_type {
        SEC_UNUSED = 0,
        SEC_RELO,
        SEC_BSS,
        SEC_DATA,
        SEC_RODATA,
};

struct elf_sec_desc {
        enum sec_type sec_type;
        Elf64_Shdr *shdr;
        Elf_Data *data;
};

struct elf_state {
        int fd;
        const void *obj_buf;
        size_t obj_buf_sz;
        Elf *elf;
        Elf64_Ehdr *ehdr;
        Elf_Data *symbols;
        Elf_Data *st_ops_data;
        size_t shstrndx; /* section index for section name strings */
        size_t strtabidx;
        struct elf_sec_desc *secs;
        int sec_cnt;
        int maps_shndx;
        int btf_maps_shndx;
        __u32 btf_maps_sec_btf_id;
        int text_shndx;
        int symbols_shndx;
        int st_ops_shndx;
};

struct usdt_manager;

struct bpf_object {
        char name[BPF_OBJ_NAME_LEN];
        char license[64];
        __u32 kern_version;

        struct bpf_program *programs;
        size_t nr_programs;
        struct bpf_map *maps;
        size_t nr_maps;
        size_t maps_cap;

        char *kconfig;
        struct extern_desc *externs;
        int nr_extern;
        int kconfig_map_idx;

        bool loaded;
        bool has_subcalls;
        bool has_rodata;

        struct bpf_gen *gen_loader;

        /* Information when doing ELF-related work. Only valid if efile.elf is not NULL */
        struct elf_state efile;
        /*
         * All loaded bpf_object instances are linked in a list, which is
         * hidden from the caller. bpf_objects__<func> handlers deal with
         * all objects.
         */
        struct list_head list;

        struct btf *btf;
        struct btf_ext *btf_ext;

        /* Parse and load BTF vmlinux if any of the programs in the object need
         * it at load time.
         */
        struct btf *btf_vmlinux;
        /* Path to the custom BTF to be used for BPF CO-RE relocations as an
         * override for vmlinux BTF.
         */
        char *btf_custom_path;
        /* vmlinux BTF override for CO-RE relocations */
        struct btf *btf_vmlinux_override;
        /* Lazily initialized kernel module BTFs */
        struct module_btf *btf_modules;
        bool btf_modules_loaded;
        size_t btf_module_cnt;
        size_t btf_module_cap;

        /* optional log settings passed to BPF_BTF_LOAD and BPF_PROG_LOAD commands */
        char *log_buf;
        size_t log_size;
        __u32 log_level;

        void *priv;
        bpf_object_clear_priv_t clear_priv;

        int *fd_array;
        size_t fd_array_cap;
        size_t fd_array_cnt;

        struct usdt_manager *usdt_man;

        char path[];
};

static const char *elf_sym_str(const struct bpf_object *obj, size_t off);
static const char *elf_sec_str(const struct bpf_object *obj, size_t off);
static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx);
static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name);
static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn);
static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn);
static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn);
static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx);
static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx);

void bpf_program__unload(struct bpf_program *prog)
{
        int i;

        if (!prog)
                return;

        /*
         * If the object is opened but the program was never loaded,
         * it is possible that prog->instances.nr == -1.
         */
        if (prog->instances.nr > 0) {
                for (i = 0; i < prog->instances.nr; i++)
                        zclose(prog->instances.fds[i]);
        } else if (prog->instances.nr != -1) {
                pr_warn("Internal error: instances.nr is %d\n",
                        prog->instances.nr);
        }

        prog->instances.nr = -1;
        zfree(&prog->instances.fds);

        zfree(&prog->func_info);
        zfree(&prog->line_info);
}

static void bpf_program__exit(struct bpf_program *prog)
{
        if (!prog)
                return;

        if (prog->clear_priv)
                prog->clear_priv(prog, prog->priv);

        prog->priv = NULL;
        prog->clear_priv = NULL;

        bpf_program__unload(prog);
        zfree(&prog->name);
        zfree(&prog->sec_name);
        zfree(&prog->pin_name);
        zfree(&prog->insns);
        zfree(&prog->reloc_desc);

        prog->nr_reloc = 0;
        prog->insns_cnt = 0;
        prog->sec_idx = -1;
}

static char *__bpf_program__pin_name(struct bpf_program *prog)
{
        char *name, *p;

        if (libbpf_mode & LIBBPF_STRICT_SEC_NAME)
                name = strdup(prog->name);
        else
                name = strdup(prog->sec_name);

        if (!name)
                return NULL;

        p = name;

        while ((p = strchr(p, '/')))
                *p = '_';

        return name;
}
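
/* E.g., in non-strict mode a program in SEC("cgroup/sock") gets the pin
 * name "cgroup_sock", which is usable as a file name under /sys/fs/bpf
 * when bpf_object__pin_programs() pins programs recursively.
 */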

static bool insn_is_subprog_call(const struct bpf_insn *insn)
{
        return BPF_CLASS(insn->code) == BPF_JMP &&
               BPF_OP(insn->code) == BPF_CALL &&
               BPF_SRC(insn->code) == BPF_K &&
               insn->src_reg == BPF_PSEUDO_CALL &&
               insn->dst_reg == 0 &&
               insn->off == 0;
}
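
/* Illustrative encoding matched above (a sketch): a BPF-to-BPF call
 * emitted by the compiler looks like
 *
 *      struct bpf_insn call = {
 *              .code    = BPF_JMP | BPF_CALL,
 *              .src_reg = BPF_PSEUDO_CALL,
 *              .imm     = 7,  // relative insn offset of the target subprog
 *      };
 *
 * whereas a helper call has src_reg == 0 and imm == the helper's ID.
 */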

static bool is_call_insn(const struct bpf_insn *insn)
{
        return insn->code == (BPF_JMP | BPF_CALL);
}

static bool insn_is_pseudo_func(struct bpf_insn *insn)
{
        return is_ldimm64_insn(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
}

static int
bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog,
                      const char *name, size_t sec_idx, const char *sec_name,
                      size_t sec_off, void *insn_data, size_t insn_data_sz)
{
        if (insn_data_sz == 0 || insn_data_sz % BPF_INSN_SZ || sec_off % BPF_INSN_SZ) {
                pr_warn("sec '%s': corrupted program '%s', offset %zu, size %zu\n",
                        sec_name, name, sec_off, insn_data_sz);
                return -EINVAL;
        }

        memset(prog, 0, sizeof(*prog));
        prog->obj = obj;

        prog->sec_idx = sec_idx;
        prog->sec_insn_off = sec_off / BPF_INSN_SZ;
        prog->sec_insn_cnt = insn_data_sz / BPF_INSN_SZ;
        /* insns_cnt can later be increased by appending used subprograms */
        prog->insns_cnt = prog->sec_insn_cnt;

        prog->type = BPF_PROG_TYPE_UNSPEC;

        /* libbpf's convention for SEC("?abc...") is that it's just like
         * SEC("abc...") but the corresponding bpf_program starts out with
         * autoload set to false.
         */
        if (sec_name[0] == '?') {
                prog->autoload = false;
                /* from now on forget there was ? in section name */
                sec_name++;
        } else {
                prog->autoload = true;
        }

        prog->instances.fds = NULL;
        prog->instances.nr = -1;

        /* inherit object's log_level */
        prog->log_level = obj->log_level;

        prog->sec_name = strdup(sec_name);
        if (!prog->sec_name)
                goto errout;

        prog->name = strdup(name);
        if (!prog->name)
                goto errout;

        prog->pin_name = __bpf_program__pin_name(prog);
        if (!prog->pin_name)
                goto errout;

        prog->insns = malloc(insn_data_sz);
        if (!prog->insns)
                goto errout;
        memcpy(prog->insns, insn_data, insn_data_sz);

        return 0;
errout:
        pr_warn("sec '%s': failed to allocate memory for prog '%s'\n", sec_name, name);
        bpf_program__exit(prog);
        return -ENOMEM;
}

static int
bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
                         const char *sec_name, int sec_idx)
{
        Elf_Data *symbols = obj->efile.symbols;
        struct bpf_program *prog, *progs;
        void *data = sec_data->d_buf;
        size_t sec_sz = sec_data->d_size, sec_off, prog_sz, nr_syms;
        int nr_progs, err, i;
        const char *name;
        Elf64_Sym *sym;

        progs = obj->programs;
        nr_progs = obj->nr_programs;
        nr_syms = symbols->d_size / sizeof(Elf64_Sym);
        sec_off = 0;

        for (i = 0; i < nr_syms; i++) {
                sym = elf_sym_by_idx(obj, i);

                if (sym->st_shndx != sec_idx)
                        continue;
                if (ELF64_ST_TYPE(sym->st_info) != STT_FUNC)
                        continue;

                prog_sz = sym->st_size;
                sec_off = sym->st_value;

                name = elf_sym_str(obj, sym->st_name);
                if (!name) {
                        pr_warn("sec '%s': failed to get symbol name for offset %zu\n",
                                sec_name, sec_off);
                        return -LIBBPF_ERRNO__FORMAT;
                }

                if (sec_off + prog_sz > sec_sz) {
                        pr_warn("sec '%s': program at offset %zu crosses section boundary\n",
                                sec_name, sec_off);
                        return -LIBBPF_ERRNO__FORMAT;
                }

                if (sec_idx != obj->efile.text_shndx && ELF64_ST_BIND(sym->st_info) == STB_LOCAL) {
                        pr_warn("sec '%s': program '%s' is static and not supported\n", sec_name, name);
                        return -ENOTSUP;
                }

                pr_debug("sec '%s': found program '%s' at insn offset %zu (%zu bytes), code size %zu insns (%zu bytes)\n",
                         sec_name, name, sec_off / BPF_INSN_SZ, sec_off, prog_sz / BPF_INSN_SZ, prog_sz);

                progs = libbpf_reallocarray(progs, nr_progs + 1, sizeof(*progs));
                if (!progs) {
                        /*
                         * In this case the original obj->programs
                         * is still valid, so there's no need for special
                         * treatment in bpf_object__close().
                         */
                        pr_warn("sec '%s': failed to alloc memory for new program '%s'\n",
                                sec_name, name);
                        return -ENOMEM;
                }
                obj->programs = progs;

                prog = &progs[nr_progs];

                err = bpf_object__init_prog(obj, prog, name, sec_idx, sec_name,
                                            sec_off, data + sec_off, prog_sz);
                if (err)
                        return err;

                /* if the function is a global/weak symbol, but has restricted
                 * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF FUNC
                 * as static to enable more permissive BPF verification mode
                 * with more outside context available to the BPF verifier
                 */
                if (ELF64_ST_BIND(sym->st_info) != STB_LOCAL
                    && (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN
                        || ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL))
                        prog->mark_btf_static = true;

                nr_progs++;
                obj->nr_programs = nr_progs;
        }

        return 0;
}

__u32 get_kernel_version(void)
{
        /* On Ubuntu LINUX_VERSION_CODE doesn't correspond to info.release,
         * but Ubuntu provides the /proc/version_signature file, as described
         * at https://ubuntu.com/kernel, with example contents below, which
         * we can use to get a proper LINUX_VERSION_CODE.
         *
         *   Ubuntu 5.4.0-12.15-generic 5.4.8
         *
         * In the above, 5.4.8 is what the kernel is actually expecting,
         * while the uname() call will return 5.4.0 in info.release.
         */
        const char *ubuntu_kver_file = "/proc/version_signature";
        __u32 major, minor, patch;
        struct utsname info;

        if (access(ubuntu_kver_file, R_OK) == 0) {
                FILE *f;

                f = fopen(ubuntu_kver_file, "r");
                if (f) {
                        if (fscanf(f, "%*s %*s %u.%u.%u\n", &major, &minor, &patch) == 3) {
                                fclose(f);
                                return KERNEL_VERSION(major, minor, patch);
                        }
                        fclose(f);
                }
                /* something went wrong, fall back to uname() approach */
        }

        uname(&info);
        if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3)
                return 0;
        return KERNEL_VERSION(major, minor, patch);
}
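
/* For example, the "5.4.8" case from the comment above encodes as
 * KERNEL_VERSION(5, 4, 8) == (5 << 16) + (4 << 8) + 8 == 0x050408.
 */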

static const struct btf_member *
find_member_by_offset(const struct btf_type *t, __u32 bit_offset)
{
        struct btf_member *m;
        int i;

        for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
                if (btf_member_bit_offset(t, i) == bit_offset)
                        return m;
        }

        return NULL;
}

static const struct btf_member *
find_member_by_name(const struct btf *btf, const struct btf_type *t,
                    const char *name)
{
        struct btf_member *m;
        int i;

        for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
                if (!strcmp(btf__name_by_offset(btf, m->name_off), name))
                        return m;
        }

        return NULL;
}

#define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_"
static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
                                   const char *name, __u32 kind);

static int
find_struct_ops_kern_types(const struct btf *btf, const char *tname,
                           const struct btf_type **type, __u32 *type_id,
                           const struct btf_type **vtype, __u32 *vtype_id,
                           const struct btf_member **data_member)
{
        const struct btf_type *kern_type, *kern_vtype;
        const struct btf_member *kern_data_member;
        __s32 kern_vtype_id, kern_type_id;
        __u32 i;

        kern_type_id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT);
        if (kern_type_id < 0) {
                pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n",
                        tname);
                return kern_type_id;
        }
        kern_type = btf__type_by_id(btf, kern_type_id);

        /* Find the corresponding "map_value" type that will be used
         * in map_update(BPF_MAP_TYPE_STRUCT_OPS).  For example,
         * find "struct bpf_struct_ops_tcp_congestion_ops" from the
         * btf_vmlinux.
         */
        kern_vtype_id = find_btf_by_prefix_kind(btf, STRUCT_OPS_VALUE_PREFIX,
                                                tname, BTF_KIND_STRUCT);
        if (kern_vtype_id < 0) {
                pr_warn("struct_ops init_kern: struct %s%s is not found in kernel BTF\n",
                        STRUCT_OPS_VALUE_PREFIX, tname);
                return kern_vtype_id;
        }
        kern_vtype = btf__type_by_id(btf, kern_vtype_id);

        /* Find "struct tcp_congestion_ops" from
         * struct bpf_struct_ops_tcp_congestion_ops {
         *      [ ... ]
         *      struct tcp_congestion_ops data;
         * }
         */
        kern_data_member = btf_members(kern_vtype);
        for (i = 0; i < btf_vlen(kern_vtype); i++, kern_data_member++) {
                if (kern_data_member->type == kern_type_id)
                        break;
        }
        if (i == btf_vlen(kern_vtype)) {
                pr_warn("struct_ops init_kern: struct %s data is not found in struct %s%s\n",
                        tname, STRUCT_OPS_VALUE_PREFIX, tname);
                return -EINVAL;
        }

        *type = kern_type;
        *type_id = kern_type_id;
        *vtype = kern_vtype;
        *vtype_id = kern_vtype_id;
        *data_member = kern_data_member;

        return 0;
}

static bool bpf_map__is_struct_ops(const struct bpf_map *map)
{
        return map->def.type == BPF_MAP_TYPE_STRUCT_OPS;
}

/* Init the map's fields that depend on kern_btf */
static int bpf_map__init_kern_struct_ops(struct bpf_map *map,
                                         const struct btf *btf,
                                         const struct btf *kern_btf)
{
        const struct btf_member *member, *kern_member, *kern_data_member;
        const struct btf_type *type, *kern_type, *kern_vtype;
        __u32 i, kern_type_id, kern_vtype_id, kern_data_off;
        struct bpf_struct_ops *st_ops;
        void *data, *kern_data;
        const char *tname;
        int err;

        st_ops = map->st_ops;
        type = st_ops->type;
        tname = st_ops->tname;
        err = find_struct_ops_kern_types(kern_btf, tname,
                                         &kern_type, &kern_type_id,
                                         &kern_vtype, &kern_vtype_id,
                                         &kern_data_member);
        if (err)
                return err;

        pr_debug("struct_ops init_kern %s: type_id:%u kern_type_id:%u kern_vtype_id:%u\n",
                 map->name, st_ops->type_id, kern_type_id, kern_vtype_id);

        map->def.value_size = kern_vtype->size;
        map->btf_vmlinux_value_type_id = kern_vtype_id;

        st_ops->kern_vdata = calloc(1, kern_vtype->size);
        if (!st_ops->kern_vdata)
                return -ENOMEM;

        data = st_ops->data;
        kern_data_off = kern_data_member->offset / 8;
        kern_data = st_ops->kern_vdata + kern_data_off;

        member = btf_members(type);
        for (i = 0; i < btf_vlen(type); i++, member++) {
                const struct btf_type *mtype, *kern_mtype;
                __u32 mtype_id, kern_mtype_id;
                void *mdata, *kern_mdata;
                __s64 msize, kern_msize;
                __u32 moff, kern_moff;
                __u32 kern_member_idx;
                const char *mname;

                mname = btf__name_by_offset(btf, member->name_off);
                kern_member = find_member_by_name(kern_btf, kern_type, mname);
                if (!kern_member) {
                        pr_warn("struct_ops init_kern %s: Cannot find member %s in kernel BTF\n",
                                map->name, mname);
                        return -ENOTSUP;
                }

                kern_member_idx = kern_member - btf_members(kern_type);
                if (btf_member_bitfield_size(type, i) ||
                    btf_member_bitfield_size(kern_type, kern_member_idx)) {
                        pr_warn("struct_ops init_kern %s: bitfield %s is not supported\n",
                                map->name, mname);
                        return -ENOTSUP;
                }

                moff = member->offset / 8;
                kern_moff = kern_member->offset / 8;

                mdata = data + moff;
                kern_mdata = kern_data + kern_moff;

                mtype = skip_mods_and_typedefs(btf, member->type, &mtype_id);
                kern_mtype = skip_mods_and_typedefs(kern_btf, kern_member->type,
                                                    &kern_mtype_id);
                if (BTF_INFO_KIND(mtype->info) !=
                    BTF_INFO_KIND(kern_mtype->info)) {
                        pr_warn("struct_ops init_kern %s: Unmatched member type %s %u != %u(kernel)\n",
                                map->name, mname, BTF_INFO_KIND(mtype->info),
                                BTF_INFO_KIND(kern_mtype->info));
                        return -ENOTSUP;
                }

                if (btf_is_ptr(mtype)) {
                        struct bpf_program *prog;

                        prog = st_ops->progs[i];
                        if (!prog)
                                continue;

                        kern_mtype = skip_mods_and_typedefs(kern_btf,
                                                            kern_mtype->type,
                                                            &kern_mtype_id);

                        /* mtype->type must be a func_proto which was
                         * guaranteed in bpf_object__collect_st_ops_relos(),
                         * so only check kern_mtype for func_proto here.
                         */
                        if (!btf_is_func_proto(kern_mtype)) {
                                pr_warn("struct_ops init_kern %s: kernel member %s is not a func ptr\n",
                                        map->name, mname);
                                return -ENOTSUP;
                        }

                        prog->attach_btf_id = kern_type_id;
                        prog->expected_attach_type = kern_member_idx;

                        st_ops->kern_func_off[i] = kern_data_off + kern_moff;

                        pr_debug("struct_ops init_kern %s: func ptr %s is set to prog %s from data(+%u) to kern_data(+%u)\n",
                                 map->name, mname, prog->name, moff,
                                 kern_moff);

                        continue;
                }

                msize = btf__resolve_size(btf, mtype_id);
                kern_msize = btf__resolve_size(kern_btf, kern_mtype_id);
                if (msize < 0 || kern_msize < 0 || msize != kern_msize) {
                        pr_warn("struct_ops init_kern %s: Error in size of member %s: %zd != %zd(kernel)\n",
                                map->name, mname, (ssize_t)msize,
                                (ssize_t)kern_msize);
                        return -ENOTSUP;
                }

                pr_debug("struct_ops init_kern %s: copy %s %u bytes from data(+%u) to kern_data(+%u)\n",
                         map->name, mname, (unsigned int)msize,
                         moff, kern_moff);
                memcpy(kern_mdata, mdata, msize);
        }

        return 0;
}

static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
{
        struct bpf_map *map;
        size_t i;
        int err;

        for (i = 0; i < obj->nr_maps; i++) {
                map = &obj->maps[i];

                if (!bpf_map__is_struct_ops(map))
                        continue;

                err = bpf_map__init_kern_struct_ops(map, obj->btf,
                                                    obj->btf_vmlinux);
                if (err)
                        return err;
        }

        return 0;
}

static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
{
        const struct btf_type *type, *datasec;
        const struct btf_var_secinfo *vsi;
        struct bpf_struct_ops *st_ops;
        const char *tname, *var_name;
        __s32 type_id, datasec_id;
        const struct btf *btf;
        struct bpf_map *map;
        __u32 i;

        if (obj->efile.st_ops_shndx == -1)
                return 0;

        btf = obj->btf;
        datasec_id = btf__find_by_name_kind(btf, STRUCT_OPS_SEC,
                                            BTF_KIND_DATASEC);
        if (datasec_id < 0) {
                pr_warn("struct_ops init: DATASEC %s not found\n",
                        STRUCT_OPS_SEC);
                return -EINVAL;
        }

        datasec = btf__type_by_id(btf, datasec_id);
        vsi = btf_var_secinfos(datasec);
        for (i = 0; i < btf_vlen(datasec); i++, vsi++) {
                type = btf__type_by_id(obj->btf, vsi->type);
                var_name = btf__name_by_offset(obj->btf, type->name_off);

                type_id = btf__resolve_type(obj->btf, vsi->type);
                if (type_id < 0) {
                        pr_warn("struct_ops init: Cannot resolve var type_id %u in DATASEC %s\n",
                                vsi->type, STRUCT_OPS_SEC);
                        return -EINVAL;
                }

                type = btf__type_by_id(obj->btf, type_id);
                tname = btf__name_by_offset(obj->btf, type->name_off);
                if (!tname[0]) {
                        pr_warn("struct_ops init: anonymous type is not supported\n");
                        return -ENOTSUP;
                }
                if (!btf_is_struct(type)) {
                        pr_warn("struct_ops init: %s is not a struct\n", tname);
                        return -EINVAL;
                }

                map = bpf_object__add_map(obj);
                if (IS_ERR(map))
                        return PTR_ERR(map);

                map->sec_idx = obj->efile.st_ops_shndx;
                map->sec_offset = vsi->offset;
                map->name = strdup(var_name);
                if (!map->name)
                        return -ENOMEM;

                map->def.type = BPF_MAP_TYPE_STRUCT_OPS;
                map->def.key_size = sizeof(int);
                map->def.value_size = type->size;
                map->def.max_entries = 1;

                map->st_ops = calloc(1, sizeof(*map->st_ops));
                if (!map->st_ops)
                        return -ENOMEM;
                st_ops = map->st_ops;
                st_ops->data = malloc(type->size);
                st_ops->progs = calloc(btf_vlen(type), sizeof(*st_ops->progs));
                st_ops->kern_func_off = malloc(btf_vlen(type) *
                                               sizeof(*st_ops->kern_func_off));
                if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off)
                        return -ENOMEM;

                if (vsi->offset + type->size > obj->efile.st_ops_data->d_size) {
                        pr_warn("struct_ops init: var %s is beyond the end of DATASEC %s\n",
                                var_name, STRUCT_OPS_SEC);
                        return -EINVAL;
                }

                memcpy(st_ops->data,
                       obj->efile.st_ops_data->d_buf + vsi->offset,
                       type->size);
                st_ops->tname = tname;
                st_ops->type = type;
                st_ops->type_id = type_id;

                pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n",
                         tname, type_id, var_name, vsi->offset);
        }

        return 0;
}
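
/* Illustrative source-side counterpart (a sketch): a BPF object defines a
 * struct_ops variable such as
 *
 *      SEC(".struct_ops")
 *      struct tcp_congestion_ops my_cong_ops = {
 *              .init = (void *)my_init_prog,
 *              .name = "my_cong",
 *      };
 *
 * (my_cong_ops and my_init_prog are hypothetical names). Each variable in
 * STRUCT_OPS_SEC becomes one BPF_MAP_TYPE_STRUCT_OPS map above.
 */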

static struct bpf_object *bpf_object__new(const char *path,
                                          const void *obj_buf,
                                          size_t obj_buf_sz,
                                          const char *obj_name)
{
        bool strict = (libbpf_mode & LIBBPF_STRICT_NO_OBJECT_LIST);
        struct bpf_object *obj;
        char *end;

        obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
        if (!obj) {
                pr_warn("alloc memory failed for %s\n", path);
                return ERR_PTR(-ENOMEM);
        }

        strcpy(obj->path, path);
        if (obj_name) {
                libbpf_strlcpy(obj->name, obj_name, sizeof(obj->name));
        } else {
                /* Using the GNU version of basename(), which doesn't modify its argument. */
                libbpf_strlcpy(obj->name, basename((void *)path), sizeof(obj->name));
                end = strchr(obj->name, '.');
                if (end)
                        *end = 0;
        }

        obj->efile.fd = -1;
        /*
         * The caller of this function should also call
         * bpf_object__elf_finish() after data collection to return
         * obj_buf to the user. Otherwise we would need to duplicate the
         * buffer to avoid the user freeing it before ELF processing is
         * finished.
         */
        obj->efile.obj_buf = obj_buf;
        obj->efile.obj_buf_sz = obj_buf_sz;
        obj->efile.maps_shndx = -1;
        obj->efile.btf_maps_shndx = -1;
        obj->efile.st_ops_shndx = -1;
        obj->kconfig_map_idx = -1;

        obj->kern_version = get_kernel_version();
        obj->loaded = false;

        INIT_LIST_HEAD(&obj->list);
        if (!strict)
                list_add(&obj->list, &bpf_objects_list);
        return obj;
}

static void bpf_object__elf_finish(struct bpf_object *obj)
{
        if (!obj->efile.elf)
                return;

        elf_end(obj->efile.elf);
        obj->efile.elf = NULL;
        obj->efile.symbols = NULL;
        obj->efile.st_ops_data = NULL;

        zfree(&obj->efile.secs);
        obj->efile.sec_cnt = 0;
        zclose(obj->efile.fd);
        obj->efile.obj_buf = NULL;
        obj->efile.obj_buf_sz = 0;
}

static int bpf_object__elf_init(struct bpf_object *obj)
{
        Elf64_Ehdr *ehdr;
        int err = 0;
        Elf *elf;

        if (obj->efile.elf) {
                pr_warn("elf: init internal error\n");
                return -LIBBPF_ERRNO__LIBELF;
        }

        if (obj->efile.obj_buf_sz > 0) {
                /*
                 * obj_buf should have been validated by
                 * bpf_object__open_buffer().
                 */
                elf = elf_memory((char *)obj->efile.obj_buf, obj->efile.obj_buf_sz);
        } else {
                obj->efile.fd = open(obj->path, O_RDONLY | O_CLOEXEC);
                if (obj->efile.fd < 0) {
                        char errmsg[STRERR_BUFSIZE], *cp;

                        err = -errno;
                        cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
                        pr_warn("elf: failed to open %s: %s\n", obj->path, cp);
                        return err;
                }

                elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL);
        }

        if (!elf) {
                pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1));
                err = -LIBBPF_ERRNO__LIBELF;
                goto errout;
        }

        obj->efile.elf = elf;

        if (elf_kind(elf) != ELF_K_ELF) {
                err = -LIBBPF_ERRNO__FORMAT;
                pr_warn("elf: '%s' is not a proper ELF object\n", obj->path);
                goto errout;
        }

        if (gelf_getclass(elf) != ELFCLASS64) {
                err = -LIBBPF_ERRNO__FORMAT;
                pr_warn("elf: '%s' is not a 64-bit ELF object\n", obj->path);
                goto errout;
        }

        obj->efile.ehdr = ehdr = elf64_getehdr(elf);
        if (!obj->efile.ehdr) {
                pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1));
                err = -LIBBPF_ERRNO__FORMAT;
                goto errout;
        }

        if (elf_getshdrstrndx(elf, &obj->efile.shstrndx)) {
                pr_warn("elf: failed to get section names section index for %s: %s\n",
                        obj->path, elf_errmsg(-1));
                err = -LIBBPF_ERRNO__FORMAT;
                goto errout;
        }

        /* ELF is corrupted/truncated, avoid calling elf_strptr. */
        if (!elf_rawdata(elf_getscn(elf, obj->efile.shstrndx), NULL)) {
                pr_warn("elf: failed to get section names strings from %s: %s\n",
                        obj->path, elf_errmsg(-1));
                err = -LIBBPF_ERRNO__FORMAT;
                goto errout;
        }

        /* Old LLVM set e_machine to EM_NONE */
        if (ehdr->e_type != ET_REL || (ehdr->e_machine && ehdr->e_machine != EM_BPF)) {
                pr_warn("elf: %s is not a valid eBPF object file\n", obj->path);
                err = -LIBBPF_ERRNO__FORMAT;
                goto errout;
        }

        return 0;
errout:
        bpf_object__elf_finish(obj);
        return err;
}

static int bpf_object__check_endianness(struct bpf_object *obj)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2LSB)
                return 0;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2MSB)
                return 0;
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
        pr_warn("elf: endianness mismatch in %s.\n", obj->path);
        return -LIBBPF_ERRNO__ENDIAN;
}

static int
bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
{
        /* libbpf_strlcpy() only copies the first N - 1 bytes, so passing
         * size + 1 won't read past the end of the ELF data section buffer.
         */
        libbpf_strlcpy(obj->license, data, min(size + 1, sizeof(obj->license)));
        pr_debug("license of %s is %s\n", obj->path, obj->license);
        return 0;
}
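
/* Illustrative producer side (a sketch): BPF objects carry the license in
 * a dedicated ELF section, conventionally defined as
 *
 *      char _license[] SEC("license") = "GPL";
 *
 * which is the data buffer this function copies into obj->license.
 */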

static int
bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
{
        __u32 kver;

        if (size != sizeof(kver)) {
                pr_warn("invalid kver section in %s\n", obj->path);
                return -LIBBPF_ERRNO__FORMAT;
        }
        memcpy(&kver, data, sizeof(kver));
        obj->kern_version = kver;
        pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
        return 0;
}
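
/* Illustrative producer side (legacy convention, a sketch): older BPF
 * objects carried a 4-byte "version" section, e.g.
 *
 *      __u32 _version SEC("version") = LINUX_VERSION_CODE;
 *
 * which this function reads back into obj->kern_version.
 */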

static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
{
        if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
            type == BPF_MAP_TYPE_HASH_OF_MAPS)
                return true;
        return false;
}

static int find_elf_sec_sz(const struct bpf_object *obj, const char *name, __u32 *size)
{
        Elf_Data *data;
        Elf_Scn *scn;

        if (!name)
                return -EINVAL;

        scn = elf_sec_by_name(obj, name);
        data = elf_sec_data(obj, scn);
        if (data) {
                *size = data->d_size;
                return 0; /* found it */
        }

        return -ENOENT;
}

static int find_elf_var_offset(const struct bpf_object *obj, const char *name, __u32 *off)
{
        Elf_Data *symbols = obj->efile.symbols;
        const char *sname;
        size_t si;

        if (!name || !off)
                return -EINVAL;

        for (si = 0; si < symbols->d_size / sizeof(Elf64_Sym); si++) {
                Elf64_Sym *sym = elf_sym_by_idx(obj, si);

                if (ELF64_ST_TYPE(sym->st_info) != STT_OBJECT)
                        continue;

                if (ELF64_ST_BIND(sym->st_info) != STB_GLOBAL &&
                    ELF64_ST_BIND(sym->st_info) != STB_WEAK)
                        continue;

                sname = elf_sym_str(obj, sym->st_name);
                if (!sname) {
                        pr_warn("failed to get sym name string for var %s\n", name);
                        return -EIO;
                }
                if (strcmp(name, sname) == 0) {
                        *off = sym->st_value;
                        return 0;
                }
        }

        return -ENOENT;
}

static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
{
        struct bpf_map *map;
        int err;

        err = libbpf_ensure_mem((void **)&obj->maps, &obj->maps_cap,
                                sizeof(*obj->maps), obj->nr_maps + 1);
        if (err)
                return ERR_PTR(err);

        map = &obj->maps[obj->nr_maps++];
        map->obj = obj;
        map->fd = -1;
        map->inner_map_fd = -1;
        map->autocreate = true;

        return map;
}

static size_t bpf_map_mmap_sz(const struct bpf_map *map)
{
        long page_sz = sysconf(_SC_PAGE_SIZE);
        size_t map_sz;

        map_sz = (size_t)roundup(map->def.value_size, 8) * map->def.max_entries;
        map_sz = roundup(map_sz, page_sz);
        return map_sz;
}
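
/* E.g., a global data map with value_size == 100 and max_entries == 1 on
 * a 4 KiB page system: roundup(100, 8) * 1 == 104, which rounds up to a
 * 4096-byte mapping.
 */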

static char *internal_map_name(struct bpf_object *obj, const char *real_name)
{
        char map_name[BPF_OBJ_NAME_LEN], *p;
        int pfx_len, sfx_len = max((size_t)7, strlen(real_name));

        /* This is one of the more confusing parts of libbpf for various
         * reasons, some of which are historical. The original idea for naming
         * internal names was to include as much of BPF object name prefix as
         * possible, so that it can be distinguished from similar internal
         * maps of a different BPF object.
         * As an example, let's say we have bpf_object named 'my_object_name'
         * and internal map corresponding to '.rodata' ELF section. The final
         * map name advertised to user and to the kernel will be
         * 'my_objec.rodata', taking first 8 characters of object name and
         * entire 7 characters of '.rodata'.
         * Somewhat confusingly, if internal map ELF section name is shorter
         * than 7 characters, e.g., '.bss', we still reserve 7 characters
         * for the suffix, even though we only have 4 actual characters, and
         * resulting map will be called 'my_objec.bss', not even using all 15
         * characters allowed by the kernel. Oh well, at least the truncated
         * object name is somewhat consistent in this case. But if the map
         * name is '.kconfig', we'll still have entirety of '.kconfig' added
         * (8 chars) and thus will be left with only first 7 characters of the
         * object name ('my_obje'). Happy guessing, user, that the final map
         * name will be "my_obje.kconfig".
         * Now, with libbpf starting to support arbitrarily named .rodata.*
         * and .data.* data sections, it's possible that ELF section name is
         * longer than allowed 15 chars, so we now need to be careful to take
         * only up to 15 first characters of ELF name, taking no BPF object
         * name characters at all. So '.rodata.abracadabra' will result in
         * '.rodata.abracad' kernel and user-visible name.
         * We need to keep this convoluted logic intact for .data, .bss and
         * .rodata maps, but for new custom .data.custom and .rodata.custom
         * maps we use their ELF names as is, not prepending bpf_object name
         * in front. We still need to truncate them to 15 characters for the
         * kernel. Full name can be recovered for such maps by using DATASEC
         * BTF type associated with such map's value type, though.
         */
        if (sfx_len >= BPF_OBJ_NAME_LEN)
                sfx_len = BPF_OBJ_NAME_LEN - 1;

        /* if there are two or more dots in map name, it's a custom dot map */
        if (strchr(real_name + 1, '.') != NULL)
                pfx_len = 0;
        else
                pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1, strlen(obj->name));

        snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name,
                 sfx_len, real_name);

        /* sanitise map name to characters allowed by kernel */
        for (p = map_name; *p && p < map_name + sizeof(map_name); p++)
                if (!isalnum(*p) && *p != '_' && *p != '.')
                        *p = '_';

        return strdup(map_name);
}
1521
1522static int
1523bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map);
1524
1525static int
1526bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
1527                              const char *real_name, int sec_idx, void *data, size_t data_sz)
1528{
1529        struct bpf_map_def *def;
1530        struct bpf_map *map;
1531        int err;
1532
1533        map = bpf_object__add_map(obj);
1534        if (IS_ERR(map))
1535                return PTR_ERR(map);
1536
1537        map->libbpf_type = type;
1538        map->sec_idx = sec_idx;
1539        map->sec_offset = 0;
1540        map->real_name = strdup(real_name);
1541        map->name = internal_map_name(obj, real_name);
1542        if (!map->real_name || !map->name) {
1543                zfree(&map->real_name);
1544                zfree(&map->name);
1545                return -ENOMEM;
1546        }
1547
1548        def = &map->def;
1549        def->type = BPF_MAP_TYPE_ARRAY;
1550        def->key_size = sizeof(int);
1551        def->value_size = data_sz;
1552        def->max_entries = 1;
1553        def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG
1554                         ? BPF_F_RDONLY_PROG : 0;
1555        def->map_flags |= BPF_F_MMAPABLE;
1556
1557        pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
1558                 map->name, map->sec_idx, map->sec_offset, def->map_flags);
1559
1560        map->mmaped = mmap(NULL, bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
1561                           MAP_SHARED | MAP_ANONYMOUS, -1, 0);
1562        if (map->mmaped == MAP_FAILED) {
1563                err = -errno;
1564                map->mmaped = NULL;
1565                pr_warn("failed to alloc map '%s' content buffer: %d\n",
1566                        map->name, err);
1567                zfree(&map->real_name);
1568                zfree(&map->name);
1569                return err;
1570        }
1571
        /* failures are fine: maps like .rodata.str1.1 have no matching BTF type info */
1573        (void) bpf_map_find_btf_info(obj, map);
1574
1575        if (data)
1576                memcpy(map->mmaped, data, data_sz);
1577
1578        pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
1579        return 0;
1580}
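
/*
 * Note (illustrative usage, not part of this file's logic): user code can
 * read and, before object load, modify such global data maps through the
 * mmap-ed region set up above, e.g. via bpf_map__initial_value() or the
 * data/rodata/bss pointers generated in a BPF skeleton.
 */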
1581
1582static int bpf_object__init_global_data_maps(struct bpf_object *obj)
1583{
1584        struct elf_sec_desc *sec_desc;
1585        const char *sec_name;
1586        int err = 0, sec_idx;
1587
1588        /*
1589         * Populate obj->maps with libbpf internal maps.
1590         */
1591        for (sec_idx = 1; sec_idx < obj->efile.sec_cnt; sec_idx++) {
1592                sec_desc = &obj->efile.secs[sec_idx];
1593
1594                switch (sec_desc->sec_type) {
1595                case SEC_DATA:
1596                        sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
1597                        err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
1598                                                            sec_name, sec_idx,
1599                                                            sec_desc->data->d_buf,
1600                                                            sec_desc->data->d_size);
1601                        break;
1602                case SEC_RODATA:
1603                        obj->has_rodata = true;
1604                        sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
1605                        err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
1606                                                            sec_name, sec_idx,
1607                                                            sec_desc->data->d_buf,
1608                                                            sec_desc->data->d_size);
1609                        break;
1610                case SEC_BSS:
1611                        sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
1612                        err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
1613                                                            sec_name, sec_idx,
1614                                                            NULL,
1615                                                            sec_desc->data->d_size);
1616                        break;
1617                default:
1618                        /* skip */
1619                        break;
1620                }
1621                if (err)
1622                        return err;
1623        }
1624        return 0;
1625}
1626
1628static struct extern_desc *find_extern_by_name(const struct bpf_object *obj,
1629                                               const void *name)
1630{
1631        int i;
1632
1633        for (i = 0; i < obj->nr_extern; i++) {
1634                if (strcmp(obj->externs[i].name, name) == 0)
1635                        return &obj->externs[i];
1636        }
1637        return NULL;
1638}
1639
1640static int set_kcfg_value_tri(struct extern_desc *ext, void *ext_val,
1641                              char value)
1642{
1643        switch (ext->kcfg.type) {
1644        case KCFG_BOOL:
1645                if (value == 'm') {
1646                        pr_warn("extern (kcfg) %s=%c should be tristate or char\n",
1647                                ext->name, value);
1648                        return -EINVAL;
1649                }
                *(bool *)ext_val = value == 'y';
1651                break;
1652        case KCFG_TRISTATE:
1653                if (value == 'y')
1654                        *(enum libbpf_tristate *)ext_val = TRI_YES;
1655                else if (value == 'm')
1656                        *(enum libbpf_tristate *)ext_val = TRI_MODULE;
1657                else /* value == 'n' */
1658                        *(enum libbpf_tristate *)ext_val = TRI_NO;
1659                break;
1660        case KCFG_CHAR:
1661                *(char *)ext_val = value;
1662                break;
1663        case KCFG_UNKNOWN:
1664        case KCFG_INT:
1665        case KCFG_CHAR_ARR:
1666        default:
1667                pr_warn("extern (kcfg) %s=%c should be bool, tristate, or char\n",
1668                        ext->name, value);
1669                return -EINVAL;
1670        }
1671        ext->is_set = true;
1672        return 0;
1673}
1674
1675static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val,
1676                              const char *value)
1677{
1678        size_t len;
1679
1680        if (ext->kcfg.type != KCFG_CHAR_ARR) {
1681                pr_warn("extern (kcfg) %s=%s should be char array\n", ext->name, value);
1682                return -EINVAL;
1683        }
1684
1685        len = strlen(value);
        if (len < 2 || value[len - 1] != '"') {
1687                pr_warn("extern (kcfg) '%s': invalid string config '%s'\n",
1688                        ext->name, value);
1689                return -EINVAL;
1690        }
1691
1692        /* strip quotes */
1693        len -= 2;
1694        if (len >= ext->kcfg.sz) {
                pr_warn("extern (kcfg) '%s': string config '%s' is too long (%zu bytes), truncating to %d bytes\n",
                        ext->name, value, len, ext->kcfg.sz - 1);
1697                len = ext->kcfg.sz - 1;
1698        }
1699        memcpy(ext_val, value + 1, len);
1700        ext_val[len] = '\0';
1701        ext->is_set = true;
1702        return 0;
1703}
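
/*
 * E.g., for a Kconfig line CONFIG_CC_VERSION_TEXT="gcc 12.1" (illustrative),
 * the value arrives here still quoted ('"gcc 12.1"'); the quotes are
 * stripped and 'gcc 12.1' plus a NUL terminator is copied into the extern's
 * data, truncated if the declared char array is too small.
 */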
1704
1705static int parse_u64(const char *value, __u64 *res)
1706{
1707        char *value_end;
1708        int err;
1709
1710        errno = 0;
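        /* base 0 lets strtoull() accept decimal, octal ("010"), and hex ("0x10") */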
1711        *res = strtoull(value, &value_end, 0);
1712        if (errno) {
1713                err = -errno;
1714                pr_warn("failed to parse '%s' as integer: %d\n", value, err);
1715                return err;
1716        }
1717        if (*value_end) {
1718                pr_warn("failed to parse '%s' as integer completely\n", value);
1719                return -EINVAL;
1720        }
1721        return 0;
1722}
1723
1724static bool is_kcfg_value_in_range(const struct extern_desc *ext, __u64 v)
1725{
1726        int bit_sz = ext->kcfg.sz * 8;
1727
1728        if (ext->kcfg.sz == 8)
1729                return true;
1730
        /* Validate that the value stored in the u64 fits in an integer of
         * `ext->kcfg.sz` bytes without any loss of information. If the
         * target integer is signed, we rely on the following limits of an
         * integer type of Y bits and the subsequent transformation:
1735         *
1736         *     -2^(Y-1) <= X           <= 2^(Y-1) - 1
1737         *            0 <= X + 2^(Y-1) <= 2^Y - 1
1738         *            0 <= X + 2^(Y-1) <  2^Y
1739         *
1740         *  For unsigned target integer, check that all the (64 - Y) bits are
1741         *  zero.
1742         */
1743        if (ext->kcfg.is_signed)
1744                return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz);
1745        else
1746                return (v >> bit_sz) == 0;
1747}
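
/*
 * Worked example: for a signed 1-byte target (bit_sz == 8), the check
 * `v + 0x80 < 0x100` (with unsigned wrap-around) accepts exactly the
 * sign-extended values -128..127 and rejects everything else.
 */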
1748
1749static int set_kcfg_value_num(struct extern_desc *ext, void *ext_val,
1750                              __u64 value)
1751{
1752        if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) {
1753                pr_warn("extern (kcfg) %s=%llu should be integer\n",
1754                        ext->name, (unsigned long long)value);
1755                return -EINVAL;
1756        }
1757        if (!is_kcfg_value_in_range(ext, value)) {
1758                pr_warn("extern (kcfg) %s=%llu value doesn't fit in %d bytes\n",
1759                        ext->name, (unsigned long long)value, ext->kcfg.sz);
1760                return -ERANGE;
1761        }
        switch (ext->kcfg.sz) {
        case 1: *(__u8 *)ext_val = value; break;
        case 2: *(__u16 *)ext_val = value; break;
        case 4: *(__u32 *)ext_val = value; break;
        case 8: *(__u64 *)ext_val = value; break;
        default:
                return -EINVAL;
        }
1770        ext->is_set = true;
1771        return 0;
1772}
1773
1774static int bpf_object__process_kconfig_line(struct bpf_object *obj,
1775                                            char *buf, void *data)
1776{
1777        struct extern_desc *ext;
1778        char *sep, *value;
1779        int len, err = 0;
1780        void *ext_val;
1781        __u64 num;
1782
1783        if (!str_has_pfx(buf, "CONFIG_"))
1784                return 0;
1785
1786        sep = strchr(buf, '=');
1787        if (!sep) {
1788                pr_warn("failed to parse '%s': no separator\n", buf);
1789                return -EINVAL;
1790        }
1791
1792        /* Trim ending '\n' */
1793        len = strlen(buf);
1794        if (buf[len - 1] == '\n')
1795                buf[len - 1] = '\0';
1796        /* Split on '=' and ensure that a value is present. */
1797        *sep = '\0';
1798        if (!sep[1]) {
1799                *sep = '=';
1800                pr_warn("failed to parse '%s': no value\n", buf);
1801                return -EINVAL;
1802        }
1803
1804        ext = find_extern_by_name(obj, buf);
1805        if (!ext || ext->is_set)
1806                return 0;
1807
1808        ext_val = data + ext->kcfg.data_off;
1809        value = sep + 1;
1810
1811        switch (*value) {
1812        case 'y': case 'n': case 'm':
1813                err = set_kcfg_value_tri(ext, ext_val, *value);
1814                break;
1815        case '"':
1816                err = set_kcfg_value_str(ext, ext_val, value);
1817                break;
1818        default:
1819                /* assume integer */
1820                err = parse_u64(value, &num);
1821                if (err) {
1822                        pr_warn("extern (kcfg) %s=%s should be integer\n",
1823                                ext->name, value);
1824                        return err;
1825                }
1826                err = set_kcfg_value_num(ext, ext_val, num);
1827                break;
1828        }
1829        if (err)
1830                return err;
1831        pr_debug("extern (kcfg) %s=%s\n", ext->name, value);
1832        return 0;
1833}
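
/*
 * Illustrative Kconfig lines and the BPF-side externs they would set
 * (the declarations are hypothetical examples, not part of libbpf):
 *
 *   CONFIG_BPF_SYSCALL=y  ->  extern bool CONFIG_BPF_SYSCALL __kconfig;
 *   CONFIG_BPF_LSM=m      ->  extern enum libbpf_tristate CONFIG_BPF_LSM __kconfig;
 *   CONFIG_HZ=1000        ->  extern int CONFIG_HZ __kconfig;
 */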
1834
1835static int bpf_object__read_kconfig_file(struct bpf_object *obj, void *data)
1836{
1837        char buf[PATH_MAX];
1838        struct utsname uts;
1839        int len, err = 0;
1840        gzFile file;
1841
1842        uname(&uts);
1843        len = snprintf(buf, PATH_MAX, "/boot/config-%s", uts.release);
1844        if (len < 0)
1845                return -EINVAL;
1846        else if (len >= PATH_MAX)
1847                return -ENAMETOOLONG;
1848
1849        /* gzopen also accepts uncompressed files. */
1850        file = gzopen(buf, "r");
1851        if (!file)
1852                file = gzopen("/proc/config.gz", "r");
1853
1854        if (!file) {
1855                pr_warn("failed to open system Kconfig\n");
1856                return -ENOENT;
1857        }
1858
1859        while (gzgets(file, buf, sizeof(buf))) {
1860                err = bpf_object__process_kconfig_line(obj, buf, data);
1861                if (err) {
1862                        pr_warn("error parsing system Kconfig line '%s': %d\n",
1863                                buf, err);
1864                        goto out;
1865                }
1866        }
1867
1868out:
1869        gzclose(file);
1870        return err;
1871}
1872
1873static int bpf_object__read_kconfig_mem(struct bpf_object *obj,
1874                                        const char *config, void *data)
1875{
1876        char buf[PATH_MAX];
1877        int err = 0;
1878        FILE *file;
1879
1880        file = fmemopen((void *)config, strlen(config), "r");
1881        if (!file) {
1882                err = -errno;
1883                pr_warn("failed to open in-memory Kconfig: %d\n", err);
1884                return err;
1885        }
1886
1887        while (fgets(buf, sizeof(buf), file)) {
1888                err = bpf_object__process_kconfig_line(obj, buf, data);
1889                if (err) {
1890                        pr_warn("error parsing in-memory Kconfig line '%s': %d\n",
1891                                buf, err);
1892                        break;
1893                }
1894        }
1895
1896        fclose(file);
1897        return err;
1898}
1899
1900static int bpf_object__init_kconfig_map(struct bpf_object *obj)
1901{
1902        struct extern_desc *last_ext = NULL, *ext;
1903        size_t map_sz;
1904        int i, err;
1905
1906        for (i = 0; i < obj->nr_extern; i++) {
1907                ext = &obj->externs[i];
1908                if (ext->type == EXT_KCFG)
1909                        last_ext = ext;
1910        }
1911
1912        if (!last_ext)
1913                return 0;
1914
1915        map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz;
1916        err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG,
1917                                            ".kconfig", obj->efile.symbols_shndx,
1918                                            NULL, map_sz);
1919        if (err)
1920                return err;
1921
1922        obj->kconfig_map_idx = obj->nr_maps - 1;
1923
1924        return 0;
1925}
1926
1927static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
1928{
1929        Elf_Data *symbols = obj->efile.symbols;
1930        int i, map_def_sz = 0, nr_maps = 0, nr_syms;
1931        Elf_Data *data = NULL;
1932        Elf_Scn *scn;
1933
1934        if (obj->efile.maps_shndx < 0)
1935                return 0;
1936
1937        if (libbpf_mode & LIBBPF_STRICT_MAP_DEFINITIONS) {
1938                pr_warn("legacy map definitions in SEC(\"maps\") are not supported\n");
1939                return -EOPNOTSUPP;
1940        }
1941
1942        if (!symbols)
1943                return -EINVAL;
1944
1945        scn = elf_sec_by_idx(obj, obj->efile.maps_shndx);
1946        data = elf_sec_data(obj, scn);
1947        if (!scn || !data) {
1948                pr_warn("elf: failed to get legacy map definitions for %s\n",
1949                        obj->path);
1950                return -EINVAL;
1951        }
1952
        /*
         * Count the number of maps. Each map has a name.
         * Arrays of maps are not supported: only the first element is
         * considered.
         *
         * TODO: Detect arrays of maps and report an error.
         */
1960        nr_syms = symbols->d_size / sizeof(Elf64_Sym);
1961        for (i = 0; i < nr_syms; i++) {
1962                Elf64_Sym *sym = elf_sym_by_idx(obj, i);
1963
1964                if (sym->st_shndx != obj->efile.maps_shndx)
1965                        continue;
1966                if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION)
1967                        continue;
1968                nr_maps++;
1969        }
1970        /* Assume equally sized map definitions */
1971        pr_debug("elf: found %d legacy map definitions (%zd bytes) in %s\n",
1972                 nr_maps, data->d_size, obj->path);
1973
1974        if (!data->d_size || nr_maps == 0 || (data->d_size % nr_maps) != 0) {
1975                pr_warn("elf: unable to determine legacy map definition size in %s\n",
1976                        obj->path);
1977                return -EINVAL;
1978        }
1979        map_def_sz = data->d_size / nr_maps;
1980
1981        /* Fill obj->maps using data in "maps" section.  */
1982        for (i = 0; i < nr_syms; i++) {
1983                Elf64_Sym *sym = elf_sym_by_idx(obj, i);
1984                const char *map_name;
1985                struct bpf_map_def *def;
1986                struct bpf_map *map;
1987
1988                if (sym->st_shndx != obj->efile.maps_shndx)
1989                        continue;
1990                if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION)
1991                        continue;
1992
1993                map = bpf_object__add_map(obj);
1994                if (IS_ERR(map))
1995                        return PTR_ERR(map);
1996
1997                map_name = elf_sym_str(obj, sym->st_name);
1998                if (!map_name) {
1999                        pr_warn("failed to get map #%d name sym string for obj %s\n",
2000                                i, obj->path);
2001                        return -LIBBPF_ERRNO__FORMAT;
2002                }
2003
2004                pr_warn("map '%s' (legacy): legacy map definitions are deprecated, use BTF-defined maps instead\n", map_name);
2005
2006                if (ELF64_ST_BIND(sym->st_info) == STB_LOCAL) {
2007                        pr_warn("map '%s' (legacy): static maps are not supported\n", map_name);
2008                        return -ENOTSUP;
2009                }
2010
2011                map->libbpf_type = LIBBPF_MAP_UNSPEC;
2012                map->sec_idx = sym->st_shndx;
2013                map->sec_offset = sym->st_value;
2014                pr_debug("map '%s' (legacy): at sec_idx %d, offset %zu.\n",
2015                         map_name, map->sec_idx, map->sec_offset);
2016                if (sym->st_value + map_def_sz > data->d_size) {
2017                        pr_warn("corrupted maps section in %s: last map \"%s\" too small\n",
2018                                obj->path, map_name);
2019                        return -EINVAL;
2020                }
2021
2022                map->name = strdup(map_name);
2023                if (!map->name) {
2024                        pr_warn("map '%s': failed to alloc map name\n", map_name);
2025                        return -ENOMEM;
2026                }
2027                pr_debug("map %d is \"%s\"\n", i, map->name);
2028                def = (struct bpf_map_def *)(data->d_buf + sym->st_value);
                /*
                 * If the definition of the map in the object file fits in
                 * bpf_map_def, copy it. Any extra fields in our version
                 * of bpf_map_def will default to zero, since map entries
                 * are zero-initialized when allocated in
                 * bpf_object__add_map().
                 */
2035                if (map_def_sz <= sizeof(struct bpf_map_def)) {
2036                        memcpy(&map->def, def, map_def_sz);
2037                } else {
2038                        /*
2039                         * Here the map structure being read is bigger than what
2040                         * we expect, truncate if the excess bits are all zero.
2041                         * If they are not zero, reject this map as
2042                         * incompatible.
2043                         */
2044                        char *b;
2045
2046                        for (b = ((char *)def) + sizeof(struct bpf_map_def);
2047                             b < ((char *)def) + map_def_sz; b++) {
2048                                if (*b != 0) {
2049                                        pr_warn("maps section in %s: \"%s\" has unrecognized, non-zero options\n",
2050                                                obj->path, map_name);
2051                                        if (strict)
2052                                                return -EINVAL;
2053                                }
2054                        }
2055                        memcpy(&map->def, def, sizeof(struct bpf_map_def));
2056                }
2057
2058                /* btf info may not exist but fill it in if it does exist */
2059                (void) bpf_map_find_btf_info(obj, map);
2060        }
2061        return 0;
2062}
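
/*
 * For reference, the (deprecated) legacy map definition shape handled above
 * looked like this in BPF program source (illustrative):
 *
 *   struct bpf_map_def SEC("maps") my_map = {
 *           .type        = BPF_MAP_TYPE_HASH,
 *           .key_size    = sizeof(__u32),
 *           .value_size  = sizeof(__u64),
 *           .max_entries = 256,
 *   };
 */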
2063
2064const struct btf_type *
2065skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id)
2066{
2067        const struct btf_type *t = btf__type_by_id(btf, id);
2068
2069        if (res_id)
2070                *res_id = id;
2071
2072        while (btf_is_mod(t) || btf_is_typedef(t)) {
2073                if (res_id)
2074                        *res_id = t->type;
2075                t = btf__type_by_id(btf, t->type);
2076        }
2077
2078        return t;
2079}
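
/*
 * E.g., given "typedef const volatile int my_t;" (illustrative), resolving
 * a my_t member walks the TYPEDEF and CONST/VOLATILE modifiers down to the
 * underlying INT type, optionally reporting the final type ID via res_id.
 */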
2080
2081static const struct btf_type *
2082resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id)
2083{
2084        const struct btf_type *t;
2085
2086        t = skip_mods_and_typedefs(btf, id, NULL);
2087        if (!btf_is_ptr(t))
2088                return NULL;
2089
2090        t = skip_mods_and_typedefs(btf, t->type, res_id);
2091
2092        return btf_is_func_proto(t) ? t : NULL;
2093}
2094
2095static const char *__btf_kind_str(__u16 kind)
2096{
2097        switch (kind) {
2098        case BTF_KIND_UNKN: return "void";
2099        case BTF_KIND_INT: return "int";
2100        case BTF_KIND_PTR: return "ptr";
2101        case BTF_KIND_ARRAY: return "array";
2102        case BTF_KIND_STRUCT: return "struct";
2103        case BTF_KIND_UNION: return "union";
2104        case BTF_KIND_ENUM: return "enum";
2105        case BTF_KIND_FWD: return "fwd";
2106        case BTF_KIND_TYPEDEF: return "typedef";
2107        case BTF_KIND_VOLATILE: return "volatile";
2108        case BTF_KIND_CONST: return "const";
2109        case BTF_KIND_RESTRICT: return "restrict";
2110        case BTF_KIND_FUNC: return "func";
2111        case BTF_KIND_FUNC_PROTO: return "func_proto";
2112        case BTF_KIND_VAR: return "var";
2113        case BTF_KIND_DATASEC: return "datasec";
2114        case BTF_KIND_FLOAT: return "float";
2115        case BTF_KIND_DECL_TAG: return "decl_tag";
2116        case BTF_KIND_TYPE_TAG: return "type_tag";
2117        default: return "unknown";
2118        }
2119}
2120
2121const char *btf_kind_str(const struct btf_type *t)
2122{
2123        return __btf_kind_str(btf_kind(t));
2124}
2125
/*
 * Fetch an integer attribute of a BTF map definition. Such attributes are
 * represented using a pointer to an array, in which the dimensionality of
 * the array encodes the specified integer value. E.g.,
 * int (*type)[BPF_MAP_TYPE_ARRAY]; encodes the `type => BPF_MAP_TYPE_ARRAY`
 * key/value pair completely within the BTF type definition, while using only
 * sizeof(void *) bytes of space in the ELF data section.
 */
2133static bool get_map_field_int(const char *map_name, const struct btf *btf,
2134                              const struct btf_member *m, __u32 *res)
2135{
2136        const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
2137        const char *name = btf__name_by_offset(btf, m->name_off);
2138        const struct btf_array *arr_info;
2139        const struct btf_type *arr_t;
2140
2141        if (!btf_is_ptr(t)) {
2142                pr_warn("map '%s': attr '%s': expected PTR, got %s.\n",
2143                        map_name, name, btf_kind_str(t));
2144                return false;
2145        }
2146
2147        arr_t = btf__type_by_id(btf, t->type);
2148        if (!arr_t) {
2149                pr_warn("map '%s': attr '%s': type [%u] not found.\n",
2150                        map_name, name, t->type);
2151                return false;
2152        }
2153        if (!btf_is_array(arr_t)) {
2154                pr_warn("map '%s': attr '%s': expected ARRAY, got %s.\n",
2155                        map_name, name, btf_kind_str(arr_t));
2156                return false;
2157        }
2158        arr_info = btf_array(arr_t);
2159        *res = arr_info->nelems;
2160        return true;
2161}
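
/*
 * For reference, the __uint() macro in tools/lib/bpf/bpf_helpers.h produces
 * exactly such pointer-to-array encodings:
 *
 *   #define __uint(name, val) int (*name)[val]
 *
 * so __uint(max_entries, 64) becomes "int (*max_entries)[64]".
 */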
2162
2163static int build_map_pin_path(struct bpf_map *map, const char *path)
2164{
2165        char buf[PATH_MAX];
2166        int len;
2167
2168        if (!path)
2169                path = "/sys/fs/bpf";
2170
2171        len = snprintf(buf, PATH_MAX, "%s/%s", path, bpf_map__name(map));
2172        if (len < 0)
2173                return -EINVAL;
2174        else if (len >= PATH_MAX)
2175                return -ENAMETOOLONG;
2176
2177        return bpf_map__set_pin_path(map, buf);
2178}
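
/* E.g., a map named "my_map" with a NULL pin_root_path ends up pinned at
 * "/sys/fs/bpf/my_map".
 */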
2179
2180int parse_btf_map_def(const char *map_name, struct btf *btf,
2181                      const struct btf_type *def_t, bool strict,
2182                      struct btf_map_def *map_def, struct btf_map_def *inner_def)
2183{
2184        const struct btf_type *t;
2185        const struct btf_member *m;
2186        bool is_inner = inner_def == NULL;
2187        int vlen, i;
2188
2189        vlen = btf_vlen(def_t);
2190        m = btf_members(def_t);
2191        for (i = 0; i < vlen; i++, m++) {
2192                const char *name = btf__name_by_offset(btf, m->name_off);
2193
2194                if (!name) {
2195                        pr_warn("map '%s': invalid field #%d.\n", map_name, i);
2196                        return -EINVAL;
2197                }
2198                if (strcmp(name, "type") == 0) {
2199                        if (!get_map_field_int(map_name, btf, m, &map_def->map_type))
2200                                return -EINVAL;
2201                        map_def->parts |= MAP_DEF_MAP_TYPE;
2202                } else if (strcmp(name, "max_entries") == 0) {
2203                        if (!get_map_field_int(map_name, btf, m, &map_def->max_entries))
2204                                return -EINVAL;
2205                        map_def->parts |= MAP_DEF_MAX_ENTRIES;
2206                } else if (strcmp(name, "map_flags") == 0) {
2207                        if (!get_map_field_int(map_name, btf, m, &map_def->map_flags))
2208                                return -EINVAL;
2209                        map_def->parts |= MAP_DEF_MAP_FLAGS;
2210                } else if (strcmp(name, "numa_node") == 0) {
2211                        if (!get_map_field_int(map_name, btf, m, &map_def->numa_node))
2212                                return -EINVAL;
2213                        map_def->parts |= MAP_DEF_NUMA_NODE;
2214                } else if (strcmp(name, "key_size") == 0) {
2215                        __u32 sz;
2216
2217                        if (!get_map_field_int(map_name, btf, m, &sz))
2218                                return -EINVAL;
2219                        if (map_def->key_size && map_def->key_size != sz) {
2220                                pr_warn("map '%s': conflicting key size %u != %u.\n",
2221                                        map_name, map_def->key_size, sz);
2222                                return -EINVAL;
2223                        }
2224                        map_def->key_size = sz;
2225                        map_def->parts |= MAP_DEF_KEY_SIZE;
2226                } else if (strcmp(name, "key") == 0) {
2227                        __s64 sz;
2228
2229                        t = btf__type_by_id(btf, m->type);
2230                        if (!t) {
2231                                pr_warn("map '%s': key type [%d] not found.\n",
2232                                        map_name, m->type);
2233                                return -EINVAL;
2234                        }
2235                        if (!btf_is_ptr(t)) {
2236                                pr_warn("map '%s': key spec is not PTR: %s.\n",
2237                                        map_name, btf_kind_str(t));
2238                                return -EINVAL;
2239                        }
2240                        sz = btf__resolve_size(btf, t->type);
2241                        if (sz < 0) {
2242                                pr_warn("map '%s': can't determine key size for type [%u]: %zd.\n",
2243                                        map_name, t->type, (ssize_t)sz);
2244                                return sz;
2245                        }
2246                        if (map_def->key_size && map_def->key_size != sz) {
2247                                pr_warn("map '%s': conflicting key size %u != %zd.\n",
2248                                        map_name, map_def->key_size, (ssize_t)sz);
2249                                return -EINVAL;
2250                        }
2251                        map_def->key_size = sz;
2252                        map_def->key_type_id = t->type;
2253                        map_def->parts |= MAP_DEF_KEY_SIZE | MAP_DEF_KEY_TYPE;
2254                } else if (strcmp(name, "value_size") == 0) {
2255                        __u32 sz;
2256
2257                        if (!get_map_field_int(map_name, btf, m, &sz))
2258                                return -EINVAL;
2259                        if (map_def->value_size && map_def->value_size != sz) {
2260                                pr_warn("map '%s': conflicting value size %u != %u.\n",
2261                                        map_name, map_def->value_size, sz);
2262                                return -EINVAL;
2263                        }
2264                        map_def->value_size = sz;
2265                        map_def->parts |= MAP_DEF_VALUE_SIZE;
2266                } else if (strcmp(name, "value") == 0) {
2267                        __s64 sz;
2268
2269                        t = btf__type_by_id(btf, m->type);
2270                        if (!t) {
2271                                pr_warn("map '%s': value type [%d] not found.\n",
2272                                        map_name, m->type);
2273                                return -EINVAL;
2274                        }
2275                        if (!btf_is_ptr(t)) {
2276                                pr_warn("map '%s': value spec is not PTR: %s.\n",
2277                                        map_name, btf_kind_str(t));
2278                                return -EINVAL;
2279                        }
2280                        sz = btf__resolve_size(btf, t->type);
2281                        if (sz < 0) {
2282                                pr_warn("map '%s': can't determine value size for type [%u]: %zd.\n",
2283                                        map_name, t->type, (ssize_t)sz);
2284                                return sz;
2285                        }
2286                        if (map_def->value_size && map_def->value_size != sz) {
2287                                pr_warn("map '%s': conflicting value size %u != %zd.\n",
2288                                        map_name, map_def->value_size, (ssize_t)sz);
2289                                return -EINVAL;
2290                        }
2291                        map_def->value_size = sz;
2292                        map_def->value_type_id = t->type;
2293                        map_def->parts |= MAP_DEF_VALUE_SIZE | MAP_DEF_VALUE_TYPE;
                } else if (strcmp(name, "values") == 0) {
2296                        bool is_map_in_map = bpf_map_type__is_map_in_map(map_def->map_type);
2297                        bool is_prog_array = map_def->map_type == BPF_MAP_TYPE_PROG_ARRAY;
2298                        const char *desc = is_map_in_map ? "map-in-map inner" : "prog-array value";
2299                        char inner_map_name[128];
2300                        int err;
2301
2302                        if (is_inner) {
2303                                pr_warn("map '%s': multi-level inner maps not supported.\n",
2304                                        map_name);
2305                                return -ENOTSUP;
2306                        }
2307                        if (i != vlen - 1) {
2308                                pr_warn("map '%s': '%s' member should be last.\n",
2309                                        map_name, name);
2310                                return -EINVAL;
2311                        }
2312                        if (!is_map_in_map && !is_prog_array) {
2313                                pr_warn("map '%s': should be map-in-map or prog-array.\n",
2314                                        map_name);
2315                                return -ENOTSUP;
2316                        }
2317                        if (map_def->value_size && map_def->value_size != 4) {
2318                                pr_warn("map '%s': conflicting value size %u != 4.\n",
2319                                        map_name, map_def->value_size);
2320                                return -EINVAL;
2321                        }
2322                        map_def->value_size = 4;
2323                        t = btf__type_by_id(btf, m->type);
2324                        if (!t) {
2325                                pr_warn("map '%s': %s type [%d] not found.\n",
2326                                        map_name, desc, m->type);
2327                                return -EINVAL;
2328                        }
2329                        if (!btf_is_array(t) || btf_array(t)->nelems) {
2330                                pr_warn("map '%s': %s spec is not a zero-sized array.\n",
2331                                        map_name, desc);
2332                                return -EINVAL;
2333                        }
2334                        t = skip_mods_and_typedefs(btf, btf_array(t)->type, NULL);
2335                        if (!btf_is_ptr(t)) {
2336                                pr_warn("map '%s': %s def is of unexpected kind %s.\n",
2337                                        map_name, desc, btf_kind_str(t));
2338                                return -EINVAL;
2339                        }
2340                        t = skip_mods_and_typedefs(btf, t->type, NULL);
2341                        if (is_prog_array) {
2342                                if (!btf_is_func_proto(t)) {
2343                                        pr_warn("map '%s': prog-array value def is of unexpected kind %s.\n",
2344                                                map_name, btf_kind_str(t));
2345                                        return -EINVAL;
2346                                }
2347                                continue;
2348                        }
2349                        if (!btf_is_struct(t)) {
2350                                pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
2351                                        map_name, btf_kind_str(t));
2352                                return -EINVAL;
2353                        }
2354
2355                        snprintf(inner_map_name, sizeof(inner_map_name), "%s.inner", map_name);
2356                        err = parse_btf_map_def(inner_map_name, btf, t, strict, inner_def, NULL);
2357                        if (err)
2358                                return err;
2359
2360                        map_def->parts |= MAP_DEF_INNER_MAP;
2361                } else if (strcmp(name, "pinning") == 0) {
2362                        __u32 val;
2363
2364                        if (is_inner) {
2365                                pr_warn("map '%s': inner def can't be pinned.\n", map_name);
2366                                return -EINVAL;
2367                        }
2368                        if (!get_map_field_int(map_name, btf, m, &val))
2369                                return -EINVAL;
2370                        if (val != LIBBPF_PIN_NONE && val != LIBBPF_PIN_BY_NAME) {
2371                                pr_warn("map '%s': invalid pinning value %u.\n",
2372                                        map_name, val);
2373                                return -EINVAL;
2374                        }
2375                        map_def->pinning = val;
2376                        map_def->parts |= MAP_DEF_PINNING;
2377                } else if (strcmp(name, "map_extra") == 0) {
2378                        __u32 map_extra;
2379
2380                        if (!get_map_field_int(map_name, btf, m, &map_extra))
2381                                return -EINVAL;
2382                        map_def->map_extra = map_extra;
2383                        map_def->parts |= MAP_DEF_MAP_EXTRA;
2384                } else {
2385                        if (strict) {
2386                                pr_warn("map '%s': unknown field '%s'.\n", map_name, name);
2387                                return -ENOTSUP;
2388                        }
2389                        pr_debug("map '%s': ignoring unknown field '%s'.\n", map_name, name);
2390                }
2391        }
2392
2393        if (map_def->map_type == BPF_MAP_TYPE_UNSPEC) {
2394                pr_warn("map '%s': map type isn't specified.\n", map_name);
2395                return -EINVAL;
2396        }
2397
2398        return 0;
2399}
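
/*
 * A BTF-defined map-in-map declaration exercising the "values" handling
 * above (illustrative, using the __uint/__type/__array macros from
 * bpf_helpers.h):
 *
 *   struct {
 *           __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
 *           __uint(max_entries, 8);
 *           __type(key, __u32);
 *           __array(values, struct {
 *                   __uint(type, BPF_MAP_TYPE_ARRAY);
 *                   __uint(max_entries, 1);
 *                   __type(key, __u32);
 *                   __type(value, __u64);
 *           });
 *   } outer SEC(".maps");
 */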
2400
2401static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def)
2402{
2403        map->def.type = def->map_type;
2404        map->def.key_size = def->key_size;
2405        map->def.value_size = def->value_size;
2406        map->def.max_entries = def->max_entries;
2407        map->def.map_flags = def->map_flags;
2408        map->map_extra = def->map_extra;
2409
2410        map->numa_node = def->numa_node;
2411        map->btf_key_type_id = def->key_type_id;
2412        map->btf_value_type_id = def->value_type_id;
2413
2414        if (def->parts & MAP_DEF_MAP_TYPE)
2415                pr_debug("map '%s': found type = %u.\n", map->name, def->map_type);
2416
2417        if (def->parts & MAP_DEF_KEY_TYPE)
2418                pr_debug("map '%s': found key [%u], sz = %u.\n",
2419                         map->name, def->key_type_id, def->key_size);
2420        else if (def->parts & MAP_DEF_KEY_SIZE)
2421                pr_debug("map '%s': found key_size = %u.\n", map->name, def->key_size);
2422
2423        if (def->parts & MAP_DEF_VALUE_TYPE)
2424                pr_debug("map '%s': found value [%u], sz = %u.\n",
2425                         map->name, def->value_type_id, def->value_size);
2426        else if (def->parts & MAP_DEF_VALUE_SIZE)
2427                pr_debug("map '%s': found value_size = %u.\n", map->name, def->value_size);
2428
2429        if (def->parts & MAP_DEF_MAX_ENTRIES)
2430                pr_debug("map '%s': found max_entries = %u.\n", map->name, def->max_entries);
2431        if (def->parts & MAP_DEF_MAP_FLAGS)
2432                pr_debug("map '%s': found map_flags = 0x%x.\n", map->name, def->map_flags);
2433        if (def->parts & MAP_DEF_MAP_EXTRA)
2434                pr_debug("map '%s': found map_extra = 0x%llx.\n", map->name,
2435                         (unsigned long long)def->map_extra);
2436        if (def->parts & MAP_DEF_PINNING)
2437                pr_debug("map '%s': found pinning = %u.\n", map->name, def->pinning);
2438        if (def->parts & MAP_DEF_NUMA_NODE)
2439                pr_debug("map '%s': found numa_node = %u.\n", map->name, def->numa_node);
2440
2441        if (def->parts & MAP_DEF_INNER_MAP)
2442                pr_debug("map '%s': found inner map definition.\n", map->name);
2443}
2444
2445static const char *btf_var_linkage_str(__u32 linkage)
2446{
2447        switch (linkage) {
2448        case BTF_VAR_STATIC: return "static";
2449        case BTF_VAR_GLOBAL_ALLOCATED: return "global";
2450        case BTF_VAR_GLOBAL_EXTERN: return "extern";
2451        default: return "unknown";
2452        }
2453}
2454
2455static int bpf_object__init_user_btf_map(struct bpf_object *obj,
2456                                         const struct btf_type *sec,
2457                                         int var_idx, int sec_idx,
2458                                         const Elf_Data *data, bool strict,
2459                                         const char *pin_root_path)
2460{
2461        struct btf_map_def map_def = {}, inner_def = {};
2462        const struct btf_type *var, *def;
2463        const struct btf_var_secinfo *vi;
2464        const struct btf_var *var_extra;
2465        const char *map_name;
2466        struct bpf_map *map;
2467        int err;
2468
2469        vi = btf_var_secinfos(sec) + var_idx;
2470        var = btf__type_by_id(obj->btf, vi->type);
2471        var_extra = btf_var(var);
2472        map_name = btf__name_by_offset(obj->btf, var->name_off);
2473
2474        if (map_name == NULL || map_name[0] == '\0') {
2475                pr_warn("map #%d: empty name.\n", var_idx);
2476                return -EINVAL;
2477        }
2478        if ((__u64)vi->offset + vi->size > data->d_size) {
2479                pr_warn("map '%s' BTF data is corrupted.\n", map_name);
2480                return -EINVAL;
2481        }
2482        if (!btf_is_var(var)) {
2483                pr_warn("map '%s': unexpected var kind %s.\n",
2484                        map_name, btf_kind_str(var));
2485                return -EINVAL;
2486        }
2487        if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
2488                pr_warn("map '%s': unsupported map linkage %s.\n",
2489                        map_name, btf_var_linkage_str(var_extra->linkage));
2490                return -EOPNOTSUPP;
2491        }
2492
2493        def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
2494        if (!btf_is_struct(def)) {
2495                pr_warn("map '%s': unexpected def kind %s.\n",
                        map_name, btf_kind_str(def));
2497                return -EINVAL;
2498        }
2499        if (def->size > vi->size) {
2500                pr_warn("map '%s': invalid def size.\n", map_name);
2501                return -EINVAL;
2502        }
2503
2504        map = bpf_object__add_map(obj);
2505        if (IS_ERR(map))
2506                return PTR_ERR(map);
2507        map->name = strdup(map_name);
2508        if (!map->name) {
2509                pr_warn("map '%s': failed to alloc map name.\n", map_name);
2510                return -ENOMEM;
2511        }
2512        map->libbpf_type = LIBBPF_MAP_UNSPEC;
2513        map->def.type = BPF_MAP_TYPE_UNSPEC;
2514        map->sec_idx = sec_idx;
2515        map->sec_offset = vi->offset;
2516        map->btf_var_idx = var_idx;
2517        pr_debug("map '%s': at sec_idx %d, offset %zu.\n",
2518                 map_name, map->sec_idx, map->sec_offset);
2519
2520        err = parse_btf_map_def(map->name, obj->btf, def, strict, &map_def, &inner_def);
2521        if (err)
2522                return err;
2523
2524        fill_map_from_def(map, &map_def);
2525
2526        if (map_def.pinning == LIBBPF_PIN_BY_NAME) {
2527                err = build_map_pin_path(map, pin_root_path);
2528                if (err) {
2529                        pr_warn("map '%s': couldn't build pin path.\n", map->name);
2530                        return err;
2531                }
2532        }
2533
2534        if (map_def.parts & MAP_DEF_INNER_MAP) {
2535                map->inner_map = calloc(1, sizeof(*map->inner_map));
2536                if (!map->inner_map)
2537                        return -ENOMEM;
2538                map->inner_map->fd = -1;
2539                map->inner_map->sec_idx = sec_idx;
2540                map->inner_map->name = malloc(strlen(map_name) + sizeof(".inner") + 1);
2541                if (!map->inner_map->name)
2542                        return -ENOMEM;
2543                sprintf(map->inner_map->name, "%s.inner", map_name);
2544
2545                fill_map_from_def(map->inner_map, &inner_def);
2546        }
2547
2548        err = bpf_map_find_btf_info(obj, map);
2549        if (err)
2550                return err;
2551
2552        return 0;
2553}
2554
2555static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
2556                                          const char *pin_root_path)
2557{
2558        const struct btf_type *sec = NULL;
2559        int nr_types, i, vlen, err;
2560        const struct btf_type *t;
2561        const char *name;
2562        Elf_Data *data;
2563        Elf_Scn *scn;
2564
2565        if (obj->efile.btf_maps_shndx < 0)
2566                return 0;
2567
2568        scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx);
2569        data = elf_sec_data(obj, scn);
2570        if (!scn || !data) {
2571                pr_warn("elf: failed to get %s map definitions for %s\n",
2572                        MAPS_ELF_SEC, obj->path);
2573                return -EINVAL;
2574        }
2575
2576        nr_types = btf__type_cnt(obj->btf);
2577        for (i = 1; i < nr_types; i++) {
2578                t = btf__type_by_id(obj->btf, i);
2579                if (!btf_is_datasec(t))
2580                        continue;
2581                name = btf__name_by_offset(obj->btf, t->name_off);
2582                if (strcmp(name, MAPS_ELF_SEC) == 0) {
2583                        sec = t;
2584                        obj->efile.btf_maps_sec_btf_id = i;
2585                        break;
2586                }
2587        }
2588
2589        if (!sec) {
2590                pr_warn("DATASEC '%s' not found.\n", MAPS_ELF_SEC);
2591                return -ENOENT;
2592        }
2593
2594        vlen = btf_vlen(sec);
2595        for (i = 0; i < vlen; i++) {
2596                err = bpf_object__init_user_btf_map(obj, sec, i,
2597                                                    obj->efile.btf_maps_shndx,
2598                                                    data, strict,
2599                                                    pin_root_path);
2600                if (err)
2601                        return err;
2602        }
2603
2604        return 0;
2605}
2606
2607static int bpf_object__init_maps(struct bpf_object *obj,
2608                                 const struct bpf_object_open_opts *opts)
2609{
2610        const char *pin_root_path;
2611        bool strict;
2612        int err;
2613
2614        strict = !OPTS_GET(opts, relaxed_maps, false);
2615        pin_root_path = OPTS_GET(opts, pin_root_path, NULL);
2616
2617        err = bpf_object__init_user_maps(obj, strict);
2618        err = err ?: bpf_object__init_user_btf_maps(obj, strict, pin_root_path);
2619        err = err ?: bpf_object__init_global_data_maps(obj);
2620        err = err ?: bpf_object__init_kconfig_map(obj);
2621        err = err ?: bpf_object__init_struct_ops_maps(obj);
2622
2623        return err;
2624}
2625
2626static bool section_have_execinstr(struct bpf_object *obj, int idx)
2627{
2628        Elf64_Shdr *sh;
2629
2630        sh = elf_sec_hdr(obj, elf_sec_by_idx(obj, idx));
2631        if (!sh)
2632                return false;
2633
2634        return sh->sh_flags & SHF_EXECINSTR;
2635}
2636
2637static bool btf_needs_sanitization(struct bpf_object *obj)
2638{
2639        bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
2640        bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
2641        bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
2642        bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
2643        bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
2644        bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
2645
2646        return !has_func || !has_datasec || !has_func_global || !has_float ||
2647               !has_decl_tag || !has_type_tag;
2648}
2649
2650static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
2651{
2652        bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
2653        bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
2654        bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
2655        bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
2656        bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
2657        bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
2658        struct btf_type *t;
2659        int i, j, vlen;
2660
2661        for (i = 1; i < btf__type_cnt(btf); i++) {
2662                t = (struct btf_type *)btf__type_by_id(btf, i);
2663
2664                if ((!has_datasec && btf_is_var(t)) || (!has_decl_tag && btf_is_decl_tag(t))) {
2665                        /* replace VAR/DECL_TAG with INT */
2666                        t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
2667                        /*
2668                         * using size = 1 is the safest choice, 4 will be too
2669                         * big and cause kernel BTF validation failure if
2670                         * original variable took less than 4 bytes
2671                         */
2672                        t->size = 1;
2673                        *(int *)(t + 1) = BTF_INT_ENC(0, 0, 8);
2674                } else if (!has_datasec && btf_is_datasec(t)) {
2675                        /* replace DATASEC with STRUCT */
2676                        const struct btf_var_secinfo *v = btf_var_secinfos(t);
2677                        struct btf_member *m = btf_members(t);
2678                        struct btf_type *vt;
2679                        char *name;
2680
2681                        name = (char *)btf__name_by_offset(btf, t->name_off);
2682                        while (*name) {
2683                                if (*name == '.')
2684                                        *name = '_';
2685                                name++;
2686                        }
2687
2688                        vlen = btf_vlen(t);
2689                        t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
2690                        for (j = 0; j < vlen; j++, v++, m++) {
2691                                /* order of field assignments is important */
2692                                m->offset = v->offset * 8;
2693                                m->type = v->type;
2694                                /* preserve variable name as member name */
2695                                vt = (void *)btf__type_by_id(btf, v->type);
2696                                m->name_off = vt->name_off;
2697                        }
2698                } else if (!has_func && btf_is_func_proto(t)) {
2699                        /* replace FUNC_PROTO with ENUM */
2700                        vlen = btf_vlen(t);
2701                        t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
2702                        t->size = sizeof(__u32); /* kernel enforced */
2703                } else if (!has_func && btf_is_func(t)) {
2704                        /* replace FUNC with TYPEDEF */
2705                        t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
2706                } else if (!has_func_global && btf_is_func(t)) {
2707                        /* replace BTF_FUNC_GLOBAL with BTF_FUNC_STATIC */
2708                        t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0);
2709                } else if (!has_float && btf_is_float(t)) {
2710                        /* replace FLOAT with an equally-sized empty STRUCT;
2711                         * since C compilers do not accept e.g. "float" as a
2712                         * valid struct name, make it anonymous
2713                         */
2714                        t->name_off = 0;
2715                        t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0);
2716                } else if (!has_type_tag && btf_is_type_tag(t)) {
2717                        /* replace TYPE_TAG with a CONST */
2718                        t->name_off = 0;
2719                        t->info = BTF_INFO_ENC(BTF_KIND_CONST, 0, 0);
2720                }
2721        }
2722}
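
/*
 * E.g. (illustrative), on a kernel lacking BTF_KIND_DATASEC support, a
 * DATASEC ".data" containing one int variable "x" at byte offset 4 is
 * rewritten as a STRUCT "_data" with member "x" at bit offset 32, and the
 * VAR itself is replaced with a 1-byte INT.
 */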
2723
2724static bool libbpf_needs_btf(const struct bpf_object *obj)
2725{
2726        return obj->efile.btf_maps_shndx >= 0 ||
2727               obj->efile.st_ops_shndx >= 0 ||
2728               obj->nr_extern > 0;
2729}
2730
2731static bool kernel_needs_btf(const struct bpf_object *obj)
2732{
2733        return obj->efile.st_ops_shndx >= 0;
2734}
2735
2736static int bpf_object__init_btf(struct bpf_object *obj,
2737                                Elf_Data *btf_data,
2738                                Elf_Data *btf_ext_data)
2739{
2740        int err = -ENOENT;
2741
2742        if (btf_data) {
2743                obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
2744                err = libbpf_get_error(obj->btf);
2745                if (err) {
2746                        obj->btf = NULL;
2747                        pr_warn("Error loading ELF section %s: %d.\n", BTF_ELF_SEC, err);
2748                        goto out;
2749                }
2750                /* enforce 8-byte pointers for BPF-targeted BTFs */
2751                btf__set_pointer_size(obj->btf, 8);
2752        }
2753        if (btf_ext_data) {
2754                struct btf_ext_info *ext_segs[3];
2755                int seg_num, sec_num;
2756
2757                if (!obj->btf) {
2758                        pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n",
2759                                 BTF_EXT_ELF_SEC, BTF_ELF_SEC);
2760                        goto out;
2761                }
2762                obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size);
2763                err = libbpf_get_error(obj->btf_ext);
2764                if (err) {
2765                        pr_warn("Error loading ELF section %s: %d. Ignored and continue.\n",
2766                                BTF_EXT_ELF_SEC, err);
2767                        obj->btf_ext = NULL;
2768                        goto out;
2769                }
2770
2771                /* setup .BTF.ext to ELF section mapping */
2772                ext_segs[0] = &obj->btf_ext->func_info;
2773                ext_segs[1] = &obj->btf_ext->line_info;
2774                ext_segs[2] = &obj->btf_ext->core_relo_info;
2775                for (seg_num = 0; seg_num < ARRAY_SIZE(ext_segs); seg_num++) {
2776                        struct btf_ext_info *seg = ext_segs[seg_num];
2777                        const struct btf_ext_info_sec *sec;
2778                        const char *sec_name;
2779                        Elf_Scn *scn;
2780
2781                        if (seg->sec_cnt == 0)
2782                                continue;
2783
2784                        seg->sec_idxs = calloc(seg->sec_cnt, sizeof(*seg->sec_idxs));
2785                        if (!seg->sec_idxs) {
2786                                err = -ENOMEM;
2787                                goto out;
2788                        }
2789
2790                        sec_num = 0;
2791                        for_each_btf_ext_sec(seg, sec) {
                                /* increment the index up front to avoid
                                 * having to do it before every continue
                                 * below
                                 */
2795                                sec_num++;
2796
2797                                sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
2798                                if (str_is_empty(sec_name))
2799                                        continue;
2800                                scn = elf_sec_by_name(obj, sec_name);
2801                                if (!scn)
2802                                        continue;
2803
2804                                seg->sec_idxs[sec_num - 1] = elf_ndxscn(scn);
2805                        }
2806                }
2807        }
2808out:
2809        if (err && libbpf_needs_btf(obj)) {
2810                pr_warn("BTF is required, but is missing or corrupted.\n");
2811                return err;
2812        }
2813        return 0;
2814}
2815
2816static int compare_vsi_off(const void *_a, const void *_b)
2817{
2818        const struct btf_var_secinfo *a = _a;
2819        const struct btf_var_secinfo *b = _b;
2820
2821        return a->offset - b->offset;
2822}
2823
2824static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
2825                             struct btf_type *t)
2826{
2827        __u32 size = 0, off = 0, i, vars = btf_vlen(t);
2828        const char *name = btf__name_by_offset(btf, t->name_off);
2829        const struct btf_type *t_var;
2830        struct btf_var_secinfo *vsi;
2831        const struct btf_var *var;
2832        int ret;
2833
2834        if (!name) {
2835                pr_debug("No name found in string section for DATASEC kind.\n");
2836                return -ENOENT;
2837        }
2838
2839        /* .extern datasec size and var offsets were set correctly during
2840         * extern collection step, so just skip straight to sorting variables
2841         */
2842        if (t->size)
2843                goto sort_vars;
2844
2845        ret = find_elf_sec_sz(obj, name, &size);
2846        if (ret || !size) {
2847                pr_debug("Invalid size for section %s: %u bytes\n", name, size);
2848                return -ENOENT;
2849        }
2850
2851        t->size = size;
2852
2853        for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
2854                t_var = btf__type_by_id(btf, vsi->type);
2855                if (!t_var || !btf_is_var(t_var)) {
2856                        pr_debug("Non-VAR type seen in section %s\n", name);
2857                        return -EINVAL;
2858                }
2859
2860                var = btf_var(t_var);
2861                if (var->linkage == BTF_VAR_STATIC)
2862                        continue;
2863
2864                name = btf__name_by_offset(btf, t_var->name_off);
2865                if (!name) {
2866                        pr_debug("No name found in string section for VAR kind\n");
2867                        return -ENOENT;
2868                }
2869
2870                ret = find_elf_var_offset(obj, name, &off);
2871                if (ret) {
2872                        pr_debug("No offset found in symbol table for VAR %s\n",
2873                                 name);
2874                        return -ENOENT;
2875                }
2876
2877                vsi->offset = off;
2878        }
2879
2880sort_vars:
2881        qsort(btf_var_secinfos(t), vars, sizeof(*vsi), compare_vsi_off);
2882        return 0;
2883}
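
/*
 * Illustration of the fixup above (sizes/offsets are made up): for a
 * BPF-side global like
 *
 *	int my_counter;		... lands in .bss
 *
 * the compiler emits a DATASEC with size 0 and a VAR secinfo with offset 0,
 * because the final section layout is only known after linking.
 * btf_fixup_datasec() then patches t->size from the ELF section header and
 * vsi->offset from the symbol table, e.g. size 16 / offset 8 if my_counter
 * happens to be the second variable laid out in .bss.
 */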
2884
2885static int btf_finalize_data(struct bpf_object *obj, struct btf *btf)
2886{
2887        int err = 0;
2888        __u32 i, n = btf__type_cnt(btf);
2889
2890        for (i = 1; i < n; i++) {
2891                struct btf_type *t = btf_type_by_id(btf, i);
2892
2893                /* The loader needs to fix up a few things the compiler
2894                 * couldn't get its hands on while emitting BTF, namely
2895                 * section sizes and global variable offsets. We use the
2896                 * info from the ELF itself for this purpose.
2897                 */
2898                if (btf_is_datasec(t)) {
2899                        err = btf_fixup_datasec(obj, btf, t);
2900                        if (err)
2901                                break;
2902                }
2903        }
2904
2905        return libbpf_err(err);
2906}
2907
2908int btf__finalize_data(struct bpf_object *obj, struct btf *btf)
2909{
2910        return btf_finalize_data(obj, btf);
2911}
2912
2913static int bpf_object__finalize_btf(struct bpf_object *obj)
2914{
2915        int err;
2916
2917        if (!obj->btf)
2918                return 0;
2919
2920        err = btf_finalize_data(obj, obj->btf);
2921        if (err) {
2922                pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err);
2923                return err;
2924        }
2925
2926        return 0;
2927}
2928
2929static bool prog_needs_vmlinux_btf(struct bpf_program *prog)
2930{
2931        if (prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
2932            prog->type == BPF_PROG_TYPE_LSM)
2933                return true;
2934
2935        /* BPF_PROG_TYPE_TRACING programs which do not attach to other programs
2936         * also need vmlinux BTF
2937         */
2938        if (prog->type == BPF_PROG_TYPE_TRACING && !prog->attach_prog_fd)
2939                return true;
2940
2941        return false;
2942}
2943
2944static bool obj_needs_vmlinux_btf(const struct bpf_object *obj)
2945{
2946        struct bpf_program *prog;
2947        int i;
2948
2949        /* CO-RE relocations need kernel BTF, only when btf_custom_path
2950         * is not specified
2951         */
2952        if (obj->btf_ext && obj->btf_ext->core_relo_info.len && !obj->btf_custom_path)
2953                return true;
2954
2955        /* Support for typed ksyms needs kernel BTF */
2956        for (i = 0; i < obj->nr_extern; i++) {
2957                const struct extern_desc *ext;
2958
2959                ext = &obj->externs[i];
2960                if (ext->type == EXT_KSYM && ext->ksym.type_id)
2961                        return true;
2962        }
2963
2964        bpf_object__for_each_program(prog, obj) {
2965                if (!prog->autoload)
2966                        continue;
2967                if (prog_needs_vmlinux_btf(prog))
2968                        return true;
2969        }
2970
2971        return false;
2972}
2973
2974static int bpf_object__load_vmlinux_btf(struct bpf_object *obj, bool force)
2975{
2976        int err;
2977
2978        /* btf_vmlinux could be loaded earlier */
2979        if (obj->btf_vmlinux || obj->gen_loader)
2980                return 0;
2981
2982        if (!force && !obj_needs_vmlinux_btf(obj))
2983                return 0;
2984
2985        obj->btf_vmlinux = btf__load_vmlinux_btf();
2986        err = libbpf_get_error(obj->btf_vmlinux);
2987        if (err) {
2988                pr_warn("Error loading vmlinux BTF: %d\n", err);
2989                obj->btf_vmlinux = NULL;
2990                return err;
2991        }
2992        return 0;
2993}
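
/*
 * Aside: a minimal standalone sketch of the same kernel-BTF loading,
 * assuming a kernel that exposes /sys/kernel/btf/vmlinux; "task_struct"
 * is just an example lookup:
 *
 *	struct btf *vmlinux_btf = btf__load_vmlinux_btf();
 *	__s32 id;
 *
 *	if (!libbpf_get_error(vmlinux_btf)) {
 *		id = btf__find_by_name_kind(vmlinux_btf, "task_struct",
 *					    BTF_KIND_STRUCT);
 *		...
 *	}
 *	btf__free(vmlinux_btf);
 */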
2994
2995static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
2996{
2997        struct btf *kern_btf = obj->btf;
2998        bool btf_mandatory, sanitize;
2999        int i, err = 0;
3000
3001        if (!obj->btf)
3002                return 0;
3003
3004        if (!kernel_supports(obj, FEAT_BTF)) {
3005                if (kernel_needs_btf(obj)) {
3006                        err = -EOPNOTSUPP;
3007                        goto report;
3008                }
3009                pr_debug("Kernel doesn't support BTF, skipping BTF upload.\n");
3010                return 0;
3011        }
3012
3013        /* Even if some subprogs are global/weak, the user might prefer the
3014         * more permissive BPF verification process that the BPF verifier
3015         * performs for static functions, as it takes into account more
3016         * context from the caller functions. In such a case, those subprogs
3017         * must be marked with __attribute__((visibility("hidden"))), and
3018         * libbpf will adjust the corresponding FUNC BTF type to be static,
3019         * triggering the more involved BPF verification process.
3020         */
3021        for (i = 0; i < obj->nr_programs; i++) {
3022                struct bpf_program *prog = &obj->programs[i];
3023                struct btf_type *t;
3024                const char *name;
3025                int j, n;
3026
3027                if (!prog->mark_btf_static || !prog_is_subprog(obj, prog))
3028                        continue;
3029
3030                n = btf__type_cnt(obj->btf);
3031                for (j = 1; j < n; j++) {
3032                        t = btf_type_by_id(obj->btf, j);
3033                        if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL)
3034                                continue;
3035
3036                        name = btf__str_by_offset(obj->btf, t->name_off);
3037                        if (strcmp(name, prog->name) != 0)
3038                                continue;
3039
3040                        t->info = btf_type_info(BTF_KIND_FUNC, BTF_FUNC_STATIC, 0);
3041                        break;
3042                }
3043        }
3044
3045        sanitize = btf_needs_sanitization(obj);
3046        if (sanitize) {
3047                const void *raw_data;
3048                __u32 sz;
3049
3050                /* clone BTF to sanitize a copy and leave the original intact */
3051                raw_data = btf__raw_data(obj->btf, &sz);
3052                kern_btf = btf__new(raw_data, sz);
3053                err = libbpf_get_error(kern_btf);
3054                if (err)
3055                        return err;
3056
3057                /* enforce 8-byte pointers for the BPF-targeted BTF clone, too */
3058                btf__set_pointer_size(kern_btf, 8);
3059                bpf_object__sanitize_btf(obj, kern_btf);
3060        }
3061
3062        if (obj->gen_loader) {
3063                __u32 raw_size = 0;
3064                const void *raw_data = btf__raw_data(kern_btf, &raw_size);
3065
3066                if (!raw_data)
3067                        return -ENOMEM;
3068                bpf_gen__load_btf(obj->gen_loader, raw_data, raw_size);
3069                /* Pretend to have valid FD to pass various fd >= 0 checks.
3070                 * This fd == 0 will not be used with any syscall and will be reset to -1 eventually.
3071                 */
3072                btf__set_fd(kern_btf, 0);
3073        } else {
3074                /* currently BPF_BTF_LOAD only supports log_level 1 */
3075                err = btf_load_into_kernel(kern_btf, obj->log_buf, obj->log_size,
3076                                           obj->log_level ? 1 : 0);
3077        }
3078        if (sanitize) {
3079                if (!err) {
3080                        /* move fd to libbpf's BTF */
3081                        btf__set_fd(obj->btf, btf__fd(kern_btf));
3082                        btf__set_fd(kern_btf, -1);
3083                }
3084                btf__free(kern_btf);
3085        }
3086report:
3087        if (err) {
3088                btf_mandatory = kernel_needs_btf(obj);
3089                pr_warn("Error loading .BTF into kernel: %d. %s\n", err,
3090                        btf_mandatory ? "BTF is mandatory, can't proceed."
3091                                      : "BTF is optional, ignoring.");
3092                if (!btf_mandatory)
3093                        err = 0;
3094        }
3095        return err;
3096}
3097
3098static const char *elf_sym_str(const struct bpf_object *obj, size_t off)
3099{
3100        const char *name;
3101
3102        name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, off);
3103        if (!name) {
3104                pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
3105                        off, obj->path, elf_errmsg(-1));
3106                return NULL;
3107        }
3108
3109        return name;
3110}
3111
3112static const char *elf_sec_str(const struct bpf_object *obj, size_t off)
3113{
3114        const char *name;
3115
3116        name = elf_strptr(obj->efile.elf, obj->efile.shstrndx, off);
3117        if (!name) {
3118                pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
3119                        off, obj->path, elf_errmsg(-1));
3120                return NULL;
3121        }
3122
3123        return name;
3124}
3125
3126static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx)
3127{
3128        Elf_Scn *scn;
3129
3130        scn = elf_getscn(obj->efile.elf, idx);
3131        if (!scn) {
3132                pr_warn("elf: failed to get section(%zu) from %s: %s\n",
3133                        idx, obj->path, elf_errmsg(-1));
3134                return NULL;
3135        }
3136        return scn;
3137}
3138
3139static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name)
3140{
3141        Elf_Scn *scn = NULL;
3142        Elf *elf = obj->efile.elf;
3143        const char *sec_name;
3144
3145        while ((scn = elf_nextscn(elf, scn)) != NULL) {
3146                sec_name = elf_sec_name(obj, scn);
3147                if (!sec_name)
3148                        return NULL;
3149
3150                if (strcmp(sec_name, name) != 0)
3151                        continue;
3152
3153                return scn;
3154        }
3155        return NULL;
3156}
3157
3158static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn)
3159{
3160        Elf64_Shdr *shdr;
3161
3162        if (!scn)
3163                return NULL;
3164
3165        shdr = elf64_getshdr(scn);
3166        if (!shdr) {
3167                pr_warn("elf: failed to get section(%zu) header from %s: %s\n",
3168                        elf_ndxscn(scn), obj->path, elf_errmsg(-1));
3169                return NULL;
3170        }
3171
3172        return shdr;
3173}
3174
3175static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn)
3176{
3177        const char *name;
3178        Elf64_Shdr *sh;
3179
3180        if (!scn)
3181                return NULL;
3182
3183        sh = elf_sec_hdr(obj, scn);
3184        if (!sh)
3185                return NULL;
3186
3187        name = elf_sec_str(obj, sh->sh_name);
3188        if (!name) {
3189                pr_warn("elf: failed to get section(%zu) name from %s: %s\n",
3190                        elf_ndxscn(scn), obj->path, elf_errmsg(-1));
3191                return NULL;
3192        }
3193
3194        return name;
3195}
3196
3197static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn)
3198{
3199        Elf_Data *data;
3200
3201        if (!scn)
3202                return NULL;
3203
3204        data = elf_getdata(scn, 0);
3205        if (!data) {
3206                pr_warn("elf: failed to get section(%zu) %s data from %s: %s\n",
3207                        elf_ndxscn(scn), elf_sec_name(obj, scn) ?: "<?>",
3208                        obj->path, elf_errmsg(-1));
3209                return NULL;
3210        }
3211
3212        return data;
3213}
3214
3215static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx)
3216{
3217        if (idx >= obj->efile.symbols->d_size / sizeof(Elf64_Sym))
3218                return NULL;
3219
3220        return (Elf64_Sym *)obj->efile.symbols->d_buf + idx;
3221}
3222
3223static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx)
3224{
3225        if (idx >= data->d_size / sizeof(Elf64_Rel))
3226                return NULL;
3227
3228        return (Elf64_Rel *)data->d_buf + idx;
3229}
3230
3231static bool is_sec_name_dwarf(const char *name)
3232{
3233        /* approximation, but the actual list is too long */
3234        return str_has_pfx(name, ".debug_");
3235}
3236
3237static bool ignore_elf_section(Elf64_Shdr *hdr, const char *name)
3238{
3239        /* no special handling of .strtab */
3240        if (hdr->sh_type == SHT_STRTAB)
3241                return true;
3242
3243        /* ignore .llvm_addrsig section as well */
3244        if (hdr->sh_type == SHT_LLVM_ADDRSIG)
3245                return true;
3246
3247        /* no subprograms will lead to an empty .text section, ignore it */
3248        if (hdr->sh_type == SHT_PROGBITS && hdr->sh_size == 0 &&
3249            strcmp(name, ".text") == 0)
3250                return true;
3251
3252        /* DWARF sections */
3253        if (is_sec_name_dwarf(name))
3254                return true;
3255
3256        if (str_has_pfx(name, ".rel")) {
3257                name += sizeof(".rel") - 1;
3258                /* DWARF section relocations */
3259                if (is_sec_name_dwarf(name))
3260                        return true;
3261
3262                /* .BTF and .BTF.ext don't need relocations */
3263                if (strcmp(name, BTF_ELF_SEC) == 0 ||
3264                    strcmp(name, BTF_EXT_ELF_SEC) == 0)
3265                        return true;
3266        }
3267
3268        return false;
3269}
3270
3271static int cmp_progs(const void *_a, const void *_b)
3272{
3273        const struct bpf_program *a = _a;
3274        const struct bpf_program *b = _b;
3275
3276        if (a->sec_idx != b->sec_idx)
3277                return a->sec_idx < b->sec_idx ? -1 : 1;
3278
3279        /* sec_insn_off can't be the same within the section */
3280        return a->sec_insn_off < b->sec_insn_off ? -1 : 1;
3281}
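
/*
 * Worked example (made-up values): programs A (sec_idx 3, sec_insn_off 0),
 * B (sec_idx 3, sec_insn_off 12) and C (sec_idx 5, sec_insn_off 0) sort as
 * A, B, C. This total order is what lets find_prog_by_sec_insn() further
 * below binary-search a program by (sec_idx, insn_idx).
 */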
3282
3283static int bpf_object__elf_collect(struct bpf_object *obj)
3284{
3285        struct elf_sec_desc *sec_desc;
3286        Elf *elf = obj->efile.elf;
3287        Elf_Data *btf_ext_data = NULL;
3288        Elf_Data *btf_data = NULL;
3289        int idx = 0, err = 0;
3290        const char *name;
3291        Elf_Data *data;
3292        Elf_Scn *scn;
3293        Elf64_Shdr *sh;
3294
3295        /* ELF section indices are 0-based, but sec #0 is a special "invalid"
3296         * section. e_shnum does include sec #0, so e_shnum is exactly the
3297         * array size needed to keep track of all the sections.
3298         */
3299        obj->efile.sec_cnt = obj->efile.ehdr->e_shnum;
3300        obj->efile.secs = calloc(obj->efile.sec_cnt, sizeof(*obj->efile.secs));
3301        if (!obj->efile.secs)
3302                return -ENOMEM;
3303
3304        /* a bunch of ELF parsing functionality depends on processing symbols,
3305         * so do the first pass and find the symbol table
3306         */
3307        scn = NULL;
3308        while ((scn = elf_nextscn(elf, scn)) != NULL) {
3309                sh = elf_sec_hdr(obj, scn);
3310                if (!sh)
3311                        return -LIBBPF_ERRNO__FORMAT;
3312
3313                if (sh->sh_type == SHT_SYMTAB) {
3314                        if (obj->efile.symbols) {
3315                                pr_warn("elf: multiple symbol tables in %s\n", obj->path);
3316                                return -LIBBPF_ERRNO__FORMAT;
3317                        }
3318
3319                        data = elf_sec_data(obj, scn);
3320                        if (!data)
3321                                return -LIBBPF_ERRNO__FORMAT;
3322
3323                        idx = elf_ndxscn(scn);
3324
3325                        obj->efile.symbols = data;
3326                        obj->efile.symbols_shndx = idx;
3327                        obj->efile.strtabidx = sh->sh_link;
3328                }
3329        }
3330
3331        if (!obj->efile.symbols) {
3332                pr_warn("elf: couldn't find symbol table in %s, stripped object file?\n",
3333                        obj->path);
3334                return -ENOENT;
3335        }
3336
3337        scn = NULL;
3338        while ((scn = elf_nextscn(elf, scn)) != NULL) {
3339                idx = elf_ndxscn(scn);
3340                sec_desc = &obj->efile.secs[idx];
3341
3342                sh = elf_sec_hdr(obj, scn);
3343                if (!sh)
3344                        return -LIBBPF_ERRNO__FORMAT;
3345
3346                name = elf_sec_str(obj, sh->sh_name);
3347                if (!name)
3348                        return -LIBBPF_ERRNO__FORMAT;
3349
3350                if (ignore_elf_section(sh, name))
3351                        continue;
3352
3353                data = elf_sec_data(obj, scn);
3354                if (!data)
3355                        return -LIBBPF_ERRNO__FORMAT;
3356
3357                pr_debug("elf: section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
3358                         idx, name, (unsigned long)data->d_size,
3359                         (int)sh->sh_link, (unsigned long)sh->sh_flags,
3360                         (int)sh->sh_type);
3361
3362                if (strcmp(name, "license") == 0) {
3363                        err = bpf_object__init_license(obj, data->d_buf, data->d_size);
3364                        if (err)
3365                                return err;
3366                } else if (strcmp(name, "version") == 0) {
3367                        err = bpf_object__init_kversion(obj, data->d_buf, data->d_size);
3368                        if (err)
3369                                return err;
3370                } else if (strcmp(name, "maps") == 0) {
3371                        obj->efile.maps_shndx = idx;
3372                } else if (strcmp(name, MAPS_ELF_SEC) == 0) {
3373                        obj->efile.btf_maps_shndx = idx;
3374                } else if (strcmp(name, BTF_ELF_SEC) == 0) {
3375                        if (sh->sh_type != SHT_PROGBITS)
3376                                return -LIBBPF_ERRNO__FORMAT;
3377                        btf_data = data;
3378                } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
3379                        if (sh->sh_type != SHT_PROGBITS)
3380                                return -LIBBPF_ERRNO__FORMAT;
3381                        btf_ext_data = data;
3382                } else if (sh->sh_type == SHT_SYMTAB) {
3383                        /* already processed during the first pass above */
3384                } else if (sh->sh_type == SHT_PROGBITS && data->d_size > 0) {
3385                        if (sh->sh_flags & SHF_EXECINSTR) {
3386                                if (strcmp(name, ".text") == 0)
3387                                        obj->efile.text_shndx = idx;
3388                                err = bpf_object__add_programs(obj, data, name, idx);
3389                                if (err)
3390                                        return err;
3391                        } else if (strcmp(name, DATA_SEC) == 0 ||
3392                                   str_has_pfx(name, DATA_SEC ".")) {
3393                                sec_desc->sec_type = SEC_DATA;
3394                                sec_desc->shdr = sh;
3395                                sec_desc->data = data;
3396                        } else if (strcmp(name, RODATA_SEC) == 0 ||
3397                                   str_has_pfx(name, RODATA_SEC ".")) {
3398                                sec_desc->sec_type = SEC_RODATA;
3399                                sec_desc->shdr = sh;
3400                                sec_desc->data = data;
3401                        } else if (strcmp(name, STRUCT_OPS_SEC) == 0) {
3402                                obj->efile.st_ops_data = data;
3403                                obj->efile.st_ops_shndx = idx;
3404                        } else {
3405                                pr_info("elf: skipping unrecognized data section(%d) %s\n",
3406                                        idx, name);
3407                        }
3408                } else if (sh->sh_type == SHT_REL) {
3409                        int targ_sec_idx = sh->sh_info; /* points to other section */
3410
3411                        if (sh->sh_entsize != sizeof(Elf64_Rel) ||
3412                            targ_sec_idx >= obj->efile.sec_cnt)
3413                                return -LIBBPF_ERRNO__FORMAT;
3414
3415                        /* Only do relo for section with exec instructions */
3416                        if (!section_have_execinstr(obj, targ_sec_idx) &&
3417                            strcmp(name, ".rel" STRUCT_OPS_SEC) &&
3418                            strcmp(name, ".rel" MAPS_ELF_SEC)) {
3419                                pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n",
3420                                        idx, name, targ_sec_idx,
3421                                        elf_sec_name(obj, elf_sec_by_idx(obj, targ_sec_idx)) ?: "<?>");
3422                                continue;
3423                        }
3424
3425                        sec_desc->sec_type = SEC_RELO;
3426                        sec_desc->shdr = sh;
3427                        sec_desc->data = data;
3428                } else if (sh->sh_type == SHT_NOBITS && strcmp(name, BSS_SEC) == 0) {
3429                        sec_desc->sec_type = SEC_BSS;
3430                        sec_desc->shdr = sh;
3431                        sec_desc->data = data;
3432                } else {
3433                        pr_info("elf: skipping section(%d) %s (size %zu)\n", idx, name,
3434                                (size_t)sh->sh_size);
3435                }
3436        }
3437
3438        if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) {
3439                pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path);
3440                return -LIBBPF_ERRNO__FORMAT;
3441        }
3442
3443        /* sort BPF programs by section index and in-section instruction offset
3444         * for faster search */
3445        if (obj->nr_programs)
3446                qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs);
3447
3448        return bpf_object__init_btf(obj, btf_data, btf_ext_data);
3449}
3450
3451static bool sym_is_extern(const Elf64_Sym *sym)
3452{
3453        int bind = ELF64_ST_BIND(sym->st_info);
3454        /* externs are symbols w/ type=NOTYPE, bind=GLOBAL|WEAK, section=UND */
3455        return sym->st_shndx == SHN_UNDEF &&
3456               (bind == STB_GLOBAL || bind == STB_WEAK) &&
3457               ELF64_ST_TYPE(sym->st_info) == STT_NOTYPE;
3458}
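
/*
 * E.g., a BPF-side declaration like (a hypothetical kconfig extern, with
 * __kconfig provided by bpf_helpers.h):
 *
 *	extern unsigned int CONFIG_HZ __kconfig;
 *
 * compiles into an undefined (SHN_UNDEF) NOTYPE GLOBAL symbol, which is
 * exactly the shape sym_is_extern() matches.
 */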
3459
3460static bool sym_is_subprog(const Elf64_Sym *sym, int text_shndx)
3461{
3462        int bind = ELF64_ST_BIND(sym->st_info);
3463        int type = ELF64_ST_TYPE(sym->st_info);
3464
3465        /* in .text section */
3466        if (sym->st_shndx != text_shndx)
3467                return false;
3468
3469        /* local function */
3470        if (bind == STB_LOCAL && type == STT_SECTION)
3471                return true;
3472
3473        /* global function */
3474        return bind == STB_GLOBAL && type == STT_FUNC;
3475}
3476
3477static int find_extern_btf_id(const struct btf *btf, const char *ext_name)
3478{
3479        const struct btf_type *t;
3480        const char *tname;
3481        int i, n;
3482
3483        if (!btf)
3484                return -ESRCH;
3485
3486        n = btf__type_cnt(btf);
3487        for (i = 1; i < n; i++) {
3488                t = btf__type_by_id(btf, i);
3489
3490                if (!btf_is_var(t) && !btf_is_func(t))
3491                        continue;
3492
3493                tname = btf__name_by_offset(btf, t->name_off);
3494                if (strcmp(tname, ext_name))
3495                        continue;
3496
3497                if (btf_is_var(t) &&
3498                    btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN)
3499                        return -EINVAL;
3500
3501                if (btf_is_func(t) && btf_func_linkage(t) != BTF_FUNC_EXTERN)
3502                        return -EINVAL;
3503
3504                return i;
3505        }
3506
3507        return -ENOENT;
3508}
3509
3510static int find_extern_sec_btf_id(struct btf *btf, int ext_btf_id)
{
3511        const struct btf_var_secinfo *vs;
3512        const struct btf_type *t;
3513        int i, j, n;
3514
3515        if (!btf)
3516                return -ESRCH;
3517
3518        n = btf__type_cnt(btf);
3519        for (i = 1; i < n; i++) {
3520                t = btf__type_by_id(btf, i);
3521
3522                if (!btf_is_datasec(t))
3523                        continue;
3524
3525                vs = btf_var_secinfos(t);
3526                for (j = 0; j < btf_vlen(t); j++, vs++) {
3527                        if (vs->type == ext_btf_id)
3528                                return i;
3529                }
3530        }
3531
3532        return -ENOENT;
3533}
3534
3535static enum kcfg_type find_kcfg_type(const struct btf *btf, int id,
3536                                     bool *is_signed)
3537{
3538        const struct btf_type *t;
3539        const char *name;
3540
3541        t = skip_mods_and_typedefs(btf, id, NULL);
3542        name = btf__name_by_offset(btf, t->name_off);
3543
3544        if (is_signed)
3545                *is_signed = false;
3546        switch (btf_kind(t)) {
3547        case BTF_KIND_INT: {
3548                int enc = btf_int_encoding(t);
3549
3550                if (enc & BTF_INT_BOOL)
3551                        return t->size == 1 ? KCFG_BOOL : KCFG_UNKNOWN;
3552                if (is_signed)
3553                        *is_signed = enc & BTF_INT_SIGNED;
3554                if (t->size == 1)
3555                        return KCFG_CHAR;
3556                if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1)))
3557                        return KCFG_UNKNOWN;
3558                return KCFG_INT;
3559        }
3560        case BTF_KIND_ENUM:
3561                if (t->size != 4)
3562                        return KCFG_UNKNOWN;
3563                if (strcmp(name, "libbpf_tristate"))
3564                        return KCFG_UNKNOWN;
3565                return KCFG_TRISTATE;
3566        case BTF_KIND_ARRAY:
3567                if (btf_array(t)->nelems == 0)
3568                        return KCFG_UNKNOWN;
3569                if (find_kcfg_type(btf, btf_array(t)->type, NULL) != KCFG_CHAR)
3570                        return KCFG_UNKNOWN;
3571                return KCFG_CHAR_ARR;
3572        default:
3573                return KCFG_UNKNOWN;
3574        }
3575}
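
/*
 * Examples of the resulting classification (all declarations hypothetical):
 *
 *	extern _Bool CONFIG_A __kconfig;                 -> KCFG_BOOL
 *	extern char CONFIG_B __kconfig;                  -> KCFG_CHAR
 *	extern unsigned long CONFIG_C __kconfig;         -> KCFG_INT (unsigned)
 *	extern enum libbpf_tristate CONFIG_D __kconfig;  -> KCFG_TRISTATE
 *	extern char CONFIG_E[8] __kconfig;               -> KCFG_CHAR_ARR
 */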
3576
3577static int cmp_externs(const void *_a, const void *_b)
3578{
3579        const struct extern_desc *a = _a;
3580        const struct extern_desc *b = _b;
3581
3582        if (a->type != b->type)
3583                return a->type < b->type ? -1 : 1;
3584
3585        if (a->type == EXT_KCFG) {
3586                /* descending order by alignment requirements */
3587                if (a->kcfg.align != b->kcfg.align)
3588                        return a->kcfg.align > b->kcfg.align ? -1 : 1;
3589                /* ascending order by size, within same alignment class */
3590                if (a->kcfg.sz != b->kcfg.sz)
3591                        return a->kcfg.sz < b->kcfg.sz ? -1 : 1;
3592        }
3593
3594        /* resolve ties by name */
3595        return strcmp(a->name, b->name);
3596}
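
/*
 * Worked example (made-up kcfg externs): with (align, size) pairs x:(8,8),
 * y:(4,4) and z:(4,8), the resulting order is x, y, z: highest alignment
 * first, then smaller size, ties broken by name. Laying out .kconfig in
 * this order minimizes the padding inserted by the roundup() in
 * bpf_object__collect_externs() below.
 */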
3597
3598static int find_int_btf_id(const struct btf *btf)
3599{
3600        const struct btf_type *t;
3601        int i, n;
3602
3603        n = btf__type_cnt(btf);
3604        for (i = 1; i < n; i++) {
3605                t = btf__type_by_id(btf, i);
3606
3607                if (btf_is_int(t) && btf_int_bits(t) == 32)
3608                        return i;
3609        }
3610
3611        return 0;
3612}
3613
3614static int add_dummy_ksym_var(struct btf *btf)
3615{
3616        int i, int_btf_id, sec_btf_id, dummy_var_btf_id;
3617        const struct btf_var_secinfo *vs;
3618        const struct btf_type *sec;
3619
3620        if (!btf)
3621                return 0;
3622
3623        sec_btf_id = btf__find_by_name_kind(btf, KSYMS_SEC,
3624                                            BTF_KIND_DATASEC);
3625        if (sec_btf_id < 0)
3626                return 0;
3627
3628        sec = btf__type_by_id(btf, sec_btf_id);
3629        vs = btf_var_secinfos(sec);
3630        for (i = 0; i < btf_vlen(sec); i++, vs++) {
3631                const struct btf_type *vt;
3632
3633                vt = btf__type_by_id(btf, vs->type);
3634                if (btf_is_func(vt))
3635                        break;
3636        }
3637
3638        /* No func in ksyms sec.  No need to add dummy var. */
3639        if (i == btf_vlen(sec))
3640                return 0;
3641
3642        int_btf_id = find_int_btf_id(btf);
3643        dummy_var_btf_id = btf__add_var(btf,
3644                                        "dummy_ksym",
3645                                        BTF_VAR_GLOBAL_ALLOCATED,
3646                                        int_btf_id);
3647        if (dummy_var_btf_id < 0)
3648                pr_warn("cannot create a dummy_ksym var\n");
3649
3650        return dummy_var_btf_id;
3651}
3652
3653static int bpf_object__collect_externs(struct bpf_object *obj)
3654{
3655        struct btf_type *sec, *kcfg_sec = NULL, *ksym_sec = NULL;
3656        const struct btf_type *t;
3657        struct extern_desc *ext;
3658        int i, n, off, dummy_var_btf_id;
3659        const char *ext_name, *sec_name;
3660        Elf_Scn *scn;
3661        Elf64_Shdr *sh;
3662
3663        if (!obj->efile.symbols)
3664                return 0;
3665
3666        scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx);
3667        sh = elf_sec_hdr(obj, scn);
3668        if (!sh || sh->sh_entsize != sizeof(Elf64_Sym))
3669                return -LIBBPF_ERRNO__FORMAT;
3670
3671        dummy_var_btf_id = add_dummy_ksym_var(obj->btf);
3672        if (dummy_var_btf_id < 0)
3673                return dummy_var_btf_id;
3674
3675        n = sh->sh_size / sh->sh_entsize;
3676        pr_debug("looking for externs among %d symbols...\n", n);
3677
3678        for (i = 0; i < n; i++) {
3679                Elf64_Sym *sym = elf_sym_by_idx(obj, i);
3680
3681                if (!sym)
3682                        return -LIBBPF_ERRNO__FORMAT;
3683                if (!sym_is_extern(sym))
3684                        continue;
3685                ext_name = elf_sym_str(obj, sym->st_name);
3686                if (!ext_name || !ext_name[0])
3687                        continue;
3688
3689                ext = obj->externs;
3690                ext = libbpf_reallocarray(ext, obj->nr_extern + 1, sizeof(*ext));
3691                if (!ext)
3692                        return -ENOMEM;
3693                obj->externs = ext;
3694                ext = &ext[obj->nr_extern];
3695                memset(ext, 0, sizeof(*ext));
3696                obj->nr_extern++;
3697
3698                ext->btf_id = find_extern_btf_id(obj->btf, ext_name);
3699                if (ext->btf_id <= 0) {
3700                        pr_warn("failed to find BTF for extern '%s': %d\n",
3701                                ext_name, ext->btf_id);
3702                        return ext->btf_id;
3703                }
3704                t = btf__type_by_id(obj->btf, ext->btf_id);
3705                ext->name = btf__name_by_offset(obj->btf, t->name_off);
3706                ext->sym_idx = i;
3707                ext->is_weak = ELF64_ST_BIND(sym->st_info) == STB_WEAK;
3708
3709                ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id);
3710                if (ext->sec_btf_id <= 0) {
3711                        pr_warn("failed to find BTF for extern '%s' [%d] section: %d\n",
3712                                ext_name, ext->btf_id, ext->sec_btf_id);
3713                        return ext->sec_btf_id;
3714                }
3715                sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id);
3716                sec_name = btf__name_by_offset(obj->btf, sec->name_off);
3717
3718                if (strcmp(sec_name, KCONFIG_SEC) == 0) {
3719                        if (btf_is_func(t)) {
3720                                pr_warn("extern function %s is unsupported under %s section\n",
3721                                        ext->name, KCONFIG_SEC);
3722                                return -ENOTSUP;
3723                        }
3724                        kcfg_sec = sec;
3725                        ext->type = EXT_KCFG;
3726                        ext->kcfg.sz = btf__resolve_size(obj->btf, t->type);
3727                        if (ext->kcfg.sz <= 0) {
3728                                pr_warn("failed to resolve size of extern (kcfg) '%s': %d\n",
3729                                        ext_name, ext->kcfg.sz);
3730                                return ext->kcfg.sz;
3731                        }
3732                        ext->kcfg.align = btf__align_of(obj->btf, t->type);
3733                        if (ext->kcfg.align <= 0) {
3734                                pr_warn("failed to determine alignment of extern (kcfg) '%s': %d\n",
3735                                        ext_name, ext->kcfg.align);
3736                                return -EINVAL;
3737                        }
3738                        ext->kcfg.type = find_kcfg_type(obj->btf, t->type,
3739                                                        &ext->kcfg.is_signed);
3740                        if (ext->kcfg.type == KCFG_UNKNOWN) {
3741                                pr_warn("extern (kcfg) '%s' type is unsupported\n", ext_name);
3742                                return -ENOTSUP;
3743                        }
3744                } else if (strcmp(sec_name, KSYMS_SEC) == 0) {
3745                        ksym_sec = sec;
3746                        ext->type = EXT_KSYM;
3747                        skip_mods_and_typedefs(obj->btf, t->type,
3748                                               &ext->ksym.type_id);
3749                } else {
3750                        pr_warn("unrecognized extern section '%s'\n", sec_name);
3751                        return -ENOTSUP;
3752                }
3753        }
3754        pr_debug("collected %d externs total\n", obj->nr_extern);
3755
3756        if (!obj->nr_extern)
3757                return 0;
3758
3759        /* sort externs by type, for kcfg ones also by (align, size, name) */
3760        qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs);
3761
3762        /* for .ksyms section, we need to turn all externs into allocated
3763         * variables in BTF to pass kernel verification; we do this by
3764         * pretending that each extern is an int-sized (4-byte) variable
3765         */
3766        if (ksym_sec) {
3767                /* find existing 4-byte integer type in BTF to use for fake
3768                 * extern variables in DATASEC
3769                 */
3770                int int_btf_id = find_int_btf_id(obj->btf);
3771                /* For an extern function, the dummy_var added earlier
3772                 * will be used to replace vs->type, and its name
3773                 * string will be reused to fill in any
3774                 * missing parameter names.
3775                 */
3776                const struct btf_type *dummy_var;
3777
3778                dummy_var = btf__type_by_id(obj->btf, dummy_var_btf_id);
3779                for (i = 0; i < obj->nr_extern; i++) {
3780                        ext = &obj->externs[i];
3781                        if (ext->type != EXT_KSYM)
3782                                continue;
3783                        pr_debug("extern (ksym) #%d: symbol %d, name %s\n",
3784                                 i, ext->sym_idx, ext->name);
3785                }
3786
3787                sec = ksym_sec;
3788                n = btf_vlen(sec);
3789                for (i = 0, off = 0; i < n; i++, off += sizeof(int)) {
3790                        struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
3791                        struct btf_type *vt;
3792
3793                        vt = (void *)btf__type_by_id(obj->btf, vs->type);
3794                        ext_name = btf__name_by_offset(obj->btf, vt->name_off);
3795                        ext = find_extern_by_name(obj, ext_name);
3796                        if (!ext) {
3797                                pr_warn("failed to find extern definition for BTF %s '%s'\n",
3798                                        btf_kind_str(vt), ext_name);
3799                                return -ESRCH;
3800                        }
3801                        if (btf_is_func(vt)) {
3802                                const struct btf_type *func_proto;
3803                                struct btf_param *param;
3804                                int j;
3805
3806                                func_proto = btf__type_by_id(obj->btf,
3807                                                             vt->type);
3808                                param = btf_params(func_proto);
3809                                /* Reuse the dummy_var name string for any
3810                                 * func proto param that lacks a name.
3811                                 */
3812                                for (j = 0; j < btf_vlen(func_proto); j++)
3813                                        if (param[j].type && !param[j].name_off)
3814                                                param[j].name_off =
3815                                                        dummy_var->name_off;
3816                                vs->type = dummy_var_btf_id;
3817                                vt->info &= ~0xffff;
3818                                vt->info |= BTF_FUNC_GLOBAL;
3819                        } else {
3820                                btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
3821                                vt->type = int_btf_id;
3822                        }
3823                        vs->offset = off;
3824                        vs->size = sizeof(int);
3825                }
3826                sec->size = off;
3827        }
3828
3829        if (kcfg_sec) {
3830                sec = kcfg_sec;
3831                /* for kcfg externs calculate their offsets within a .kconfig map */
3832                off = 0;
3833                for (i = 0; i < obj->nr_extern; i++) {
3834                        ext = &obj->externs[i];
3835                        if (ext->type != EXT_KCFG)
3836                                continue;
3837
3838                        ext->kcfg.data_off = roundup(off, ext->kcfg.align);
3839                        off = ext->kcfg.data_off + ext->kcfg.sz;
3840                        pr_debug("extern (kcfg) #%d: symbol %d, off %u, name %s\n",
3841                                 i, ext->sym_idx, ext->kcfg.data_off, ext->name);
3842                }
3843                sec->size = off;
3844                n = btf_vlen(sec);
3845                for (i = 0; i < n; i++) {
3846                        struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
3847
3848                        t = btf__type_by_id(obj->btf, vs->type);
3849                        ext_name = btf__name_by_offset(obj->btf, t->name_off);
3850                        ext = find_extern_by_name(obj, ext_name);
3851                        if (!ext) {
3852                                pr_warn("failed to find extern definition for BTF var '%s'\n",
3853                                        ext_name);
3854                                return -ESRCH;
3855                        }
3856                        btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
3857                        vs->offset = ext->kcfg.data_off;
3858                }
3859        }
3860        return 0;
3861}
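
/*
 * For reference, the kinds of BPF-side declarations this function
 * classifies (hypothetical examples; __kconfig and __ksym are provided by
 * bpf_helpers.h):
 *
 *	extern unsigned int CONFIG_HZ __kconfig;	... EXT_KCFG
 *	extern const void bpf_prog_active __ksym;	... EXT_KSYM (untyped)
 *	extern const struct rq runqueues __ksym;	... EXT_KSYM (typed)
 */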
3862
3863struct bpf_program *
3864bpf_object__find_program_by_title(const struct bpf_object *obj,
3865                                  const char *title)
3866{
3867        struct bpf_program *pos;
3868
3869        bpf_object__for_each_program(pos, obj) {
3870                if (pos->sec_name && !strcmp(pos->sec_name, title))
3871                        return pos;
3872        }
3873        return errno = ENOENT, NULL;
3874}
3875
3876static bool prog_is_subprog(const struct bpf_object *obj,
3877                            const struct bpf_program *prog)
3878{
3879        /* For legacy reasons, libbpf supports entry-point BPF programs
3880         * without a SEC() attribute, i.e., those in the .text section. But if
3881         * there are 2 or more such programs in the .text section, they all
3882         * must be subprograms called from entry-point BPF programs in
3883         * designated SEC()'tions, otherwise there is no way to distinguish
3884         * which of those programs should be loaded vs. which are subprograms.
3885         * Similarly, if there is a function/program in .text and at least one
3886         * other BPF program with a custom SEC() attribute, then we just assume
3887         * .text programs are subprograms (even if they are not called from
3888         * other programs), because libbpf never explicitly supported mixing
3889         * SEC()-designated BPF programs and .text entry-point BPF programs.
3890         *
3891         * In libbpf 1.0 strict mode, we always consider .text
3892         * programs to be subprograms.
3893         */
3894
3895        if (libbpf_mode & LIBBPF_STRICT_SEC_NAME)
3896                return prog->sec_idx == obj->efile.text_shndx;
3897
3898        return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1;
3899}
3900
3901struct bpf_program *
3902bpf_object__find_program_by_name(const struct bpf_object *obj,
3903                                 const char *name)
3904{
3905        struct bpf_program *prog;
3906
3907        bpf_object__for_each_program(prog, obj) {
3908                if (prog_is_subprog(obj, prog))
3909                        continue;
3910                if (!strcmp(prog->name, name))
3911                        return prog;
3912        }
3913        return errno = ENOENT, NULL;
3914}
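
/*
 * Usage sketch (hypothetical caller, error handling elided):
 *
 *	struct bpf_object *obj = bpf_object__open_file("prog.bpf.o", NULL);
 *	struct bpf_program *prog;
 *
 *	prog = bpf_object__find_program_by_name(obj, "handle_exec");
 *	if (!prog)
 *		... errno is set to ENOENT ...
 *
 * Note that subprograms are deliberately skipped above, so only entry-point
 * programs can be looked up by name.
 */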
3915
3916static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
3917                                      int shndx)
3918{
3919        switch (obj->efile.secs[shndx].sec_type) {
3920        case SEC_BSS:
3921        case SEC_DATA:
3922        case SEC_RODATA:
3923                return true;
3924        default:
3925                return false;
3926        }
3927}
3928
3929static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
3930                                      int shndx)
3931{
3932        return shndx == obj->efile.maps_shndx ||
3933               shndx == obj->efile.btf_maps_shndx;
3934}
3935
3936static enum libbpf_map_type
3937bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
3938{
3939        if (shndx == obj->efile.symbols_shndx)
3940                return LIBBPF_MAP_KCONFIG;
3941
3942        switch (obj->efile.secs[shndx].sec_type) {
3943        case SEC_BSS:
3944                return LIBBPF_MAP_BSS;
3945        case SEC_DATA:
3946                return LIBBPF_MAP_DATA;
3947        case SEC_RODATA:
3948                return LIBBPF_MAP_RODATA;
3949        default:
3950                return LIBBPF_MAP_UNSPEC;
3951        }
3952}
3953
3954static int bpf_program__record_reloc(struct bpf_program *prog,
3955                                     struct reloc_desc *reloc_desc,
3956                                     __u32 insn_idx, const char *sym_name,
3957                                     const Elf64_Sym *sym, const Elf64_Rel *rel)
3958{
3959        struct bpf_insn *insn = &prog->insns[insn_idx];
3960        size_t map_idx, nr_maps = prog->obj->nr_maps;
3961        struct bpf_object *obj = prog->obj;
3962        __u32 shdr_idx = sym->st_shndx;
3963        enum libbpf_map_type type;
3964        const char *sym_sec_name;
3965        struct bpf_map *map;
3966
3967        if (!is_call_insn(insn) && !is_ldimm64_insn(insn)) {
3968                pr_warn("prog '%s': invalid relo against '%s' for insns[%d].code 0x%x\n",
3969                        prog->name, sym_name, insn_idx, insn->code);
3970                return -LIBBPF_ERRNO__RELOC;
3971        }
3972
3973        if (sym_is_extern(sym)) {
3974                int sym_idx = ELF64_R_SYM(rel->r_info);
3975                int i, n = obj->nr_extern;
3976                struct extern_desc *ext;
3977
3978                for (i = 0; i < n; i++) {
3979                        ext = &obj->externs[i];
3980                        if (ext->sym_idx == sym_idx)
3981                                break;
3982                }
3983                if (i >= n) {
3984                        pr_warn("prog '%s': extern relo failed to find extern for '%s' (%d)\n",
3985                                prog->name, sym_name, sym_idx);
3986                        return -LIBBPF_ERRNO__RELOC;
3987                }
3988                pr_debug("prog '%s': found extern #%d '%s' (sym %d) for insn #%u\n",
3989                         prog->name, i, ext->name, ext->sym_idx, insn_idx);
3990                if (insn->code == (BPF_JMP | BPF_CALL))
3991                        reloc_desc->type = RELO_EXTERN_FUNC;
3992                else
3993                        reloc_desc->type = RELO_EXTERN_VAR;
3994                reloc_desc->insn_idx = insn_idx;
3995                reloc_desc->sym_off = i; /* sym_off stores extern index */
3996                return 0;
3997        }
3998
3999        /* sub-program call relocation */
4000        if (is_call_insn(insn)) {
4001                if (insn->src_reg != BPF_PSEUDO_CALL) {
4002                        pr_warn("prog '%s': incorrect bpf_call opcode\n", prog->name);
4003                        return -LIBBPF_ERRNO__RELOC;
4004                }
4005                /* text_shndx can be 0, if no default "main" program exists */
4006                if (!shdr_idx || shdr_idx != obj->efile.text_shndx) {
4007                        sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
4008                        pr_warn("prog '%s': bad call relo against '%s' in section '%s'\n",
4009                                prog->name, sym_name, sym_sec_name);
4010                        return -LIBBPF_ERRNO__RELOC;
4011                }
4012                if (sym->st_value % BPF_INSN_SZ) {
4013                        pr_warn("prog '%s': bad call relo against '%s' at offset %zu\n",
4014                                prog->name, sym_name, (size_t)sym->st_value);
4015                        return -LIBBPF_ERRNO__RELOC;
4016                }
4017                reloc_desc->type = RELO_CALL;
4018                reloc_desc->insn_idx = insn_idx;
4019                reloc_desc->sym_off = sym->st_value;
4020                return 0;
4021        }
4022
4023        if (!shdr_idx || shdr_idx >= SHN_LORESERVE) {
4024                pr_warn("prog '%s': invalid relo against '%s' in special section 0x%x; forgot to initialize global var?..\n",
4025                        prog->name, sym_name, shdr_idx);
4026                return -LIBBPF_ERRNO__RELOC;
4027        }
4028
4029        /* loading subprog addresses */
4030        if (sym_is_subprog(sym, obj->efile.text_shndx)) {
4031                /* global_func: sym->st_value = offset in the section, insn->imm = 0.
4032                 * local_func: sym->st_value = 0, insn->imm = offset in the section.
4033                 */
4034                if ((sym->st_value % BPF_INSN_SZ) || (insn->imm % BPF_INSN_SZ)) {
4035                        pr_warn("prog '%s': bad subprog addr relo against '%s' at offset %zu+%d\n",
4036                                prog->name, sym_name, (size_t)sym->st_value, insn->imm);
4037                        return -LIBBPF_ERRNO__RELOC;
4038                }
4039
4040                reloc_desc->type = RELO_SUBPROG_ADDR;
4041                reloc_desc->insn_idx = insn_idx;
4042                reloc_desc->sym_off = sym->st_value;
4043                return 0;
4044        }
4045
4046        type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
4047        sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
4048
4049        /* generic map reference relocation */
4050        if (type == LIBBPF_MAP_UNSPEC) {
4051                if (!bpf_object__shndx_is_maps(obj, shdr_idx)) {
4052                        pr_warn("prog '%s': bad map relo against '%s' in section '%s'\n",
4053                                prog->name, sym_name, sym_sec_name);
4054                        return -LIBBPF_ERRNO__RELOC;
4055                }
4056                for (map_idx = 0; map_idx < nr_maps; map_idx++) {
4057                        map = &obj->maps[map_idx];
4058                        if (map->libbpf_type != type ||
4059                            map->sec_idx != sym->st_shndx ||
4060                            map->sec_offset != sym->st_value)
4061                                continue;
4062                        pr_debug("prog '%s': found map %zd (%s, sec %d, off %zu) for insn #%u\n",
4063                                 prog->name, map_idx, map->name, map->sec_idx,
4064                                 map->sec_offset, insn_idx);
4065                        break;
4066                }
4067                if (map_idx >= nr_maps) {
4068                        pr_warn("prog '%s': map relo failed to find map for section '%s', off %zu\n",
4069                                prog->name, sym_sec_name, (size_t)sym->st_value);
4070                        return -LIBBPF_ERRNO__RELOC;
4071                }
4072                reloc_desc->type = RELO_LD64;
4073                reloc_desc->insn_idx = insn_idx;
4074                reloc_desc->map_idx = map_idx;
4075                reloc_desc->sym_off = 0; /* sym->st_value determines map_idx */
4076                return 0;
4077        }
4078
4079        /* global data map relocation */
4080        if (!bpf_object__shndx_is_data(obj, shdr_idx)) {
4081                pr_warn("prog '%s': bad data relo against section '%s'\n",
4082                        prog->name, sym_sec_name);
4083                return -LIBBPF_ERRNO__RELOC;
4084        }
4085        for (map_idx = 0; map_idx < nr_maps; map_idx++) {
4086                map = &obj->maps[map_idx];
4087                if (map->libbpf_type != type || map->sec_idx != sym->st_shndx)
4088                        continue;
4089                pr_debug("prog '%s': found data map %zd (%s, sec %d, off %zu) for insn %u\n",
4090                         prog->name, map_idx, map->name, map->sec_idx,
4091                         map->sec_offset, insn_idx);
4092                break;
4093        }
4094        if (map_idx >= nr_maps) {
4095                pr_warn("prog '%s': data relo failed to find map for section '%s'\n",
4096                        prog->name, sym_sec_name);
4097                return -LIBBPF_ERRNO__RELOC;
4098        }
4099
4100        reloc_desc->type = RELO_DATA;
4101        reloc_desc->insn_idx = insn_idx;
4102        reloc_desc->map_idx = map_idx;
4103        reloc_desc->sym_off = sym->st_value;
4104        return 0;
4105}
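
/*
 * To summarize the classification above: each relocation ends up as one of
 * RELO_EXTERN_FUNC/RELO_EXTERN_VAR (extern functions/variables), RELO_CALL
 * (subprog calls), RELO_SUBPROG_ADDR (taking a subprog's address), RELO_LD64
 * (generic map references) or RELO_DATA (global data map accesses). Later
 * relocation passes patch the instructions based on reloc_desc->type.
 */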
4106
4107static bool prog_contains_insn(const struct bpf_program *prog, size_t insn_idx)
4108{
4109        return insn_idx >= prog->sec_insn_off &&
4110               insn_idx < prog->sec_insn_off + prog->sec_insn_cnt;
4111}
4112
4113static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj,
4114                                                 size_t sec_idx, size_t insn_idx)
4115{
4116        int l = 0, r = obj->nr_programs - 1, m;
4117        struct bpf_program *prog;
4118
4119        while (l < r) {
4120                m = l + (r - l + 1) / 2;
4121                prog = &obj->programs[m];
4122
4123                if (prog->sec_idx < sec_idx ||
4124                    (prog->sec_idx == sec_idx && prog->sec_insn_off <= insn_idx))
4125                        l = m;
4126                else
4127                        r = m - 1;
4128        }
4129        /* the matching program could be at index l, but it still might be
4130         * the wrong one, so we need to double-check the conditions one last time
4131         */
4132        prog = &obj->programs[l];
4133        if (prog->sec_idx == sec_idx && prog_contains_insn(prog, insn_idx))
4134                return prog;
4135        return NULL;
4136}
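
/*
 * This is a rightmost binary search over the cmp_progs() order. Example
 * (made-up layout): with programs at sec_insn_off 0, 8 and 20 within the
 * same section, a lookup for insn_idx 10 converges on the program at
 * offset 8; the final prog_contains_insn() check then rejects stray
 * indices, e.g. insn_idx 10 when that program is only 2 insns long.
 */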
4137
4138static int
4139bpf_object__collect_prog_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Data *data)
4140{
4141        const char *relo_sec_name, *sec_name;
4142        size_t sec_idx = shdr->sh_info, sym_idx;
4143        struct bpf_program *prog;
4144        struct reloc_desc *relos;
4145        int err, i, nrels;
4146        const char *sym_name;
4147        __u32 insn_idx;
4148        Elf_Scn *scn;
4149        Elf_Data *scn_data;
4150        Elf64_Sym *sym;
4151        Elf64_Rel *rel;
4152
4153        if (sec_idx >= obj->efile.sec_cnt)
4154                return -EINVAL;
4155
4156        scn = elf_sec_by_idx(obj, sec_idx);
4157        scn_data = elf_sec_data(obj, scn);
4158
4159        relo_sec_name = elf_sec_str(obj, shdr->sh_name);
4160        sec_name = elf_sec_name(obj, scn);
4161        if (!relo_sec_name || !sec_name)
4162                return -EINVAL;
4163
4164        pr_debug("sec '%s': collecting relocation for section(%zu) '%s'\n",
4165                 relo_sec_name, sec_idx, sec_name);
4166        nrels = shdr->sh_size / shdr->sh_entsize;
4167
4168        for (i = 0; i < nrels; i++) {
4169                rel = elf_rel_by_idx(data, i);
4170                if (!rel) {
4171                        pr_warn("sec '%s': failed to get relo #%d\n", relo_sec_name, i);
4172                        return -LIBBPF_ERRNO__FORMAT;
4173                }
4174
4175                sym_idx = ELF64_R_SYM(rel->r_info);
4176                sym = elf_sym_by_idx(obj, sym_idx);
4177                if (!sym) {
4178                        pr_warn("sec '%s': symbol #%zu not found for relo #%d\n",
4179                                relo_sec_name, sym_idx, i);
4180                        return -LIBBPF_ERRNO__FORMAT;
4181                }
4182
4183                if (sym->st_shndx >= obj->efile.sec_cnt) {
4184                        pr_warn("sec '%s': corrupted symbol #%zu pointing to invalid section #%zu for relo #%d\n",
4185                                relo_sec_name, sym_idx, (size_t)sym->st_shndx, i);
4186                        return -LIBBPF_ERRNO__FORMAT;
4187                }
4188
4189                if (rel->r_offset % BPF_INSN_SZ || rel->r_offset >= scn_data->d_size) {
4190                        pr_warn("sec '%s': invalid offset 0x%zx for relo #%d\n",
4191                                relo_sec_name, (size_t)rel->r_offset, i);
4192                        return -LIBBPF_ERRNO__FORMAT;
4193                }
4194
4195                insn_idx = rel->r_offset / BPF_INSN_SZ;
4196                /* relocations against static functions are recorded as
4197                 * relocations against the section that contains the function;
4198                 * in such a case, the symbol will be STT_SECTION and sym.st_name
4199                 * will point to the empty string (0), so fetch the section name
4200                 * instead
4201                 */
4202                if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION && sym->st_name == 0)
4203                        sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym->st_shndx));
4204                else
4205                        sym_name = elf_sym_str(obj, sym->st_name);
4206                sym_name = sym_name ?: "<?";
4207
4208                pr_debug("sec '%s': relo #%d: insn #%u against '%s'\n",
4209                         relo_sec_name, i, insn_idx, sym_name);
4210
4211                prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
4212                if (!prog) {
4213                        pr_debug("sec '%s': relo #%d: couldn't find program in section '%s' for insn #%u, probably overridden weak function, skipping...\n",
4214                                relo_sec_name, i, sec_name, insn_idx);
4215                        continue;
4216                }
4217
4218                relos = libbpf_reallocarray(prog->reloc_desc,
4219                                            prog->nr_reloc + 1, sizeof(*relos));
4220                if (!relos)
4221                        return -ENOMEM;
4222                prog->reloc_desc = relos;
4223
4224                /* adjust insn_idx to local BPF program frame of reference */
4225                insn_idx -= prog->sec_insn_off;
4226                err = bpf_program__record_reloc(prog, &relos[prog->nr_reloc],
4227                                                insn_idx, sym_name, sym, rel);
4228                if (err)
4229                        return err;
4230
4231                prog->nr_reloc++;
4232        }
4233        return 0;
4234}
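/* A worked example of the index math above (numbers are illustrative): a
 * relocation at r_offset = 0x48 in the section maps to section-level
 * insn_idx = 0x48 / BPF_INSN_SZ = 9; if the owning program starts at
 * sec_insn_off = 5, bpf_program__record_reloc() is handed the
 * program-local index 9 - 5 = 4.
 */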
4235
4236static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
4237{
4238        struct bpf_map_def *def = &map->def;
4239        __u32 key_type_id = 0, value_type_id = 0;
4240        int ret;
4241
4242        if (!obj->btf)
4243                return -ENOENT;
4244
4245        /* if it's a BTF-defined map, we don't need to search for type IDs.
4246         * A struct_ops map does not need btf_key_type_id and
4247         * btf_value_type_id either.
4248         */
4249        if (map->sec_idx == obj->efile.btf_maps_shndx ||
4250            bpf_map__is_struct_ops(map))
4251                return 0;
4252
4253        if (!bpf_map__is_internal(map)) {
4254                pr_warn("Use of BPF_ANNOTATE_KV_PAIR is deprecated, use BTF-defined maps in .maps section instead\n");
4255#pragma GCC diagnostic push
4256#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
4257                ret = btf__get_map_kv_tids(obj->btf, map->name, def->key_size,
4258                                           def->value_size, &key_type_id,
4259                                           &value_type_id);
4260#pragma GCC diagnostic pop
4261        } else {
4262                /*
4263                 * LLVM annotates global data differently in BTF, that is,
4264                 * only as '.data', '.bss' or '.rodata'.
4265                 */
4266                ret = btf__find_by_name(obj->btf, map->real_name);
4267        }
4268        if (ret < 0)
4269                return ret;
4270
4271        map->btf_key_type_id = key_type_id;
4272        map->btf_value_type_id = bpf_map__is_internal(map) ?
4273                                 ret : value_type_id;
4274        return 0;
4275}
4276
4277static int bpf_get_map_info_from_fdinfo(int fd, struct bpf_map_info *info)
4278{
4279        char file[PATH_MAX], buff[4096];
4280        FILE *fp;
4281        __u32 val;
4282        int err;
4283
4284        snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd);
4285        memset(info, 0, sizeof(*info));
4286
4287        fp = fopen(file, "r");
4288        if (!fp) {
4289                err = -errno;
4290                pr_warn("failed to open %s: %d. No procfs support?\n", file,
4291                        err);
4292                return err;
4293        }
4294
4295        while (fgets(buff, sizeof(buff), fp)) {
4296                if (sscanf(buff, "map_type:\t%u", &val) == 1)
4297                        info->type = val;
4298                else if (sscanf(buff, "key_size:\t%u", &val) == 1)
4299                        info->key_size = val;
4300                else if (sscanf(buff, "value_size:\t%u", &val) == 1)
4301                        info->value_size = val;
4302                else if (sscanf(buff, "max_entries:\t%u", &val) == 1)
4303                        info->max_entries = val;
4304                else if (sscanf(buff, "map_flags:\t%i", &val) == 1)
4305                        info->map_flags = val;
4306        }
4307
4308        fclose(fp);
4309
4310        return 0;
4311}
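/* The parser above relies on the fixed text layout the kernel emits for
 * BPF map FDs; a representative /proc/<pid>/fdinfo/<fd> excerpt (values
 * are illustrative) looks like:
 *
 *        map_type:       2
 *        key_size:       4
 *        value_size:     8
 *        max_entries:    1024
 *        map_flags:      0x0
 *
 * Only the fields matched by the sscanf() calls are captured; the rest of
 * struct bpf_map_info stays zeroed by the memset() above.
 */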
4312
4313bool bpf_map__autocreate(const struct bpf_map *map)
4314{
4315        return map->autocreate;
4316}
4317
4318int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate)
4319{
4320        if (map->obj->loaded)
4321                return libbpf_err(-EBUSY);
4322
4323        map->autocreate = autocreate;
4324        return 0;
4325}
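/* A minimal usage sketch (object file and map names are hypothetical):
 * autocreate can only be toggled between open and load, otherwise -EBUSY
 * is returned:
 *
 *        struct bpf_object *obj = bpf_object__open_file("prog.bpf.o", NULL);
 *        struct bpf_map *m = bpf_object__find_map_by_name(obj, "optional_map");
 *
 *        bpf_map__set_autocreate(m, false);  // don't create this map
 *        bpf_object__load(obj);              // load proceeds without it
 */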
4326
4327int bpf_map__reuse_fd(struct bpf_map *map, int fd)
4328{
4329        struct bpf_map_info info = {};
4330        __u32 len = sizeof(info);
4331        int new_fd, err;
4332        char *new_name;
4333
4334        err = bpf_obj_get_info_by_fd(fd, &info, &len);
4335        if (err && errno == EINVAL)
4336                err = bpf_get_map_info_from_fdinfo(fd, &info);
4337        if (err)
4338                return libbpf_err(err);
4339
4340        new_name = strdup(info.name);
4341        if (!new_name)
4342                return libbpf_err(-errno);
4343
4344        new_fd = open("/", O_RDONLY | O_CLOEXEC);
4345        if (new_fd < 0) {
4346                err = -errno;
4347                goto err_free_new_name;
4348        }
4349
4350        new_fd = dup3(fd, new_fd, O_CLOEXEC);
4351        if (new_fd < 0) {
4352                err = -errno;
4353                goto err_close_new_fd;
4354        }
4355
4356        err = zclose(map->fd);
4357        if (err) {
4358                err = -errno;
4359                goto err_close_new_fd;
4360        }
4361        free(map->name);
4362
4363        map->fd = new_fd;
4364        map->name = new_name;
4365        map->def.type = info.type;
4366        map->def.key_size = info.key_size;
4367        map->def.value_size = info.value_size;
4368        map->def.max_entries = info.max_entries;
4369        map->def.map_flags = info.map_flags;
4370        map->btf_key_type_id = info.btf_key_type_id;
4371        map->btf_value_type_id = info.btf_value_type_id;
4372        map->reused = true;
4373        map->map_extra = info.map_extra;
4374
4375        return 0;
4376
4377err_close_new_fd:
4378        close(new_fd);
4379err_free_new_name:
4380        free(new_name);
4381        return libbpf_err(err);
4382}
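/* A hedged usage sketch of FD reuse against a pinned map (the pin path is
 * hypothetical). bpf_map__reuse_fd() dup()s the FD it is given, so the
 * caller keeps ownership of the original:
 *
 *        int pin_fd = bpf_obj_get("/sys/fs/bpf/my_map");
 *
 *        if (pin_fd >= 0) {
 *                bpf_map__reuse_fd(map, pin_fd);
 *                close(pin_fd);  // safe: the map holds its own O_CLOEXEC dup
 *        }
 */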
4383
4384__u32 bpf_map__max_entries(const struct bpf_map *map)
4385{
4386        return map->def.max_entries;
4387}
4388
4389struct bpf_map *bpf_map__inner_map(struct bpf_map *map)
4390{
4391        if (!bpf_map_type__is_map_in_map(map->def.type))
4392                return errno = EINVAL, NULL;
4393
4394        return map->inner_map;
4395}
4396
4397int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
4398{
4399        if (map->fd >= 0)
4400                return libbpf_err(-EBUSY);
4401        map->def.max_entries = max_entries;
4402        return 0;
4403}
4404
4405int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
4406{
4407        if (!map || !max_entries)
4408                return libbpf_err(-EINVAL);
4409
4410        return bpf_map__set_max_entries(map, max_entries);
4411}
4412
4413static int
4414bpf_object__probe_loading(struct bpf_object *obj)
4415{
4416        char *cp, errmsg[STRERR_BUFSIZE];
4417        struct bpf_insn insns[] = {
4418                BPF_MOV64_IMM(BPF_REG_0, 0),
4419                BPF_EXIT_INSN(),
4420        };
4421        int ret, insn_cnt = ARRAY_SIZE(insns);
4422
4423        if (obj->gen_loader)
4424                return 0;
4425
4426        ret = bump_rlimit_memlock();
4427        if (ret)
4428                pr_warn("Failed to bump RLIMIT_MEMLOCK (err = %d), you might need to do it explicitly!\n", ret);
4429
4430        /* make sure basic loading works */
4431        ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL);
4432        if (ret < 0)
4433                ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, NULL);
4434        if (ret < 0) {
4435                ret = errno;
4436                cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
4437                pr_warn("Error in %s():%s(%d). Couldn't load trivial BPF "
4438                        "program. Make sure your kernel supports BPF "
4439                        "(CONFIG_BPF_SYSCALL=y) and/or that RLIMIT_MEMLOCK is "
4440                        "set to a big enough value.\n", __func__, cp, ret);
4441                return -ret;
4442        }
4443        close(ret);
4444
4445        return 0;
4446}
4447
4448static int probe_fd(int fd)
4449{
4450        if (fd >= 0)
4451                close(fd);
4452        return fd >= 0;
4453}
4454
4455static int probe_kern_prog_name(void)
4456{
4457        struct bpf_insn insns[] = {
4458                BPF_MOV64_IMM(BPF_REG_0, 0),
4459                BPF_EXIT_INSN(),
4460        };
4461        int ret, insn_cnt = ARRAY_SIZE(insns);
4462
4463        /* make sure loading with name works */
4464        ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "test", "GPL", insns, insn_cnt, NULL);
4465        return probe_fd(ret);
4466}
4467
4468static int probe_kern_global_data(void)
4469{
4470        char *cp, errmsg[STRERR_BUFSIZE];
4471        struct bpf_insn insns[] = {
4472                BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
4473                BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
4474                BPF_MOV64_IMM(BPF_REG_0, 0),
4475                BPF_EXIT_INSN(),
4476        };
4477        int ret, map, insn_cnt = ARRAY_SIZE(insns);
4478
4479        map = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int), 32, 1, NULL);
4480        if (map < 0) {
4481                ret = -errno;
4482                cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
4483                pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
4484                        __func__, cp, -ret);
4485                return ret;
4486        }
4487
4488        insns[0].imm = map;
4489
4490        ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL);
4491        close(map);
4492        return probe_fd(ret);
4493}
4494
4495static int probe_kern_btf(void)
4496{
4497        static const char strs[] = "\0int";
4498        __u32 types[] = {
4499                /* int */
4500                BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
4501        };
4502
4503        return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4504                                             strs, sizeof(strs)));
4505}
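/* probe_kern_btf() and the BTF probes that follow spell out only raw __u32
 * type records plus a '\0'-led string table; the internal
 * libbpf__load_raw_btf() helper is assumed here to assemble the btf_header
 * around those two blobs before issuing BPF_BTF_LOAD, so a failed load
 * cleanly signals that the kernel rejects that particular BTF construct.
 */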
4506
4507static int probe_kern_btf_func(void)
4508{
4509        static const char strs[] = "\0int\0x\0a";
4510        /* void x(int a) {} */
4511        __u32 types[] = {
4512                /* int */
4513                BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
4514                /* FUNC_PROTO */                                /* [2] */
4515                BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
4516                BTF_PARAM_ENC(7, 1),
4517                /* FUNC x */                                    /* [3] */
4518                BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
4519        };
4520
4521        return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4522                                             strs, sizeof(strs)));
4523}
4524
4525static int probe_kern_btf_func_global(void)
4526{
4527        static const char strs[] = "\0int\0x\0a";
4528        /* void x(int a) {} with global linkage */
4529        __u32 types[] = {
4530                /* int */
4531                BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
4532                /* FUNC_PROTO */                                /* [2] */
4533                BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
4534                BTF_PARAM_ENC(7, 1),
4535                /* FUNC x BTF_FUNC_GLOBAL */                    /* [3] */
4536                BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2),
4537        };
4538
4539        return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4540                                             strs, sizeof(strs)));
4541}
4542
4543static int probe_kern_btf_datasec(void)
4544{
4545        static const char strs[] = "\0x\0.data";
4546        /* static int x; */
4547        __u32 types[] = {
4548                /* int */
4549                BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
4550                /* VAR x */                                     /* [2] */
4551                BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
4552                BTF_VAR_STATIC,
4553                /* DATASEC .data */                             /* [3] */
4554                BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
4555                BTF_VAR_SECINFO_ENC(2, 0, 4),
4556        };
4557
4558        return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4559                                             strs, sizeof(strs)));
4560}
4561
4562static int probe_kern_btf_float(void)
4563{
4564        static const char strs[] = "\0float";
4565        __u32 types[] = {
4566                /* float */
4567                BTF_TYPE_FLOAT_ENC(1, 4),
4568        };
4569
4570        return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4571                                             strs, sizeof(strs)));
4572}
4573
4574static int probe_kern_btf_decl_tag(void)
4575{
4576        static const char strs[] = "\0tag";
4577        __u32 types[] = {
4578                /* int */
4579                BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
4580                /* VAR x */                                     /* [2] */
4581                BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
4582                BTF_VAR_STATIC,
4583                /* attr */
4584                BTF_TYPE_DECL_TAG_ENC(1, 2, -1),
4585        };
4586
4587        return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4588                                             strs, sizeof(strs)));
4589}
4590
4591static int probe_kern_btf_type_tag(void)
4592{
4593        static const char strs[] = "\0tag";
4594        __u32 types[] = {
4595                /* int */
4596                BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
4597                /* attr */
4598                BTF_TYPE_TYPE_TAG_ENC(1, 1),                            /* [2] */
4599                /* ptr */
4600                BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2),   /* [3] */
4601        };
4602
4603        return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4604                                             strs, sizeof(strs)));
4605}
4606
4607static int probe_kern_array_mmap(void)
4608{
4609        LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_MMAPABLE);
4610        int fd;
4611
4612        fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int), sizeof(int), 1, &opts);
4613        return probe_fd(fd);
4614}
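/* What BPF_F_MMAPABLE enables, as an illustrative sketch (not part of the
 * probe): values of such an array can be accessed through mmap() without
 * per-lookup bpf() syscalls:
 *
 *        int fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int),
 *                                sizeof(long), 1, &opts);
 *        long *val = mmap(NULL, sizeof(long), PROT_READ | PROT_WRITE,
 *                         MAP_SHARED, fd, 0);
 *        // on success, *val aliases the map's single element
 */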
4615
4616static int probe_kern_exp_attach_type(void)
4617{
4618        LIBBPF_OPTS(bpf_prog_load_opts, opts, .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE);
4619        struct bpf_insn insns[] = {
4620                BPF_MOV64_IMM(BPF_REG_0, 0),
4621                BPF_EXIT_INSN(),
4622        };
4623        int fd, insn_cnt = ARRAY_SIZE(insns);
4624
4625        /* use any valid combination of program type and (optional)
4626         * non-zero expected attach type (i.e., not BPF_CGROUP_INET_INGRESS,
4627         * which is zero) to see if the kernel supports the
4628         * expected_attach_type field for the BPF_PROG_LOAD command
4629         */
4630        fd = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", insns, insn_cnt, &opts);
4631        return probe_fd(fd);
4632}
4633
4634static int probe_kern_probe_read_kernel(void)
4635{
4636        struct bpf_insn insns[] = {
4637                BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),   /* r1 = r10 (fp) */
4638                BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),  /* r1 += -8 */
4639                BPF_MOV64_IMM(BPF_REG_2, 8),            /* r2 = 8 */
4640                BPF_MOV64_IMM(BPF_REG_3, 0),            /* r3 = 0 */
4641                BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel),
4642                BPF_EXIT_INSN(),
4643        };
4644        int fd, insn_cnt = ARRAY_SIZE(insns);
4645
4646        fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, NULL);
4647        return probe_fd(fd);
4648}
4649
4650static int probe_prog_bind_map(void)
4651{
4652        char *cp, errmsg[STRERR_BUFSIZE];
4653        struct bpf_insn insns[] = {
4654                BPF_MOV64_IMM(BPF_REG_0, 0),
4655                BPF_EXIT_INSN(),
4656        };
4657        int ret, map, prog, insn_cnt = ARRAY_SIZE(insns);
4658
4659        map = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int), 32, 1, NULL);
4660        if (map < 0) {
4661                ret = -errno;
4662                cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
4663                pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
4664                        __func__, cp, -ret);
4665                return ret;
4666        }
4667
4668        prog = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL);
4669        if (prog < 0) {
4670                close(map);
4671                return 0;
4672        }
4673
4674        ret = bpf_prog_bind_map(prog, map, NULL);
4675
4676        close(map);
4677        close(prog);
4678
4679        return ret >= 0;
4680}
4681
4682static int probe_module_btf(void)
4683{
4684        static const char strs[] = "\0int";
4685        __u32 types[] = {
4686                /* int */
4687                BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
4688        };
4689        struct bpf_btf_info info;
4690        __u32 len = sizeof(info);
4691        char name[16];
4692        int fd, err;
4693
4694        fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs));
4695        if (fd < 0)
4696                return 0; /* BTF not supported at all */
4697
4698        memset(&info, 0, sizeof(info));
4699        info.name = ptr_to_u64(name);
4700        info.name_len = sizeof(name);
4701
4702        /* check that BPF_OBJ_GET_INFO_BY_FD supports specifying name pointer;
4703         * kernel's module BTF support coincides with support for
4704         * name/name_len fields in struct bpf_btf_info.
4705         */
4706        err = bpf_obj_get_info_by_fd(fd, &info, &len);
4707        close(fd);
4708        return !err;
4709}
4710
4711static int probe_perf_link(void)
4712{
4713        struct bpf_insn insns[] = {
4714                BPF_MOV64_IMM(BPF_REG_0, 0),
4715                BPF_EXIT_INSN(),
4716        };
4717        int prog_fd, link_fd, err;
4718
4719        prog_fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL",
4720                                insns, ARRAY_SIZE(insns), NULL);
4721        if (prog_fd < 0)
4722                return -errno;
4723
4724        /* use invalid perf_event FD to get EBADF, if link is supported;
4725         * otherwise EINVAL should be returned
4726         */
4727        link_fd = bpf_link_create(prog_fd, -1, BPF_PERF_EVENT, NULL);
4728        err = -errno; /* close() can clobber errno */
4729
4730        if (link_fd >= 0)
4731                close(link_fd);
4732        close(prog_fd);
4733
4734        return link_fd < 0 && err == -EBADF;
4735}
4736
4737static int probe_kern_bpf_cookie(void)
4738{
4739        struct bpf_insn insns[] = {
4740                BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_attach_cookie),
4741                BPF_EXIT_INSN(),
4742        };
4743        int ret, insn_cnt = ARRAY_SIZE(insns);
4744
4745        ret = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL", insns, insn_cnt, NULL);
4746        return probe_fd(ret);
4747}
4748
4749enum kern_feature_result {
4750        FEAT_UNKNOWN = 0,
4751        FEAT_SUPPORTED = 1,
4752        FEAT_MISSING = 2,
4753};
4754
4755typedef int (*feature_probe_fn)(void);
4756
4757static struct kern_feature_desc {
4758        const char *desc;
4759        feature_probe_fn probe;
4760        enum kern_feature_result res;
4761} feature_probes[__FEAT_CNT] = {
4762        [FEAT_PROG_NAME] = {
4763                "BPF program name", probe_kern_prog_name,
4764        },
4765        [FEAT_GLOBAL_DATA] = {
4766                "global variables", probe_kern_global_data,
4767        },
4768        [FEAT_BTF] = {
4769                "minimal BTF", probe_kern_btf,
4770        },
4771        [FEAT_BTF_FUNC] = {
4772                "BTF functions", probe_kern_btf_func,
4773        },
4774        [FEAT_BTF_GLOBAL_FUNC] = {
4775                "BTF global function", probe_kern_btf_func_global,
4776        },
4777        [FEAT_BTF_DATASEC] = {
4778                "BTF data section and variable", probe_kern_btf_datasec,
4779        },
4780        [FEAT_ARRAY_MMAP] = {
4781                "ARRAY map mmap()", probe_kern_array_mmap,
4782        },
4783        [FEAT_EXP_ATTACH_TYPE] = {
4784                "BPF_PROG_LOAD expected_attach_type attribute",
4785                probe_kern_exp_attach_type,
4786        },
4787        [FEAT_PROBE_READ_KERN] = {
4788                "bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel,
4789        },
4790        [FEAT_PROG_BIND_MAP] = {
4791                "BPF_PROG_BIND_MAP support", probe_prog_bind_map,
4792        },
4793        [FEAT_MODULE_BTF] = {
4794                "module BTF support", probe_module_btf,
4795        },
4796        [FEAT_BTF_FLOAT] = {
4797                "BTF_KIND_FLOAT support", probe_kern_btf_float,
4798        },
4799        [FEAT_PERF_LINK] = {
4800                "BPF perf link support", probe_perf_link,
4801        },
4802        [FEAT_BTF_DECL_TAG] = {
4803                "BTF_KIND_DECL_TAG support", probe_kern_btf_decl_tag,
4804        },
4805        [FEAT_BTF_TYPE_TAG] = {
4806                "BTF_KIND_TYPE_TAG support", probe_kern_btf_type_tag,
4807        },
4808        [FEAT_MEMCG_ACCOUNT] = {
4809                "memcg-based memory accounting", probe_memcg_account,
4810        },
4811        [FEAT_BPF_COOKIE] = {
4812                "BPF cookie support", probe_kern_bpf_cookie,
4813        },
4814};
4815
4816bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
4817{
4818        struct kern_feature_desc *feat = &feature_probes[feat_id];
4819        int ret;
4820
4821        if (obj && obj->gen_loader)
4822                /* When generating a loader program, assume the latest
4823                 * kernel to avoid extra prog_load and map_create syscalls.
4824                 */
4825                return true;
4826
4827        if (READ_ONCE(feat->res) == FEAT_UNKNOWN) {
4828                ret = feat->probe();
4829                if (ret > 0) {
4830                        WRITE_ONCE(feat->res, FEAT_SUPPORTED);
4831                } else if (ret == 0) {
4832                        WRITE_ONCE(feat->res, FEAT_MISSING);
4833                } else {
4834                        pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret);
4835                        WRITE_ONCE(feat->res, FEAT_MISSING);
4836                }
4837        }
4838
4839        return READ_ONCE(feat->res) == FEAT_SUPPORTED;
4840}
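/* Probe results are cached process-wide in feature_probes[], so only the
 * first kernel_supports() call for a given feature pays the syscall cost;
 * later calls are just a READ_ONCE(). A sketch of a typical call site (the
 * condition is illustrative, not a quote of a real call site):
 *
 *        if (!kernel_supports(obj, FEAT_ARRAY_MMAP))
 *                map->def.map_flags &= ~BPF_F_MMAPABLE;
 */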
4841
4842static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
4843{
4844        struct bpf_map_info map_info = {};
4845        char msg[STRERR_BUFSIZE];
4846        __u32 map_info_len;
4847        int err;
4848
4849        map_info_len = sizeof(map_info);
4850
4851        err = bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len);
4852        if (err && errno == EINVAL)
4853                err = bpf_get_map_info_from_fdinfo(map_fd, &map_info);
4854        if (err) {
4855                pr_warn("failed to get map info for map FD %d: %s\n", map_fd,
4856                        libbpf_strerror_r(errno, msg, sizeof(msg)));
4857                return false;
4858        }
4859
4860        return (map_info.type == map->def.type &&
4861                map_info.key_size == map->def.key_size &&
4862                map_info.value_size == map->def.value_size &&
4863                map_info.max_entries == map->def.max_entries &&
4864                map_info.map_flags == map->def.map_flags &&
4865                map_info.map_extra == map->map_extra);
4866}
4867
4868static int
4869bpf_object__reuse_map(struct bpf_map *map)
4870{
4871        char *cp, errmsg[STRERR_BUFSIZE];
4872        int err, pin_fd;
4873
4874        pin_fd = bpf_obj_get(map->pin_path);
4875        if (pin_fd < 0) {
4876                err = -errno;
4877                if (err == -ENOENT) {
4878                        pr_debug("found no pinned map to reuse at '%s'\n",
4879                                 map->pin_path);
4880                        return 0;
4881                }
4882
4883                cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
4884                pr_warn("couldn't retrieve pinned map '%s': %s\n",
4885                        map->pin_path, cp);
4886                return err;
4887        }
4888
4889        if (!map_is_reuse_compat(map, pin_fd)) {
4890                pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n",
4891                        map->pin_path);
4892                close(pin_fd);
4893                return -EINVAL;
4894        }
4895
4896        err = bpf_map__reuse_fd(map, pin_fd);
4897        close(pin_fd);
4898        if (err)
4899                return err;
4900
4901        map->pinned = true;
4902        pr_debug("reused pinned map at '%s'\n", map->pin_path);
4903
4904        return 0;
4905}
4906
4907static int
4908bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
4909{
4910        enum libbpf_map_type map_type = map->libbpf_type;
4911        char *cp, errmsg[STRERR_BUFSIZE];
4912        int err, zero = 0;
4913
4914        if (obj->gen_loader) {
4915                bpf_gen__map_update_elem(obj->gen_loader, map - obj->maps,
4916                                         map->mmaped, map->def.value_size);
4917                if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG)
4918                        bpf_gen__map_freeze(obj->gen_loader, map - obj->maps);
4919                return 0;
4920        }
4921        err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0);
4922        if (err) {
4923                err = -errno;
4924                cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4925                pr_warn("Error setting initial map(%s) contents: %s\n",
4926                        map->name, cp);
4927                return err;
4928        }
4929
4930        /* Freeze .rodata and .kconfig map as read-only from syscall side. */
4931        if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG) {
4932                err = bpf_map_freeze(map->fd);
4933                if (err) {
4934                        err = -errno;
4935                        cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4936                        pr_warn("Error freezing map(%s) as read-only: %s\n",
4937                                map->name, cp);
4938                        return err;
4939                }
4940        }
4941        return 0;
4942}
4943
4944static void bpf_map__destroy(struct bpf_map *map);
4945
4946static bool is_pow_of_2(size_t x)
4947{
4948        return x && (x & (x - 1)) == 0;
4949}
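/* For x > 0, x & (x - 1) clears the lowest set bit, so the expression is 0
 * exactly when x has a single bit set: 8 & 7 == 0 (power of 2), while
 * 12 & 11 == 8 (not a power of 2).
 */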
4950
4951static size_t adjust_ringbuf_sz(size_t sz)
4952{
4953        __u32 page_sz = sysconf(_SC_PAGE_SIZE);
4954        __u32 mul;
4955
4956        /* if user forgot to set any size, make sure they see an error */
4957        if (sz == 0)
4958                return 0;
4959        /* Kernel expects BPF_MAP_TYPE_RINGBUF's max_entries to be
4960         * a power-of-2 multiple of kernel's page size. If user diligently
4961         * satisfied these conditions, pass the size through.
4962         */
4963        if ((sz % page_sz) == 0 && is_pow_of_2(sz / page_sz))
4964                return sz;
4965
4966        /* Otherwise find closest (page_sz * power_of_2) product bigger than
4967         * user-set size to satisfy both user size request and kernel
4968         * requirements and substitute correct max_entries for map creation.
4969         */
4970        for (mul = 1; mul <= UINT_MAX / page_sz; mul <<= 1) {
4971                if (mul * page_sz > sz)
4972                        return mul * page_sz;
4973        }
4974
4975        /* if it's impossible to satisfy the conditions (i.e., user size is
4976         * very close to UINT_MAX but is not a power-of-2 multiple of
4977         * page_size) then just return original size and let kernel reject it
4978         */
4979        return sz;
4980}
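/* A worked example, assuming a 4096-byte page: a requested size of 10000
 * is neither page-aligned nor a power-of-2 multiple, so the loop stops at
 * mul = 4 and returns 4 * 4096 = 16384, the smallest power-of-2 multiple
 * of the page size covering the request; a request of exactly 8192
 * (2 pages) passes through unchanged.
 */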
4981
4982static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner)
4983{
4984        LIBBPF_OPTS(bpf_map_create_opts, create_attr);
4985        struct bpf_map_def *def = &map->def;
4986        const char *map_name = NULL;
4987        int err = 0;
4988
4989        if (kernel_supports(obj, FEAT_PROG_NAME))
4990                map_name = map->name;
4991        create_attr.map_ifindex = map->map_ifindex;
4992        create_attr.map_flags = def->map_flags;
4993        create_attr.numa_node = map->numa_node;
4994        create_attr.map_extra = map->map_extra;
4995
4996        if (bpf_map__is_struct_ops(map))
4997                create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
4998
4999        if (obj->btf && btf__fd(obj->btf) >= 0) {
5000                create_attr.btf_fd = btf__fd(obj->btf);
5001                create_attr.btf_key_type_id = map->btf_key_type_id;
5002                create_attr.btf_value_type_id = map->btf_value_type_id;
5003        }
5004
5005        if (bpf_map_type__is_map_in_map(def->type)) {
5006                if (map->inner_map) {
5007                        err = bpf_object__create_map(obj, map->inner_map, true);
5008                        if (err) {
5009                                pr_warn("map '%s': failed to create inner map: %d\n",
5010                                        map->name, err);
5011                                return err;
5012                        }
5013                        map->inner_map_fd = bpf_map__fd(map->inner_map);
5014                }
5015                if (map->inner_map_fd >= 0)
5016                        create_attr.inner_map_fd = map->inner_map_fd;
5017        }
5018
5019        switch (def->type) {
5020        case BPF_MAP_TYPE_RINGBUF:
5021                map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries);
5022                /* fallthrough */
5023        case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
5024        case BPF_MAP_TYPE_CGROUP_ARRAY:
5025        case BPF_MAP_TYPE_STACK_TRACE:
5026        case BPF_MAP_TYPE_ARRAY_OF_MAPS:
5027        case BPF_MAP_TYPE_HASH_OF_MAPS:
5028        case BPF_MAP_TYPE_DEVMAP:
5029        case BPF_MAP_TYPE_DEVMAP_HASH:
5030        case BPF_MAP_TYPE_CPUMAP:
5031        case BPF_MAP_TYPE_XSKMAP:
5032        case BPF_MAP_TYPE_SOCKMAP:
5033        case BPF_MAP_TYPE_SOCKHASH:
5034        case BPF_MAP_TYPE_QUEUE:
5035        case BPF_MAP_TYPE_STACK:
5036                create_attr.btf_fd = 0;
5037                create_attr.btf_key_type_id = 0;
5038                create_attr.btf_value_type_id = 0;
5039                map->btf_key_type_id = 0;
5040                map->btf_value_type_id = 0;
5041        default:
5042                break;
5043        }
5044
5045        if (obj->gen_loader) {
5046                bpf_gen__map_create(obj->gen_loader, def->type, map_name,
5047                                    def->key_size, def->value_size, def->max_entries,
5048                                    &create_attr, is_inner ? -1 : map - obj->maps);
5049                /* Pretend to have valid FD to pass various fd >= 0 checks.
5050                 * This fd == 0 will not be used with any syscall and will be reset to -1 eventually.
5051                 */
5052                map->fd = 0;
5053        } else {
5054                map->fd = bpf_map_create(def->type, map_name,
5055                                         def->key_size, def->value_size,
5056                                         def->max_entries, &create_attr);
5057        }
5058        if (map->fd < 0 && (create_attr.btf_key_type_id ||
5059                            create_attr.btf_value_type_id)) {
5060                char *cp, errmsg[STRERR_BUFSIZE];
5061
5062                err = -errno;
5063                cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
5064                pr_warn("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
5065                        map->name, cp, err);
5066                create_attr.btf_fd = 0;
5067                create_attr.btf_key_type_id = 0;
5068                create_attr.btf_value_type_id = 0;
5069                map->btf_key_type_id = 0;
5070                map->btf_value_type_id = 0;
5071                map->fd = bpf_map_create(def->type, map_name,
5072                                         def->key_size, def->value_size,
5073                                         def->max_entries, &create_attr);
5074        }
5075
5076        err = map->fd < 0 ? -errno : 0;
5077
5078        if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) {
5079                if (obj->gen_loader)
5080                        map->inner_map->fd = -1;
5081                bpf_map__destroy(map->inner_map);
5082                zfree(&map->inner_map);
5083        }
5084
5085        return err;
5086}
5087
5088static int init_map_in_map_slots(struct bpf_object *obj, struct bpf_map *map)
5089{
5090        const struct bpf_map *targ_map;
5091        unsigned int i;
5092        int fd, err = 0;
5093
5094        for (i = 0; i < map->init_slots_sz; i++) {
5095                if (!map->init_slots[i])
5096                        continue;
5097
5098                targ_map = map->init_slots[i];
5099                fd = bpf_map__fd(targ_map);
5100
5101                if (obj->gen_loader) {
5102                        bpf_gen__populate_outer_map(obj->gen_loader,
5103                                                    map - obj->maps, i,
5104                                                    targ_map - obj->maps);
5105                } else {
5106                        err = bpf_map_update_elem(map->fd, &i, &fd, 0);
5107                }
5108                if (err) {
5109                        err = -errno;
5110                        pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n",
5111                                map->name, i, targ_map->name, fd, err);
5112                        return err;
5113                }
5114                pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n",
5115                         map->name, i, targ_map->name, fd);
5116        }
5117
5118        zfree(&map->init_slots);
5119        map->init_slots_sz = 0;
5120
5121        return 0;
5122}
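/* The init_slots populated above originate from BTF-defined map-in-map
 * declarations on the BPF side; a minimal sketch of such a declaration
 * (identifiers are illustrative, following the .maps section convention):
 *
 *        struct inner {
 *                __uint(type, BPF_MAP_TYPE_ARRAY);
 *                __uint(max_entries, 1);
 *                __type(key, int);
 *                __type(value, int);
 *        } inner_a SEC(".maps");
 *
 *        struct {
 *                __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
 *                __uint(max_entries, 4);
 *                __type(key, int);
 *                __array(values, struct inner);
 *        } outer SEC(".maps") = {
 *                .values = { [0] = &inner_a },  // becomes init_slots[0]
 *        };
 */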
5123
5124static int init_prog_array_slots(struct bpf_object *obj, struct bpf_map *map)
5125{
5126        const struct bpf_program *targ_prog;
5127        unsigned int i;
5128        int fd, err;
5129
5130        if (obj->gen_loader)
5131                return -ENOTSUP;
5132
5133        for (i = 0; i < map->init_slots_sz; i++) {
5134                if (!map->init_slots[i])
5135                        continue;
5136
5137                targ_prog = map->init_slots[i];
5138                fd = bpf_program__fd(targ_prog);
5139
5140                err = bpf_map_update_elem(map->fd, &i, &fd, 0);
5141                if (err) {
5142                        err = -errno;
5143                        pr_warn("map '%s': failed to initialize slot [%d] to prog '%s' fd=%d: %d\n",
5144                                map->name, i, targ_prog->name, fd, err);
5145                        return err;
5146                }
5147                pr_debug("map '%s': slot [%d] set to prog '%s' fd=%d\n",
5148                         map->name, i, targ_prog->name, fd);
5149        }
5150
5151        zfree(&map->init_slots);
5152        map->init_slots_sz = 0;
5153
5154        return 0;
5155}
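/* PROG_ARRAY slots are declared analogously; a sketch following the
 * selftests' convention (identifiers are illustrative):
 *
 *        struct {
 *                __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
 *                __uint(max_entries, 2);
 *                __uint(key_size, sizeof(__u32));
 *                __array(values, int (void *));
 *        } jmp_table SEC(".maps") = {
 *                .values = { [0] = (void *)&some_prog },
 *        };
 *
 * Note these slots cannot be filled by the light skeleton's gen_loader,
 * hence the -ENOTSUP short-circuit above.
 */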
5156
5157static int bpf_object_init_prog_arrays(struct bpf_object *obj)
5158{
5159        struct bpf_map *map;
5160        int i, err;
5161
5162        for (i = 0; i < obj->nr_maps; i++) {
5163                map = &obj->maps[i];
5164
5165                if (!map->init_slots_sz || map->def.type != BPF_MAP_TYPE_PROG_ARRAY)
5166                        continue;
5167
5168                err = init_prog_array_slots(obj, map);
5169                if (err < 0) {
5170                        zclose(map->fd);
5171                        return err;
5172                }
5173        }
5174        return 0;
5175}
5176
5177static int map_set_def_max_entries(struct bpf_map *map)
5178{
5179        if (map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !map->def.max_entries) {
5180                int nr_cpus;
5181
5182                nr_cpus = libbpf_num_possible_cpus();
5183                if (nr_cpus < 0) {
5184                        pr_warn("map '%s': failed to determine number of system CPUs: %d\n",
5185                                map->name, nr_cpus);
5186                        return nr_cpus;
5187                }
5188                pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus);
5189                map->def.max_entries = nr_cpus;
5190        }
5191
5192        return 0;
5193}
5194
5195static int
5196bpf_object__create_maps(struct bpf_object *obj)
5197{
5198        struct bpf_map *map;
5199        char *cp, errmsg[STRERR_BUFSIZE];
5200        unsigned int i, j;
5201        int err;
5202        bool retried;
5203
5204        for (i = 0; i < obj->nr_maps; i++) {
5205                map = &obj->maps[i];
5206
5207                /* To support old kernels, we skip creating global data maps
5208                 * (.rodata, .data, .kconfig, etc); later on, during program
5209                 * loading, if we detect that at least one of the to-be-loaded
5210                 * programs is referencing any global data map, we'll error
5211                 * out with program name and relocation index logged.
5212                 * This approach accommodates Clang emitting unnecessary
5213                 * .rodata.str1.1 sections for string literals, and also
5214                 * supports CO-RE applications that use global variables in
5215                 * some BPF programs but not others.
5216                 * If those global variable-using programs are not loaded at
5217                 * runtime due to bpf_program__set_autoload(prog, false),
5218                 * bpf_object loading will succeed just fine even on old
5219                 * kernels.
5220                 */
5221                if (bpf_map__is_internal(map) && !kernel_supports(obj, FEAT_GLOBAL_DATA))
5222                        map->autocreate = false;
5223
5224                if (!map->autocreate) {
5225                        pr_debug("map '%s': skipped auto-creating...\n", map->name);
5226                        continue;
5227                }
5228
5229                err = map_set_def_max_entries(map);
5230                if (err)
5231                        goto err_out;
5232
5233                retried = false;
5234retry:
5235                if (map->pin_path) {
5236                        err = bpf_object__reuse_map(map);
5237                        if (err) {
5238                                pr_warn("map '%s': error reusing pinned map\n",
5239                                        map->name);
5240                                goto err_out;
5241                        }
5242                        if (retried && map->fd < 0) {
5243                                pr_warn("map '%s': cannot find pinned map\n",
5244                                        map->name);
5245                                err = -ENOENT;
5246                                goto err_out;
5247                        }
5248                }
5249
5250                if (map->fd >= 0) {
5251                        pr_debug("map '%s': skipping creation (preset fd=%d)\n",
5252                                 map->name, map->fd);
5253                } else {
5254                        err = bpf_object__create_map(obj, map, false);
5255                        if (err)
5256                                goto err_out;
5257
5258                        pr_debug("map '%s': created successfully, fd=%d\n",
5259                                 map->name, map->fd);
5260
5261                        if (bpf_map__is_internal(map)) {
5262                                err = bpf_object__populate_internal_map(obj, map);
5263                                if (err < 0) {
5264                                        zclose(map->fd);
5265                                        goto err_out;
5266                                }
5267                        }
5268
5269                        if (map->init_slots_sz && map->def.type != BPF_MAP_TYPE_PROG_ARRAY) {
5270                                err = init_map_in_map_slots(obj, map);
5271                                if (err < 0) {
5272                                        zclose(map->fd);
5273                                        goto err_out;
5274                                }
5275                        }
5276                }
5277
5278                if (map->pin_path && !map->pinned) {
5279                        err = bpf_map__pin(map, NULL);
5280                        if (err) {
5281                                zclose(map->fd);
5282                                if (!retried && err == -EEXIST) {
5283                                        retried = true;
5284                                        goto retry;
5285                                }
5286                                pr_warn("map '%s': failed to auto-pin at '%s': %d\n",
5287                                        map->name, map->pin_path, err);
5288                                goto err_out;
5289                        }
5290                }
5291        }
5292
5293        return 0;
5294
5295err_out:
5296        cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
5297        pr_warn("map '%s': failed to create: %s(%d)\n", map->name, cp, err);
5298        pr_perm_msg(err);
5299        for (j = 0; j < i; j++)
5300                zclose(obj->maps[j].fd);
5301        return err;
5302}
5303
5304static bool bpf_core_is_flavor_sep(const char *s)
5305{
5306        /* check X___Y name pattern, where X and Y are not underscores */
5307        return s[0] != '_' &&                                 /* X */
5308               s[1] == '_' && s[2] == '_' && s[3] == '_' &&   /* ___ */
5309               s[4] != '_';                                   /* Y */
5310}
5311
5312/* Given 'some_struct_name___with_flavor' return the length of a name prefix
5313 * before last triple underscore. Struct name part after last triple
5314 * underscore is ignored by BPF CO-RE relocation during relocation matching.
5315 */
5316size_t bpf_core_essential_name_len(const char *name)
5317{
5318        size_t n = strlen(name);
5319        int i;
5320
5321        for (i = n - 5; i >= 0; i--) {
5322                if (bpf_core_is_flavor_sep(name + i))
5323                        return i + 1;
5324        }
5325        return n;
5326}
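/* Examples: "task_struct___old" has essential length 11 ("task_struct"),
 * so it matches the kernel's task_struct during CO-RE relocation; a name
 * with no triple-underscore run, e.g. "task_struct" itself, is returned
 * at full length.
 */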
5327
5328void bpf_core_free_cands(struct bpf_core_cand_list *cands)
5329{
5330        if (!cands)
5331                return;
5332
5333        free(cands->cands);
5334        free(cands);
5335}
5336
5337int bpf_core_add_cands(struct bpf_core_cand *local_cand,
5338                       size_t local_essent_len,
5339                       const struct btf *targ_btf,
5340                       const char *targ_btf_name,
5341                       int targ_start_id,
5342                       struct bpf_core_cand_list *cands)
5343{
5344        struct bpf_core_cand *new_cands, *cand;
5345        const struct btf_type *t, *local_t;
5346        const char *targ_name, *local_name;
5347        size_t targ_essent_len;
5348        int n, i;
5349
5350        local_t = btf__type_by_id(local_cand->btf, local_cand->id);
5351        local_name = btf__str_by_offset(local_cand->btf, local_t->name_off);
5352
5353        n = btf__type_cnt(targ_btf);
5354        for (i = targ_start_id; i < n; i++) {
5355                t = btf__type_by_id(targ_btf, i);
5356                if (btf_kind(t) != btf_kind(local_t))
5357                        continue;
5358
5359                targ_name = btf__name_by_offset(targ_btf, t->name_off);
5360                if (str_is_empty(targ_name))
5361                        continue;
5362
5363                targ_essent_len = bpf_core_essential_name_len(targ_name);
5364                if (targ_essent_len != local_essent_len)
5365                        continue;
5366
5367                if (strncmp(local_name, targ_name, local_essent_len) != 0)
5368                        continue;
5369
5370                pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s in [%s]\n",
5371                         local_cand->id, btf_kind_str(local_t),
5372                         local_name, i, btf_kind_str(t), targ_name,
5373                         targ_btf_name);
5374                new_cands = libbpf_reallocarray(cands->cands, cands->len + 1,
5375                                              sizeof(*cands->cands));
5376                if (!new_cands)
5377                        return -ENOMEM;
5378
5379                cand = &new_cands[cands->len];
5380                cand->btf = targ_btf;
5381                cand->id = i;
5382
5383                cands->cands = new_cands;
5384                cands->len++;
5385        }
5386        return 0;
5387}
5388
5389static int load_module_btfs(struct bpf_object *obj)
5390{
5391        struct bpf_btf_info info;
5392        struct module_btf *mod_btf;
5393        struct btf *btf;
5394        char name[64];
5395        __u32 id = 0, len;
5396        int err, fd;
5397
5398        if (obj->btf_modules_loaded)
5399                return 0;
5400
5401        if (obj->gen_loader)
5402                return 0;
5403
5404        /* don't do this again, even if we find no module BTFs */
5405        obj->btf_modules_loaded = true;
5406
5407        /* kernel too old to support module BTFs */
5408        if (!kernel_supports(obj, FEAT_MODULE_BTF))
5409                return 0;
5410
5411        while (true) {
5412                err = bpf_btf_get_next_id(id, &id);
5413                if (err && errno == ENOENT)
5414                        return 0;
5415                if (err) {
5416                        err = -errno;
5417                        pr_warn("failed to iterate BTF objects: %d\n", err);
5418                        return err;
5419                }
5420
5421                fd = bpf_btf_get_fd_by_id(id);
5422                if (fd < 0) {
5423                        if (errno == ENOENT)
5424                                continue; /* expected race: BTF was unloaded */
5425                        err = -errno;
5426                        pr_warn("failed to get BTF object #%d FD: %d\n", id, err);
5427                        return err;
5428                }
5429
5430                len = sizeof(info);
5431                memset(&info, 0, sizeof(info));
5432                info.name = ptr_to_u64(name);
5433                info.name_len = sizeof(name);
5434
5435                err = bpf_obj_get_info_by_fd(fd, &info, &len);
5436                if (err) {
5437                        err = -errno;
5438                        pr_warn("failed to get BTF object #%d info: %d\n", id, err);
5439                        goto err_out;
5440                }
5441
5442                /* ignore non-module BTFs */
5443                if (!info.kernel_btf || strcmp(name, "vmlinux") == 0) {
5444                        close(fd);
5445                        continue;
5446                }
5447
5448                btf = btf_get_from_fd(fd, obj->btf_vmlinux);
5449                err = libbpf_get_error(btf);
5450                if (err) {
5451                        pr_warn("failed to load module [%s]'s BTF object #%d: %d\n",
5452                                name, id, err);
5453                        goto err_out;
5454                }
5455
5456                err = libbpf_ensure_mem((void **)&obj->btf_modules, &obj->btf_module_cap,
5457                                        sizeof(*obj->btf_modules), obj->btf_module_cnt + 1);
5458                if (err)
5459                        goto err_out;
5460
5461                mod_btf = &obj->btf_modules[obj->btf_module_cnt++];
5462
5463                mod_btf->btf = btf;
5464                mod_btf->id = id;
5465                mod_btf->fd = fd;
5466                mod_btf->name = strdup(name);
5467                if (!mod_btf->name) {
5468                        err = -ENOMEM;
5469                        goto err_out;
5470                }
5471                continue;
5472
5473err_out:
5474                close(fd);
5475                return err;
5476        }
5477
5478        return 0;
5479}
5480
5481static struct bpf_core_cand_list *
5482bpf_core_find_cands(struct bpf_object *obj, const struct btf *local_btf, __u32 local_type_id)
5483{
5484        struct bpf_core_cand local_cand = {};
5485        struct bpf_core_cand_list *cands;
5486        const struct btf *main_btf;
5487        const struct btf_type *local_t;
5488        const char *local_name;
5489        size_t local_essent_len;
5490        int err, i;
5491
5492        local_cand.btf = local_btf;
5493        local_cand.id = local_type_id;
5494        local_t = btf__type_by_id(local_btf, local_type_id);
5495        if (!local_t)
5496                return ERR_PTR(-EINVAL);
5497
5498        local_name = btf__name_by_offset(local_btf, local_t->name_off);
5499        if (str_is_empty(local_name))
5500                return ERR_PTR(-EINVAL);
5501        local_essent_len = bpf_core_essential_name_len(local_name);
5502
5503        cands = calloc(1, sizeof(*cands));
5504        if (!cands)
5505                return ERR_PTR(-ENOMEM);
5506
5507        /* Attempt to find target candidates in vmlinux BTF first */
5508        main_btf = obj->btf_vmlinux_override ?: obj->btf_vmlinux;
5509        err = bpf_core_add_cands(&local_cand, local_essent_len, main_btf, "vmlinux", 1, cands);
5510        if (err)
5511                goto err_out;
5512
5513        /* if vmlinux BTF has any candidate, don't go for module BTFs */
5514        if (cands->len)
5515                return cands;
5516
5517        /* if vmlinux BTF was overridden, don't attempt to load module BTFs */
5518        if (obj->btf_vmlinux_override)
5519                return cands;
5520
5521        /* now look through module BTFs, still trying to find candidates */
5522        err = load_module_btfs(obj);
5523        if (err)
5524                goto err_out;
5525
5526        for (i = 0; i < obj->btf_module_cnt; i++) {
5527                err = bpf_core_add_cands(&local_cand, local_essent_len,
5528                                         obj->btf_modules[i].btf,
5529                                         obj->btf_modules[i].name,
5530                                         btf__type_cnt(obj->btf_vmlinux),
5531                                         cands);
5532                if (err)
5533                        goto err_out;
5534        }
5535
5536        return cands;
5537err_out:
5538        bpf_core_free_cands(cands);
5539        return ERR_PTR(err);
5540}
5541
5542/* Check local and target types for compatibility. This check is used for
5543 * type-based CO-RE relocations and follows slightly different rules than
5544 * field-based relocations. This function assumes that root types were already
5545 * checked for name match. Beyond that initial root-level name check, names
5546 * are completely ignored. Compatibility rules are as follows:
5547 *   - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
5548 *     kind should match for local and target types (i.e., STRUCT is not
5549 *     compatible with UNION);
5550 *   - for ENUMs, the size is ignored;
5551 *   - for INT, size and signedness are ignored;
5552 *   - for ARRAY, dimensionality is ignored, element types are checked for
5553 *     compatibility recursively;
5554 *   - CONST/VOLATILE/RESTRICT modifiers are ignored;
5555 *   - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
5556 *   - FUNC_PROTOs are compatible if they have compatible signature: same
5557 *     number of input args and compatible return and argument types.
5558 * These rules are not set in stone and probably will be adjusted as we get
5559 * more experience with using BPF CO-RE relocations.
5560 */
5561int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
5562                              const struct btf *targ_btf, __u32 targ_id)
5563{
5564        const struct btf_type *local_type, *targ_type;
5565        int depth = 32; /* max recursion depth */
5566
5567        /* caller made sure that names match (ignoring flavor suffix) */
5568        local_type = btf__type_by_id(local_btf, local_id);
5569        targ_type = btf__type_by_id(targ_btf, targ_id);
5570        if (btf_kind(local_type) != btf_kind(targ_type))
5571                return 0;
5572
5573recur:
5574        depth--;
5575        if (depth < 0)
5576                return -EINVAL;
5577
5578        local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
5579        targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
5580        if (!local_type || !targ_type)
5581                return -EINVAL;
5582
5583        if (btf_kind(local_type) != btf_kind(targ_type))
5584                return 0;
5585
5586        switch (btf_kind(local_type)) {
5587        case BTF_KIND_UNKN:
5588        case BTF_KIND_STRUCT:
5589        case BTF_KIND_UNION:
5590        case BTF_KIND_ENUM:
5591        case BTF_KIND_FWD:
5592                return 1;
5593        case BTF_KIND_INT:
5594                /* just reject deprecated bitfield-like integers; all other
5595                 * integers are by default compatible with each other
5596                 */
5597                return btf_int_offset(local_type) == 0 && btf_int_offset(targ_type) == 0;
5598        case BTF_KIND_PTR:
5599                local_id = local_type->type;
5600                targ_id = targ_type->type;
5601                goto recur;
5602        case BTF_KIND_ARRAY:
5603                local_id = btf_array(local_type)->type;
5604                targ_id = btf_array(targ_type)->type;
5605                goto recur;
5606        case BTF_KIND_FUNC_PROTO: {
5607                struct btf_param *local_p = btf_params(local_type);
5608                struct btf_param *targ_p = btf_params(targ_type);
5609                __u16 local_vlen = btf_vlen(local_type);
5610                __u16 targ_vlen = btf_vlen(targ_type);
5611                int i, err;
5612
5613                if (local_vlen != targ_vlen)
5614                        return 0;
5615
5616                for (i = 0; i < local_vlen; i++, local_p++, targ_p++) {
5617                        skip_mods_and_typedefs(local_btf, local_p->type, &local_id);
5618                        skip_mods_and_typedefs(targ_btf, targ_p->type, &targ_id);
5619                        err = bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id);
5620                        if (err <= 0)
5621                                return err;
5622                }
5623
5624                /* tail recurse for return type check */
5625                skip_mods_and_typedefs(local_btf, local_type->type, &local_id);
5626                skip_mods_and_typedefs(targ_btf, targ_type->type, &targ_id);
5627                goto recur;
5628        }
5629        default:
5630                pr_warn("unexpected kind %s relocated, local [%d], target [%d]\n",
5631                        btf_kind_str(local_type), local_id, targ_id);
5632                return 0;
5633        }
5634}
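
    /* Illustrative sketch (not part of the function's contract): under the
     * rules above, "int arr[4]" vs "unsigned long arr[16]" compare as
     * compatible (INT size/signedness and ARRAY dimensionality are ignored,
     * and the element kinds match), and "struct s *" vs "struct s___v2 *"
     * are compatible as well (PTR recursion only compares BTF kinds). A
     * STRUCT vs a UNION of identical layout is *not* compatible, as the
     * kind itself must match.
     */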
5635
5636static size_t bpf_core_hash_fn(const void *key, void *ctx)
5637{
5638        return (size_t)key;
5639}
5640
5641static bool bpf_core_equal_fn(const void *k1, const void *k2, void *ctx)
5642{
5643        return k1 == k2;
5644}
5645
5646static void *u32_as_hash_key(__u32 x)
5647{
5648        return (void *)(uintptr_t)x;
5649}
5650
5651static int record_relo_core(struct bpf_program *prog,
5652                            const struct bpf_core_relo *core_relo, int insn_idx)
5653{
5654        struct reloc_desc *relos, *relo;
5655
5656        relos = libbpf_reallocarray(prog->reloc_desc,
5657                                    prog->nr_reloc + 1, sizeof(*relos));
5658        if (!relos)
5659                return -ENOMEM;
5660        relo = &relos[prog->nr_reloc];
5661        relo->type = RELO_CORE;
5662        relo->insn_idx = insn_idx;
5663        relo->core_relo = core_relo;
5664        prog->reloc_desc = relos;
5665        prog->nr_reloc++;
5666        return 0;
5667}
5668
5669static const struct bpf_core_relo *find_relo_core(struct bpf_program *prog, int insn_idx)
5670{
5671        struct reloc_desc *relo;
5672        int i;
5673
5674        for (i = 0; i < prog->nr_reloc; i++) {
5675                relo = &prog->reloc_desc[i];
5676                if (relo->type != RELO_CORE || relo->insn_idx != insn_idx)
5677                        continue;
5678
5679                return relo->core_relo;
5680        }
5681
5682        return NULL;
5683}
5684
5685static int bpf_core_resolve_relo(struct bpf_program *prog,
5686                                 const struct bpf_core_relo *relo,
5687                                 int relo_idx,
5688                                 const struct btf *local_btf,
5689                                 struct hashmap *cand_cache,
5690                                 struct bpf_core_relo_res *targ_res)
5691{
5692        struct bpf_core_spec specs_scratch[3] = {};
5693        const void *type_key = u32_as_hash_key(relo->type_id);
5694        struct bpf_core_cand_list *cands = NULL;
5695        const char *prog_name = prog->name;
5696        const struct btf_type *local_type;
5697        const char *local_name;
5698        __u32 local_id = relo->type_id;
5699        int err;
5700
5701        local_type = btf__type_by_id(local_btf, local_id);
5702        if (!local_type)
5703                return -EINVAL;
5704
5705        local_name = btf__name_by_offset(local_btf, local_type->name_off);
5706        if (!local_name)
5707                return -EINVAL;
5708
5709        if (relo->kind != BPF_CORE_TYPE_ID_LOCAL &&
5710            !hashmap__find(cand_cache, type_key, (void **)&cands)) {
5711                cands = bpf_core_find_cands(prog->obj, local_btf, local_id);
5712                if (IS_ERR(cands)) {
5713                        pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld\n",
5714                                prog_name, relo_idx, local_id, btf_kind_str(local_type),
5715                                local_name, PTR_ERR(cands));
5716                        return PTR_ERR(cands);
5717                }
5718                err = hashmap__set(cand_cache, type_key, cands, NULL, NULL);
5719                if (err) {
5720                        bpf_core_free_cands(cands);
5721                        return err;
5722                }
5723        }
5724
5725        return bpf_core_calc_relo_insn(prog_name, relo, relo_idx, local_btf, cands, specs_scratch,
5726                                       targ_res);
5727}
5728
5729static int
5730bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
5731{
5732        const struct btf_ext_info_sec *sec;
5733        struct bpf_core_relo_res targ_res;
5734        const struct bpf_core_relo *rec;
5735        const struct btf_ext_info *seg;
5736        struct hashmap_entry *entry;
5737        struct hashmap *cand_cache = NULL;
5738        struct bpf_program *prog;
5739        struct bpf_insn *insn;
5740        const char *sec_name;
5741        int i, err = 0, insn_idx, sec_idx, sec_num;
5742
5743        if (obj->btf_ext->core_relo_info.len == 0)
5744                return 0;
5745
5746        if (targ_btf_path) {
5747                obj->btf_vmlinux_override = btf__parse(targ_btf_path, NULL);
5748                err = libbpf_get_error(obj->btf_vmlinux_override);
5749                if (err) {
5750                        pr_warn("failed to parse target BTF: %d\n", err);
5751                        return err;
5752                }
5753        }
5754
5755        cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL);
5756        if (IS_ERR(cand_cache)) {
5757                err = PTR_ERR(cand_cache);
5758                goto out;
5759        }
5760
5761        seg = &obj->btf_ext->core_relo_info;
5762        sec_num = 0;
5763        for_each_btf_ext_sec(seg, sec) {
5764                sec_idx = seg->sec_idxs[sec_num];
5765                sec_num++;
5766
5767                sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
5768                if (str_is_empty(sec_name)) {
5769                        err = -EINVAL;
5770                        goto out;
5771                }
5772
5773                pr_debug("sec '%s': found %d CO-RE relocations\n", sec_name, sec->num_info);
5774
5775                for_each_btf_ext_rec(seg, sec, i, rec) {
5776                        if (rec->insn_off % BPF_INSN_SZ)
5777                                return -EINVAL;
5778                        insn_idx = rec->insn_off / BPF_INSN_SZ;
5779                        prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
5780                        if (!prog) {
5781                                /* When a __weak subprog is "overridden" by another instance
5782                                 * of the subprog from a different object file, the linker still
5783                                 * appends all the .BTF.ext info that used to belong to the
5784                                 * eliminated subprogram.
5785                                 * This is similar to what the x86-64 linker does for relocations.
5786                                 * So ignore such relocations, just like we ignore subprog
5787                                 * instructions when discovering subprograms.
5788                                 */
5789                                pr_debug("sec '%s': skipping CO-RE relocation #%d for insn #%d belonging to eliminated weak subprogram\n",
5790                                         sec_name, i, insn_idx);
5791                                continue;
5792                        }
5793                        /* no need to apply CO-RE relocation if the program is
5794                         * not going to be loaded
5795                         */
5796                        if (!prog->autoload)
5797                                continue;
5798
5799                        /* adjust insn_idx from section frame of reference to the local
5800                         * program's frame of reference; (sub-)program code is not yet
5801                         * relocated, so it's enough to just subtract in-section offset
5802                         */
5803                        insn_idx = insn_idx - prog->sec_insn_off;
5804                        if (insn_idx >= prog->insns_cnt)
5805                                return -EINVAL;
5806                        insn = &prog->insns[insn_idx];
5807
5808                        err = record_relo_core(prog, rec, insn_idx);
5809                        if (err) {
5810                                pr_warn("prog '%s': relo #%d: failed to record relocation: %d\n",
5811                                        prog->name, i, err);
5812                                goto out;
5813                        }
5814
5815                        if (prog->obj->gen_loader)
5816                                continue;
5817
5818                        err = bpf_core_resolve_relo(prog, rec, i, obj->btf, cand_cache, &targ_res);
5819                        if (err) {
5820                                pr_warn("prog '%s': relo #%d: failed to relocate: %d\n",
5821                                        prog->name, i, err);
5822                                goto out;
5823                        }
5824
5825                        err = bpf_core_patch_insn(prog->name, insn, insn_idx, rec, i, &targ_res);
5826                        if (err) {
5827                                pr_warn("prog '%s': relo #%d: failed to patch insn #%u: %d\n",
5828                                        prog->name, i, insn_idx, err);
5829                                goto out;
5830                        }
5831                }
5832        }
5833
5834out:
5835        /* obj->btf_vmlinux and module BTFs are freed after object load */
5836        btf__free(obj->btf_vmlinux_override);
5837        obj->btf_vmlinux_override = NULL;
5838
5839        if (!IS_ERR_OR_NULL(cand_cache)) {
5840                hashmap__for_each_entry(cand_cache, entry, i) {
5841                        bpf_core_free_cands(entry->value);
5842                }
5843                hashmap__free(cand_cache);
5844        }
5845        return err;
5846}
5847
5848/* base constant for poisoned map-load ldimm64 insns; also used by the log fixup logic */
5849#define MAP_LDIMM64_POISON_BASE 2001000000
5850#define MAP_LDIMM64_POISON_PFX "200100"
5851
5852static void poison_map_ldimm64(struct bpf_program *prog, int relo_idx,
5853                               int insn_idx, struct bpf_insn *insn,
5854                               int map_idx, const struct bpf_map *map)
5855{
5856        int i;
5857
5858        pr_debug("prog '%s': relo #%d: poisoning insn #%d that loads map #%d '%s'\n",
5859                 prog->name, relo_idx, insn_idx, map_idx, map->name);
5860
5861        /* we turn single ldimm64 into two identical invalid calls */
5862        for (i = 0; i < 2; i++) {
5863                insn->code = BPF_JMP | BPF_CALL;
5864                insn->dst_reg = 0;
5865                insn->src_reg = 0;
5866                insn->off = 0;
5867                /* if this instruction is reachable (not dead code), the
5868                 * verifier will complain with something like:
5869                 * invalid func unknown#2001000123
5870                 * where the lower 123 is the map index into the obj->maps[] array
5871                 */
5872                insn->imm = MAP_LDIMM64_POISON_BASE + map_idx;
5873
5874                insn++;
5875        }
5876}
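
    /* Illustrative (hypothetical map index): poisoning the ldimm64 that
     * loads map #3 turns both struct bpf_insn slots of the ldimm64 into
     * identical BPF_JMP|BPF_CALL insns with imm = 2001000000 + 3 =
     * 2001000003; if reachable, the verifier reports
     * "invalid func unknown#2001000003", which the log fixup logic can map
     * back to obj->maps[3] via MAP_LDIMM64_POISON_PFX.
     */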
5877
5878/* Relocate data references within program code:
5879 *  - map references;
5880 *  - global variable references;
5881 *  - extern references.
5882 */
5883static int
5884bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
5885{
5886        int i;
5887
5888        for (i = 0; i < prog->nr_reloc; i++) {
5889                struct reloc_desc *relo = &prog->reloc_desc[i];
5890                struct bpf_insn *insn = &prog->insns[relo->insn_idx];
5891                const struct bpf_map *map;
5892                struct extern_desc *ext;
5893
5894                switch (relo->type) {
5895                case RELO_LD64:
5896                        map = &obj->maps[relo->map_idx];
5897                        if (obj->gen_loader) {
5898                                insn[0].src_reg = BPF_PSEUDO_MAP_IDX;
5899                                insn[0].imm = relo->map_idx;
5900                        } else if (map->autocreate) {
5901                                insn[0].src_reg = BPF_PSEUDO_MAP_FD;
5902                                insn[0].imm = map->fd;
5903                        } else {
5904                                poison_map_ldimm64(prog, i, relo->insn_idx, insn,
5905                                                   relo->map_idx, map);
5906                        }
5907                        break;
5908                case RELO_DATA:
5909                        map = &obj->maps[relo->map_idx];
5910                        insn[1].imm = insn[0].imm + relo->sym_off;
5911                        if (obj->gen_loader) {
5912                                insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
5913                                insn[0].imm = relo->map_idx;
5914                        } else if (map->autocreate) {
5915                                insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
5916                                insn[0].imm = map->fd;
5917                        } else {
5918                                poison_map_ldimm64(prog, i, relo->insn_idx, insn,
5919                                                   relo->map_idx, map);
5920                        }
5921                        break;
5922                case RELO_EXTERN_VAR:
5923                        ext = &obj->externs[relo->sym_off];
5924                        if (ext->type == EXT_KCFG) {
5925                                if (obj->gen_loader) {
5926                                        insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
5927                                        insn[0].imm = obj->kconfig_map_idx;
5928                                } else {
5929                                        insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
5930                                        insn[0].imm = obj->maps[obj->kconfig_map_idx].fd;
5931                                }
5932                                insn[1].imm = ext->kcfg.data_off;
5933                        } else /* EXT_KSYM */ {
5934                                if (ext->ksym.type_id && ext->is_set) { /* typed ksyms */
5935                                        insn[0].src_reg = BPF_PSEUDO_BTF_ID;
5936                                        insn[0].imm = ext->ksym.kernel_btf_id;
5937                                        insn[1].imm = ext->ksym.kernel_btf_obj_fd;
5938                                } else { /* typeless ksyms or unresolved typed ksyms */
5939                                        insn[0].imm = (__u32)ext->ksym.addr;
5940                                        insn[1].imm = ext->ksym.addr >> 32;
5941                                }
5942                        }
5943                        break;
5944                case RELO_EXTERN_FUNC:
5945                        ext = &obj->externs[relo->sym_off];
5946                        insn[0].src_reg = BPF_PSEUDO_KFUNC_CALL;
5947                        if (ext->is_set) {
5948                                insn[0].imm = ext->ksym.kernel_btf_id;
5949                                insn[0].off = ext->ksym.btf_fd_idx;
5950                        } else { /* unresolved weak kfunc */
5951                                insn[0].imm = 0;
5952                                insn[0].off = 0;
5953                        }
5954                        break;
5955                case RELO_SUBPROG_ADDR:
5956                        if (insn[0].src_reg != BPF_PSEUDO_FUNC) {
5957                                pr_warn("prog '%s': relo #%d: bad insn\n",
5958                                        prog->name, i);
5959                                return -EINVAL;
5960                        }
5961                        /* handled already */
5962                        break;
5963                case RELO_CALL:
5964                        /* handled already */
5965                        break;
5966                case RELO_CORE:
5967                        /* will be handled by bpf_program_record_relos() */
5968                        break;
5969                default:
5970                        pr_warn("prog '%s': relo #%d: bad relo type %d\n",
5971                                prog->name, i, relo->type);
5972                        return -EINVAL;
5973                }
5974        }
5975
5976        return 0;
5977}
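
    /* Worked example (hypothetical numbers): a RELO_DATA reference to a
     * global at sym_off = 16 in an auto-created data map with fd 7 ends up,
     * per the RELO_DATA case above, as insn[0].src_reg = BPF_PSEUDO_MAP_VALUE,
     * insn[0].imm = 7 and insn[1].imm = <original addend> + 16, i.e., the
     * offset of the variable within the map's value.
     */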
5978
5979static int adjust_prog_btf_ext_info(const struct bpf_object *obj,
5980                                    const struct bpf_program *prog,
5981                                    const struct btf_ext_info *ext_info,
5982                                    void **prog_info, __u32 *prog_rec_cnt,
5983                                    __u32 *prog_rec_sz)
5984{
5985        void *copy_start = NULL, *copy_end = NULL;
5986        void *rec, *rec_end, *new_prog_info;
5987        const struct btf_ext_info_sec *sec;
5988        size_t old_sz, new_sz;
5989        int i, sec_num, sec_idx, off_adj;
5990
5991        sec_num = 0;
5992        for_each_btf_ext_sec(ext_info, sec) {
5993                sec_idx = ext_info->sec_idxs[sec_num];
5994                sec_num++;
5995                if (prog->sec_idx != sec_idx)
5996                        continue;
5997
5998                for_each_btf_ext_rec(ext_info, sec, i, rec) {
5999                        __u32 insn_off = *(__u32 *)rec / BPF_INSN_SZ;
6000
6001                        if (insn_off < prog->sec_insn_off)
6002                                continue;
6003                        if (insn_off >= prog->sec_insn_off + prog->sec_insn_cnt)
6004                                break;
6005
6006                        if (!copy_start)
6007                                copy_start = rec;
6008                        copy_end = rec + ext_info->rec_size;
6009                }
6010
6011                if (!copy_start)
6012                        return -ENOENT;
6013
6014                /* append func/line info of a given (sub-)program to the main
6015                 * program func/line info
6016                 */
6017                old_sz = (size_t)(*prog_rec_cnt) * ext_info->rec_size;
6018                new_sz = old_sz + (copy_end - copy_start);
6019                new_prog_info = realloc(*prog_info, new_sz);
6020                if (!new_prog_info)
6021                        return -ENOMEM;
6022                *prog_info = new_prog_info;
6023                *prog_rec_cnt = new_sz / ext_info->rec_size;
6024                memcpy(new_prog_info + old_sz, copy_start, copy_end - copy_start);
6025
6026                /* Kernel instruction offsets are in units of 8-byte
6027                 * instructions, while .BTF.ext instruction offsets generated
6028                 * by Clang are in units of bytes. So convert Clang offsets
6029                 * into kernel offsets and adjust the offset according to the
6030                 * program's relocated position.
6031                 */
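                    /* Worked example (illustrative numbers): a record with
                     * Clang byte offset 96 (insn #12 within the section) in a
                     * subprog with sec_insn_off = 10 that was appended at
                     * sub_insn_off = 100 becomes 96 / 8 + (100 - 10) = 102.
                     */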
6032                off_adj = prog->sub_insn_off - prog->sec_insn_off;
6033                rec = new_prog_info + old_sz;
6034                rec_end = new_prog_info + new_sz;
6035                for (; rec < rec_end; rec += ext_info->rec_size) {
6036                        __u32 *insn_off = rec;
6037
6038                        *insn_off = *insn_off / BPF_INSN_SZ + off_adj;
6039                }
6040                *prog_rec_sz = ext_info->rec_size;
6041                return 0;
6042        }
6043
6044        return -ENOENT;
6045}
6046
6047static int
6048reloc_prog_func_and_line_info(const struct bpf_object *obj,
6049                              struct bpf_program *main_prog,
6050                              const struct bpf_program *prog)
6051{
6052        int err;
6053
6054        /* no .BTF.ext relocation if .BTF.ext is missing or the kernel
6055         * doesn't support func/line info
6056         */
6057        if (!obj->btf_ext || !kernel_supports(obj, FEAT_BTF_FUNC))
6058                return 0;
6059
6060        /* only attempt func info relocation if main program's func_info
6061         * relocation was successful
6062         */
6063        if (main_prog != prog && !main_prog->func_info)
6064                goto line_info;
6065
6066        err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->func_info,
6067                                       &main_prog->func_info,
6068                                       &main_prog->func_info_cnt,
6069                                       &main_prog->func_info_rec_size);
6070        if (err) {
6071                if (err != -ENOENT) {
6072                        pr_warn("prog '%s': error relocating .BTF.ext function info: %d\n",
6073                                prog->name, err);
6074                        return err;
6075                }
6076                if (main_prog->func_info) {
6077                        /*
6078                         * Some info has already been found, but the last
6079                         * btf_ext reloc failed, so we must error out.
6080                         */
6081                        pr_warn("prog '%s': missing .BTF.ext function info.\n", prog->name);
6082                        return err;
6083                }
6084                /* Failed to relocate even the very first info; ignore the rest. */
6085                pr_warn("prog '%s': missing .BTF.ext function info for the main program, skipping all of .BTF.ext func info.\n",
6086                        prog->name);
6087        }
6088
6089line_info:
6090        /* don't relocate line info if main program's relocation failed */
6091        if (main_prog != prog && !main_prog->line_info)
6092                return 0;
6093
6094        err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->line_info,
6095                                       &main_prog->line_info,
6096                                       &main_prog->line_info_cnt,
6097                                       &main_prog->line_info_rec_size);
6098        if (err) {
6099                if (err != -ENOENT) {
6100                        pr_warn("prog '%s': error relocating .BTF.ext line info: %d\n",
6101                                prog->name, err);
6102                        return err;
6103                }
6104                if (main_prog->line_info) {
6105                        /*
6106                         * Some info has already been found, but the last
6107                         * btf_ext reloc failed, so we must error out.
6108                         */
6109                        pr_warn("prog '%s': missing .BTF.ext line info.\n", prog->name);
6110                        return err;
6111                }
6112                /* Failed to relocate even the very first info; ignore the rest. */
6113                pr_warn("prog '%s': missing .BTF.ext line info for the main program, skipping all of .BTF.ext line info.\n",
6114                        prog->name);
6115        }
6116        return 0;
6117}
6118
6119static int cmp_relo_by_insn_idx(const void *key, const void *elem)
6120{
6121        size_t insn_idx = *(const size_t *)key;
6122        const struct reloc_desc *relo = elem;
6123
6124        if (insn_idx == relo->insn_idx)
6125                return 0;
6126        return insn_idx < relo->insn_idx ? -1 : 1;
6127}
6128
6129static struct reloc_desc *find_prog_insn_relo(const struct bpf_program *prog, size_t insn_idx)
6130{
6131        if (!prog->nr_reloc)
6132                return NULL;
6133        return bsearch(&insn_idx, prog->reloc_desc, prog->nr_reloc,
6134                       sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx);
6135}
6136
6137static int append_subprog_relos(struct bpf_program *main_prog, struct bpf_program *subprog)
6138{
6139        int new_cnt = main_prog->nr_reloc + subprog->nr_reloc;
6140        struct reloc_desc *relos;
6141        int i;
6142
6143        if (main_prog == subprog)
6144                return 0;
6145        relos = libbpf_reallocarray(main_prog->reloc_desc, new_cnt, sizeof(*relos));
6146        if (!relos)
6147                return -ENOMEM;
6148        if (subprog->nr_reloc)
6149                memcpy(relos + main_prog->nr_reloc, subprog->reloc_desc,
6150                       sizeof(*relos) * subprog->nr_reloc);
6151
6152        for (i = main_prog->nr_reloc; i < new_cnt; i++)
6153                relos[i].insn_idx += subprog->sub_insn_off;
6154        /* After insn_idx adjustment the 'relos' array is still sorted
6155         * by insn_idx and doesn't break bsearch.
6156         */
6157        main_prog->reloc_desc = relos;
6158        main_prog->nr_reloc = new_cnt;
6159        return 0;
6160}
6161
6162static int
6163bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
6164                       struct bpf_program *prog)
6165{
6166        size_t sub_insn_idx, insn_idx, new_cnt;
6167        struct bpf_program *subprog;
6168        struct bpf_insn *insns, *insn;
6169        struct reloc_desc *relo;
6170        int err;
6171
6172        err = reloc_prog_func_and_line_info(obj, main_prog, prog);
6173        if (err)
6174                return err;
6175
6176        for (insn_idx = 0; insn_idx < prog->sec_insn_cnt; insn_idx++) {
6177                insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
6178                if (!insn_is_subprog_call(insn) && !insn_is_pseudo_func(insn))
6179                        continue;
6180
6181                relo = find_prog_insn_relo(prog, insn_idx);
6182                if (relo && relo->type == RELO_EXTERN_FUNC)
6183                        /* kfunc relocations will be handled later
6184                         * in bpf_object__relocate_data()
6185                         */
6186                        continue;
6187                if (relo && relo->type != RELO_CALL && relo->type != RELO_SUBPROG_ADDR) {
6188                        pr_warn("prog '%s': unexpected relo for insn #%zu, type %d\n",
6189                                prog->name, insn_idx, relo->type);
6190                        return -LIBBPF_ERRNO__RELOC;
6191                }
6192                if (relo) {
6193                        /* the sub-program instruction index is a combination of
6194                         * the offset of the symbol pointed to by the relocation and
6195                         * the call instruction's imm field; for global functions, the
6196                         * call always has imm = -1, but for static functions the
6197                         * relocation is against STT_SECTION and insn->imm
6198                         * points to the start of the static function
6199                         *
6200                         * for a subprog addr relocation, relo->sym_off + insn->imm is
6201                         * the byte offset in the corresponding section.
6202                         */
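                            /* Worked example (illustrative): a RELO_CALL
                             * against a global function at sym_off = 64 with
                             * insn->imm = -1 yields
                             * sub_insn_idx = 64 / 8 + (-1) + 1 = 8.
                             */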
6203                        if (relo->type == RELO_CALL)
6204                                sub_insn_idx = relo->sym_off / BPF_INSN_SZ + insn->imm + 1;
6205                        else
6206                                sub_insn_idx = (relo->sym_off + insn->imm) / BPF_INSN_SZ;
6207                } else if (insn_is_pseudo_func(insn)) {
6208                        /*
6209                         * A RELO_SUBPROG_ADDR relo is always emitted even if both
6210                         * functions are in the same section, so we shouldn't get here.
6211                         */
6212                        pr_warn("prog '%s': missing subprog addr relo for insn #%zu\n",
6213                                prog->name, insn_idx);
6214                        return -LIBBPF_ERRNO__RELOC;
6215                } else {
6216                        /* if the subprogram call is to a static function within
6217                         * the same ELF section, there won't be any relocation
6218                         * emitted, but that also means no additional offset is
6219                         * necessary: insn->imm is relative to the call
6220                         * instruction's original position within the section
6221                         */
6222                        sub_insn_idx = prog->sec_insn_off + insn_idx + insn->imm + 1;
6223                }
6224
6225                /* we enforce that sub-programs should be in .text section */
6226                subprog = find_prog_by_sec_insn(obj, obj->efile.text_shndx, sub_insn_idx);
6227                if (!subprog) {
6228                        pr_warn("prog '%s': no .text section found yet sub-program call exists\n",
6229                                prog->name);
6230                        return -LIBBPF_ERRNO__RELOC;
6231                }
6232
6233                /* if it's the first call instruction calling into this
6234                 * subprogram (meaning this subprog hasn't been processed
6235                 * yet) within the context of the current main program:
6236                 *   - append it at the end of the main program's instruction block;
6237                 *   - process it recursively, while the current program is put on hold;
6238                 *   - if that subprogram calls some other not-yet-processed
6239                 *   subprogram, the same thing happens recursively until
6240                 *   there are no more unprocessed subprograms left to append
6241                 *   and relocate.
6242                 */
6243                if (subprog->sub_insn_off == 0) {
6244                        subprog->sub_insn_off = main_prog->insns_cnt;
6245
6246                        new_cnt = main_prog->insns_cnt + subprog->insns_cnt;
6247                        insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns));
6248                        if (!insns) {
6249                                pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name);
6250                                return -ENOMEM;
6251                        }
6252                        main_prog->insns = insns;
6253                        main_prog->insns_cnt = new_cnt;
6254
6255                        memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns,
6256                               subprog->insns_cnt * sizeof(*insns));
6257
6258                        pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n",
6259                                 main_prog->name, subprog->insns_cnt, subprog->name);
6260
6261                        /* The subprog insns are now appended. Append its relos too. */
6262                        err = append_subprog_relos(main_prog, subprog);
6263                        if (err)
6264                                return err;
6265                        err = bpf_object__reloc_code(obj, main_prog, subprog);
6266                        if (err)
6267                                return err;
6268                }
6269
6270                /* main_prog->insns memory could have been re-allocated, so
6271                 * calculate pointer again
6272                 */
6273                insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
6274                /* calculate the correct instruction position within the current
6275                 * main prog; each main prog can have a different set of
6276                 * subprograms appended (potentially in a different order as
6277                 * well), so the position of any subprog can differ between
6278                 * main programs */
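                    /* E.g. (illustrative): a call at overall position
                     * prog->sub_insn_off + insn_idx = 13 targeting a subprog
                     * appended at sub_insn_off = 100 gets imm = 100 - 13 - 1 = 86;
                     * the jump then resolves to 13 + 86 + 1 = 100.
                     */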
6279                insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1;
6280
6281                pr_debug("prog '%s': insn #%zu relocated, imm %d points to subprog '%s' (now at %zu offset)\n",
6282                         prog->name, insn_idx, insn->imm, subprog->name, subprog->sub_insn_off);
6283        }
6284
6285        return 0;
6286}
6287
6288/*
6289 * Relocate sub-program calls.
6290 *
6291 * The algorithm operates as follows. Each entry-point BPF program (referred
6292 * to as a main prog) is processed separately. Each subprog (a non-entry
6293 * function that can be called from either entry progs or other subprogs) gets
6294 * its sub_insn_off reset to zero. This serves as an indicator that the
6295 * subprogram hasn't yet been appended and relocated within the current main
6296 * prog. Once it's relocated, sub_insn_off will point at the position within
6297 * the current main prog where the given subprog was appended. This will further
6298 * be used to relocate all the call instructions jumping into this subprog.
6299 *
6300 * We start with the main program and process all call instructions. If the
6301 * call is into a subprog that hasn't been processed (i.e., subprog->sub_insn_off
6302 * is zero), the subprog's instructions are appended at the end of the main
6303 * program's instruction array. Then the main program is "put on hold" while we
6304 * recursively process the newly appended subprogram. If that subprogram calls
6305 * into another subprogram that hasn't been appended, the new subprogram is
6306 * appended again to the *main* prog's instructions (a subprog's own instructions
6307 * are always left untouched, as they need to stay unmodified for subsequent main
6308 * progs, and subprog instructions are only ever appended as part of a main prog) and
6309 * the process continues recursively. Once all the subprogs called from a main
6310 * prog or any of its subprogs are appended (and relocated), all their
6311 * positions within finalized instructions array are known, so it's easy to
6312 * rewrite call instructions with correct relative offsets, corresponding to
6313 * desired target subprog.
6314 *
6315 * It's important to realize that some subprogs might not be called from a given
6316 * main prog or any of its called/used subprogs. Those will keep their
6317 * subprog->sub_insn_off as zero at all times, won't be appended to the current
6318 * main prog, and won't be relocated within the context of the current main prog.
6319 * They might still be used from other main progs later.
6320 *
6321 * Visually this process can be shown as below. Suppose we have two main
6322 * programs mainA and mainB and BPF object contains three subprogs: subA,
6323 * subB, and subC. mainA calls only subA, mainB calls only subC, but subA and
6324 * subC both call subB:
6325 *
6326 *        +--------+ +-------+
6327 *        |        v v       |
6328 *     +--+---+ +--+-+-+ +---+--+
6329 *     | subA | | subB | | subC |
6330 *     +--+---+ +------+ +---+--+
6331 *        ^                  ^
6332 *        |                  |
6333 *    +---+-------+   +------+----+
6334 *    |   mainA   |   |   mainB   |
6335 *    +-----------+   +-----------+
6336 *
6337 * We'll start relocating mainA, find subA, append it, and start
6338 * processing subA recursively:
6339 *
6340 *    +-----------+------+
6341 *    |   mainA   | subA |
6342 *    +-----------+------+
6343 *
6344 * At this point we notice that subB is used from subA, so we append it and
6345 * relocate (there are no further subcalls from subB):
6346 *
6347 *    +-----------+------+------+
6348 *    |   mainA   | subA | subB |
6349 *    +-----------+------+------+
6350 *
6351 * At this point, we relocate subA's calls, then go one level up and finish with
6352 * relocating mainA's calls. mainA is done.
6353 *
6354 * For mainB the process is similar but results in a different order. We start
6355 * with mainB and skip subA and subB, as mainB never calls them (at least not
6356 * directly), but we see that subC is needed, so we append it and start processing it:
6357 *
6358 *    +-----------+------+
6359 *    |   mainB   | subC |
6360 *    +-----------+------+
6361 * Now we see that subC needs subB, so we append and relocate it as well:
6362 *
6363 *    +-----------+------+------+
6364 *    |   mainB   | subC | subB |
6365 *    +-----------+------+------+
6366 *
6367 * At this point we unwind recursion, relocate calls in subC, then in mainB.
6368 */
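    /* Tying the diagrams to the code (illustrative sizes): if mainA has 10
     * insns and subA has 10, subA is appended at sub_insn_off = 10 and subB
     * at 20; a call to subA at mainA's insn #3 is then rewritten by
     * bpf_object__reloc_code() to imm = 10 - 3 - 1 = 6.
     */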
6369static int
6370bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog)
6371{
6372        struct bpf_program *subprog;
6373        int i, err;
6374
6375        /* mark all subprogs as not relocated (yet) within the context of
6376         * current main program
6377         */
6378        for (i = 0; i < obj->nr_programs; i++) {
6379                subprog = &obj->programs[i];
6380                if (!prog_is_subprog(obj, subprog))
6381                        continue;
6382
6383                subprog->sub_insn_off = 0;
6384        }
6385
6386        err = bpf_object__reloc_code(obj, prog, prog);
6387        if (err)
6388                return err;
6389
6390        return 0;
6391}
6392
6393static void
6394bpf_object__free_relocs(struct bpf_object *obj)
6395{
6396        struct bpf_program *prog;
6397        int i;
6398
6399        /* free up relocation descriptors */
6400        for (i = 0; i < obj->nr_programs; i++) {
6401                prog = &obj->programs[i];
6402                zfree(&prog->reloc_desc);
6403                prog->nr_reloc = 0;
6404        }
6405}
6406
6407static int cmp_relocs(const void *_a, const void *_b)
6408{
6409        const struct reloc_desc *a = _a;
6410        const struct reloc_desc *b = _b;
6411
6412        if (a->insn_idx != b->insn_idx)
6413                return a->insn_idx < b->insn_idx ? -1 : 1;
6414
6415        /* no two relocations should have the same insn_idx, but ... */
6416        if (a->type != b->type)
6417                return a->type < b->type ? -1 : 1;
6418
6419        return 0;
6420}
6421
6422static void bpf_object__sort_relos(struct bpf_object *obj)
6423{
6424        int i;
6425
6426        for (i = 0; i < obj->nr_programs; i++) {
6427                struct bpf_program *p = &obj->programs[i];
6428
6429                if (!p->nr_reloc)
6430                        continue;
6431
6432                qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs);
6433        }
6434}
6435
6436static int
6437bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
6438{
6439        struct bpf_program *prog;
6440        size_t i, j;
6441        int err;
6442
6443        if (obj->btf_ext) {
6444                err = bpf_object__relocate_core(obj, targ_btf_path);
6445                if (err) {
6446                        pr_warn("failed to perform CO-RE relocations: %d\n",
6447                                err);
6448                        return err;
6449                }
6450                bpf_object__sort_relos(obj);
6451        }
6452
6453        /* Before relocating calls, pre-process relocations and mark
6454         * the few ld_imm64 instructions that point to subprogs.
6455         * Otherwise bpf_object__reloc_code() would later have to consider
6456         * all ld_imm64 insns as relocation candidates. That would
6457         * slow down relocation, since the number of find_prog_insn_relo()
6458         * calls would increase and most of them would fail to find a relo.
6459         */
6460        for (i = 0; i < obj->nr_programs; i++) {
6461                prog = &obj->programs[i];
6462                for (j = 0; j < prog->nr_reloc; j++) {
6463                        struct reloc_desc *relo = &prog->reloc_desc[j];
6464                        struct bpf_insn *insn = &prog->insns[relo->insn_idx];
6465
6466                        /* mark the insn, so it's recognized by insn_is_pseudo_func() */
6467                        if (relo->type == RELO_SUBPROG_ADDR)
6468                                insn[0].src_reg = BPF_PSEUDO_FUNC;
6469                }
6470        }
6471
6472        /* relocate subprogram calls and append used subprograms to main
6473         * programs; each copy of subprogram code needs to be relocated
6474         * differently for each main program, because its code location might
6475         * have changed.
6476         * Append subprog relos to main programs to allow data relos to be
6477         * processed after text is completely relocated.
6478         */
6479        for (i = 0; i < obj->nr_programs; i++) {
6480                prog = &obj->programs[i];
6481                /* sub-program's sub-calls are relocated within the context of
6482                 * its main program only
6483                 */
6484                if (prog_is_subprog(obj, prog))
6485                        continue;
6486                if (!prog->autoload)
6487                        continue;
6488
6489                err = bpf_object__relocate_calls(obj, prog);
6490                if (err) {
6491                        pr_warn("prog '%s': failed to relocate calls: %d\n",
6492                                prog->name, err);
6493                        return err;
6494                }
6495        }
6496        /* Process data relos for main programs */
6497        for (i = 0; i < obj->nr_programs; i++) {
6498                prog = &obj->programs[i];
6499                if (prog_is_subprog(obj, prog))
6500                        continue;
6501                if (!prog->autoload)
6502                        continue;
6503                err = bpf_object__relocate_data(obj, prog);
6504                if (err) {
6505                        pr_warn("prog '%s': failed to relocate data references: %d\n",
6506                                prog->name, err);
6507                        return err;
6508                }
6509        }
6510
6511        return 0;
6512}
6513
6514static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
6515                                            Elf64_Shdr *shdr, Elf_Data *data);
6516
6517static int bpf_object__collect_map_relos(struct bpf_object *obj,
6518                                         Elf64_Shdr *shdr, Elf_Data *data)
6519{
6520        const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *);
6521        int i, j, nrels, new_sz;
6522        const struct btf_var_secinfo *vi = NULL;
6523        const struct btf_type *sec, *var, *def;
6524        struct bpf_map *map = NULL, *targ_map = NULL;
6525        struct bpf_program *targ_prog = NULL;
6526        bool is_prog_array, is_map_in_map;
6527        const struct btf_member *member;
6528        const char *name, *mname, *type;
6529        unsigned int moff;
6530        Elf64_Sym *sym;
6531        Elf64_Rel *rel;
6532        void *tmp;
6533
6534        if (!obj->efile.btf_maps_sec_btf_id || !obj->btf)
6535                return -EINVAL;
6536        sec = btf__type_by_id(obj->btf, obj->efile.btf_maps_sec_btf_id);
6537        if (!sec)
6538                return -EINVAL;
6539
6540        nrels = shdr->sh_size / shdr->sh_entsize;
6541        for (i = 0; i < nrels; i++) {
6542                rel = elf_rel_by_idx(data, i);
6543                if (!rel) {
6544                        pr_warn(".maps relo #%d: failed to get ELF relo\n", i);
6545                        return -LIBBPF_ERRNO__FORMAT;
6546                }
6547
6548                sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info));
6549                if (!sym) {
6550                        pr_warn(".maps relo #%d: symbol %zx not found\n",
6551                                i, (size_t)ELF64_R_SYM(rel->r_info));
6552                        return -LIBBPF_ERRNO__FORMAT;
6553                }
6554                name = elf_sym_str(obj, sym->st_name) ?: "<?>";
6555
6556                pr_debug(".maps relo #%d: for %zd value %zd rel->r_offset %zu name %d ('%s')\n",
6557                         i, (ssize_t)(rel->r_info >> 32), (size_t)sym->st_value,
6558                         (size_t)rel->r_offset, sym->st_name, name);
6559
6560                for (j = 0; j < obj->nr_maps; j++) {
6561                        map = &obj->maps[j];
6562                        if (map->sec_idx != obj->efile.btf_maps_shndx)
6563                                continue;
6564
6565                        vi = btf_var_secinfos(sec) + map->btf_var_idx;
6566                        if (vi->offset <= rel->r_offset &&
6567                            rel->r_offset + bpf_ptr_sz <= vi->offset + vi->size)
6568                                break;
6569                }
6570                if (j == obj->nr_maps) {
6571                        pr_warn(".maps relo #%d: cannot find map '%s' at rel->r_offset %zu\n",
6572                                i, name, (size_t)rel->r_offset);
6573                        return -EINVAL;
6574                }
6575
6576                is_map_in_map = bpf_map_type__is_map_in_map(map->def.type);
6577                is_prog_array = map->def.type == BPF_MAP_TYPE_PROG_ARRAY;
6578                type = is_map_in_map ? "map" : "prog";
6579                if (is_map_in_map) {
6580                        if (sym->st_shndx != obj->efile.btf_maps_shndx) {
6581                                pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n",
6582                                        i, name);
6583                                return -LIBBPF_ERRNO__RELOC;
6584                        }
6585                        if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS &&
6586                            map->def.key_size != sizeof(int)) {
6587                                pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n",
6588                                        i, map->name, sizeof(int));
6589                                return -EINVAL;
6590                        }
6591                        targ_map = bpf_object__find_map_by_name(obj, name);
6592                        if (!targ_map) {
6593                                pr_warn(".maps relo #%d: '%s' isn't a valid map reference\n",
6594                                        i, name);
6595                                return -ESRCH;
6596                        }
6597                } else if (is_prog_array) {
6598                        targ_prog = bpf_object__find_program_by_name(obj, name);
6599                        if (!targ_prog) {
6600                                pr_warn(".maps relo #%d: '%s' isn't a valid program reference\n",
6601                                        i, name);
6602                                return -ESRCH;
6603                        }
6604                        if (targ_prog->sec_idx != sym->st_shndx ||
6605                            targ_prog->sec_insn_off * 8 != sym->st_value ||
6606                            prog_is_subprog(obj, targ_prog)) {
6607                                pr_warn(".maps relo #%d: '%s' isn't an entry-point program\n",
6608                                        i, name);
6609                                return -LIBBPF_ERRNO__RELOC;
6610                        }
6611                } else {
6612                        return -EINVAL;
6613                }
6614
6615                var = btf__type_by_id(obj->btf, vi->type);
6616                def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
6617                if (btf_vlen(def) == 0)
6618                        return -EINVAL;
6619                member = btf_members(def) + btf_vlen(def) - 1;
6620                mname = btf__name_by_offset(obj->btf, member->name_off);
6621                if (strcmp(mname, "values"))
6622                        return -EINVAL;
6623
6624                moff = btf_member_bit_offset(def, btf_vlen(def) - 1) / 8;
6625                if (rel->r_offset - vi->offset < moff)
6626                        return -EINVAL;
6627
6628                moff = rel->r_offset - vi->offset - moff;
6629                /* here we use the BPF pointer size, which is always 64-bit, as
6630                 * we are parsing an ELF file that was built for the BPF target
6631                 */
6632                if (moff % bpf_ptr_sz)
6633                        return -EINVAL;
6634                moff /= bpf_ptr_sz;
6635                if (moff >= map->init_slots_sz) {
6636                        new_sz = moff + 1;
6637                        tmp = libbpf_reallocarray(map->init_slots, new_sz, host_ptr_sz);
6638                        if (!tmp)
6639                                return -ENOMEM;
6640                        map->init_slots = tmp;
6641                        memset(map->init_slots + map->init_slots_sz, 0,
6642                               (new_sz - map->init_slots_sz) * host_ptr_sz);
6643                        map->init_slots_sz = new_sz;
6644                }
6645                map->init_slots[moff] = is_map_in_map ? (void *)targ_map : (void *)targ_prog;
6646
6647                pr_debug(".maps relo #%d: map '%s' slot [%d] points to %s '%s'\n",
6648                         i, map->name, moff, type, name);
6649        }
6650
6651        return 0;
6652}
6653
6654static int bpf_object__collect_relos(struct bpf_object *obj)
6655{
6656        int i, err;
6657
6658        for (i = 0; i < obj->efile.sec_cnt; i++) {
6659                struct elf_sec_desc *sec_desc = &obj->efile.secs[i];
6660                Elf64_Shdr *shdr;
6661                Elf_Data *data;
6662                int idx;
6663
6664                if (sec_desc->sec_type != SEC_RELO)
6665                        continue;
6666
6667                shdr = sec_desc->shdr;
6668                data = sec_desc->data;
6669                idx = shdr->sh_info;
6670
6671                if (shdr->sh_type != SHT_REL) {
6672                        pr_warn("internal error at %d\n", __LINE__);
6673                        return -LIBBPF_ERRNO__INTERNAL;
6674                }
6675
6676                if (idx == obj->efile.st_ops_shndx)
6677                        err = bpf_object__collect_st_ops_relos(obj, shdr, data);
6678                else if (idx == obj->efile.btf_maps_shndx)
6679                        err = bpf_object__collect_map_relos(obj, shdr, data);
6680                else
6681                        err = bpf_object__collect_prog_relos(obj, shdr, data);
6682                if (err)
6683                        return err;
6684        }
6685
6686        bpf_object__sort_relos(obj);
6687        return 0;
6688}
6689
6690static bool insn_is_helper_call(struct bpf_insn *insn, enum bpf_func_id *func_id)
6691{
6692        if (BPF_CLASS(insn->code) == BPF_JMP &&
6693            BPF_OP(insn->code) == BPF_CALL &&
6694            BPF_SRC(insn->code) == BPF_K &&
6695            insn->src_reg == 0 &&
6696            insn->dst_reg == 0) {
6697                *func_id = insn->imm;
6698                return true;
6699        }
6700        return false;
6701}
6702
6703static int bpf_object__sanitize_prog(struct bpf_object *obj, struct bpf_program *prog)
6704{
6705        struct bpf_insn *insn = prog->insns;
6706        enum bpf_func_id func_id;
6707        int i;
6708
6709        if (obj->gen_loader)
6710                return 0;
6711
6712        for (i = 0; i < prog->insns_cnt; i++, insn++) {
6713                if (!insn_is_helper_call(insn, &func_id))
6714                        continue;
6715
6716                /* on kernels that don't yet support
6717                 * bpf_probe_read_{kernel,user}[_str] helpers, fall back
6718                 * to bpf_probe_read() which works well for old kernels
6719                 */
6720                switch (func_id) {
6721                case BPF_FUNC_probe_read_kernel:
6722                case BPF_FUNC_probe_read_user:
6723                        if (!kernel_supports(obj, FEAT_PROBE_READ_KERN))
6724                                insn->imm = BPF_FUNC_probe_read;
6725                        break;
6726                case BPF_FUNC_probe_read_kernel_str:
6727                case BPF_FUNC_probe_read_user_str:
6728                        if (!kernel_supports(obj, FEAT_PROBE_READ_KERN))
6729                                insn->imm = BPF_FUNC_probe_read_str;
6730                        break;
6731                default:
6732                        break;
6733                }
6734        }
6735        return 0;
6736}
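
    /* For instance, on an older kernel lacking FEAT_PROBE_READ_KERN (roughly
     * pre-5.5; libbpf decides by feature probing, not version checks), the
     * loop above rewrites a call to bpf_probe_read_kernel() in place into a
     * call to bpf_probe_read() by patching only insn->imm.
     */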
6737
6738static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name,
6739                                     int *btf_obj_fd, int *btf_type_id);
6740
6741/* this is called as prog->sec_def->prog_prepare_load_fn for libbpf-supported sec_defs */
6742static int libbpf_prepare_prog_load(struct bpf_program *prog,
6743                                    struct bpf_prog_load_opts *opts, long cookie)
6744{
6745        enum sec_def_flags def = cookie;
6746
6747        /* old kernels might not support specifying expected_attach_type */
6748        if ((def & SEC_EXP_ATTACH_OPT) && !kernel_supports(prog->obj, FEAT_EXP_ATTACH_TYPE))
6749                opts->expected_attach_type = 0;
6750
6751        if (def & SEC_SLEEPABLE)
6752                opts->prog_flags |= BPF_F_SLEEPABLE;
6753
6754        if (prog->type == BPF_PROG_TYPE_XDP && (def & SEC_XDP_FRAGS))
6755                opts->prog_flags |= BPF_F_XDP_HAS_FRAGS;
6756
6757        if (def & SEC_DEPRECATED) {
6758                pr_warn("SEC(\"%s\") is deprecated, please see https://github.com/libbpf/libbpf/wiki/Libbpf-1.0-migration-guide#bpf-program-sec-annotation-deprecations for details\n",
6759                        prog->sec_name);
6760        }
6761
6762        if ((def & SEC_ATTACH_BTF) && !prog->attach_btf_id) {
6763                int btf_obj_fd = 0, btf_type_id = 0, err;
6764                const char *attach_name;
6765
6766                attach_name = strchr(prog->sec_name, '/');
6767                if (!attach_name) {
6768                        /* if the BPF program is annotated with just SEC("fentry")
6769                         * (or similar) without declaratively specifying a
6770                         * target, then it is expected that the target will be
6771                         * specified with bpf_program__set_attach_target() at
6772                         * runtime, before the BPF object load step. If not, then
6773                         * there is nothing to load into the kernel, as the BPF
6774                         * verifier won't be able to validate BPF program
6775                         * correctness anyway.
6776                         */
6777                        pr_warn("prog '%s': no BTF-based attach target is specified, use bpf_program__set_attach_target()\n",
6778                                prog->name);
6779                        return -EINVAL;
6780                }
6781                attach_name++; /* skip over / */
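                    /* e.g., SEC("fentry/do_unlinkat") yields attach_name
                     * "do_unlinkat" (an illustrative attach target)
                     */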
6782
6783                err = libbpf_find_attach_btf_id(prog, attach_name, &btf_obj_fd, &btf_type_id);
6784                if (err)
6785                        return err;
6786
6787                /* cache resolved BTF FD and BTF type ID in the prog */
6788                prog->attach_btf_obj_fd = btf_obj_fd;
6789                prog->attach_btf_id = btf_type_id;
6790
6791                /* but by now libbpf's common logic is not utilizing
6792                 * prog->attach_btf_obj_fd/prog->attach_btf_id anymore, because
6793                 * this callback is called after opts were populated by
6794                 * libbpf, so this callback has to update opts explicitly here
6795                 */
6796                opts->attach_btf_obj_fd = btf_obj_fd;
6797                opts->attach_btf_id = btf_type_id;
6798        }
6799        return 0;
6800}
6801
6802static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz);
6803
6804static int bpf_object_load_prog_instance(struct bpf_object *obj, struct bpf_program *prog,
6805                                         struct bpf_insn *insns, int insns_cnt,
6806                                         const char *license, __u32 kern_version,
6807                                         int *prog_fd)
6808{
6809        LIBBPF_OPTS(bpf_prog_load_opts, load_attr);
6810        const char *prog_name = NULL;
6811        char *cp, errmsg[STRERR_BUFSIZE];
6812        size_t log_buf_size = 0;
6813        char *log_buf = NULL, *tmp;
6814        int btf_fd, ret, err;
6815        bool own_log_buf = true;
6816        __u32 log_level = prog->log_level;
6817
6818        if (prog->type == BPF_PROG_TYPE_UNSPEC) {
6819                /*
6820                 * The program type must be set.  Most likely we couldn't find a proper
6821                 * section definition at load time, and thus we didn't infer the type.
6822                 */
6823                pr_warn("prog '%s': missing BPF prog type, check ELF section name '%s'\n",
6824                        prog->name, prog->sec_name);
6825                return -EINVAL;
6826        }
6827
6828        if (!insns || !insns_cnt)
6829                return -EINVAL;
6830
6831        load_attr.expected_attach_type = prog->expected_attach_type;
6832        if (kernel_supports(obj, FEAT_PROG_NAME))
6833                prog_name = prog->name;
6834        load_attr.attach_prog_fd = prog->attach_prog_fd;
6835        load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd;
6836        load_attr.attach_btf_id = prog->attach_btf_id;
6837        load_attr.kern_version = kern_version;
6838        load_attr.prog_ifindex = prog->prog_ifindex;
6839
6840        /* specify func_info/line_info only if kernel supports them */
6841        btf_fd = bpf_object__btf_fd(obj);
6842        if (btf_fd >= 0 && kernel_supports(obj, FEAT_BTF_FUNC)) {
6843                load_attr.prog_btf_fd = btf_fd;
6844                load_attr.func_info = prog->func_info;
6845                load_attr.func_info_rec_size = prog->func_info_rec_size;
6846                load_attr.func_info_cnt = prog->func_info_cnt;
6847                load_attr.line_info = prog->line_info;
6848                load_attr.line_info_rec_size = prog->line_info_rec_size;
6849                load_attr.line_info_cnt = prog->line_info_cnt;
6850        }
6851        load_attr.log_level = log_level;
6852        load_attr.prog_flags = prog->prog_flags;
6853        load_attr.fd_array = obj->fd_array;
6854
6855        /* adjust load_attr if sec_def provides custom preload callback */
6856        if (prog->sec_def && prog->sec_def->prog_prepare_load_fn) {
6857                err = prog->sec_def->prog_prepare_load_fn(prog, &load_attr, prog->sec_def->cookie);
6858                if (err < 0) {
6859                        pr_warn("prog '%s': failed to prepare load attributes: %d\n",
6860                                prog->name, err);
6861                        return err;
6862                }
6863                insns = prog->insns;
6864                insns_cnt = prog->insns_cnt;
6865        }
6866
6867        if (obj->gen_loader) {
6868                bpf_gen__prog_load(obj->gen_loader, prog->type, prog->name,
6869                                   license, insns, insns_cnt, &load_attr,
6870                                   prog - obj->programs);
6871                *prog_fd = -1;
6872                return 0;
6873        }
6874
6875retry_load:
6876        /* if log_level is zero, we don't request logs initially even if
6877         * custom log_buf is specified; if the program load fails, then we'll
6878         * bump log_level to 1 and use either custom log_buf or we'll allocate
6879         * our own and retry the load to get details on what failed
6880         */
6881        if (log_level) {
6882                if (prog->log_buf) {
6883                        log_buf = prog->log_buf;
6884                        log_buf_size = prog->log_size;
6885                        own_log_buf = false;
6886                } else if (obj->log_buf) {
6887                        log_buf = obj->log_buf;
6888                        log_buf_size = obj->log_size;
6889                        own_log_buf = false;
6890                } else {
6891                        log_buf_size = max((size_t)BPF_LOG_BUF_SIZE, log_buf_size * 2);
6892                        tmp = realloc(log_buf, log_buf_size);
6893                        if (!tmp) {
6894                                ret = -ENOMEM;
6895                                goto out;
6896                        }
6897                        log_buf = tmp;
6898                        log_buf[0] = '\0';
6899                        own_log_buf = true;
6900                }
6901        }
6902
6903        load_attr.log_buf = log_buf;
6904        load_attr.log_size = log_buf_size;
6905        load_attr.log_level = log_level;
6906
6907        ret = bpf_prog_load(prog->type, prog_name, license, insns, insns_cnt, &load_attr);
6908        if (ret >= 0) {
6909                if (log_level && own_log_buf) {
6910                        pr_debug("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n",
6911                                 prog->name, log_buf);
6912                }
6913
6914                if (obj->has_rodata && kernel_supports(obj, FEAT_PROG_BIND_MAP)) {
6915                        struct bpf_map *map;
6916                        int i;
6917
6918                        for (i = 0; i < obj->nr_maps; i++) {
6919                                map = &obj->maps[i];
6920                                if (map->libbpf_type != LIBBPF_MAP_RODATA)
6921                                        continue;
6922
6923                                if (bpf_prog_bind_map(ret, bpf_map__fd(map), NULL)) {
6924                                        cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
6925                                        pr_warn("prog '%s': failed to bind map '%s': %s\n",
6926                                                prog->name, map->real_name, cp);
6927                                        /* Don't fail hard if can't bind rodata. */
6928                                }
6929                        }
6930                }
6931
6932                *prog_fd = ret;
6933                ret = 0;
6934                goto out;
6935        }
6936
6937        if (log_level == 0) {
6938                log_level = 1;
6939                goto retry_load;
6940        }
6941        /* On ENOSPC, increase log buffer size and retry, unless custom
6942         * log_buf is specified.
6943         * Be careful to not overflow u32, though. Kernel's log buf size limit
6944         * isn't part of UAPI so it can always be bumped to full 4GB. So don't
6945         * multiply by 2 unless we are sure we'll fit within 32 bits.
6946         * Currently, we'll get -EINVAL when we reach (UINT_MAX >> 2).
6947         */
6948        if (own_log_buf && errno == ENOSPC && log_buf_size <= UINT_MAX / 2)
6949                goto retry_load;
6950
6951        ret = -errno;
6952
6953        /* post-process verifier log to improve error descriptions */
6954        fixup_verifier_log(prog, log_buf, log_buf_size);
6955
6956        cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
6957        pr_warn("prog '%s': BPF program load failed: %s\n", prog->name, cp);
6958        pr_perm_msg(ret);
6959
6960        if (own_log_buf && log_buf && log_buf[0] != '\0') {
6961                pr_warn("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n",
6962                        prog->name, log_buf);
6963        }
6964
6965out:
6966        if (own_log_buf)
6967                free(log_buf);
6968        return ret;
6969}
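
/* Usage sketch (illustrative): callers may supply their own verifier log
 * buffer instead of letting this function allocate one; with log_level 0 the
 * kernel is asked for a log only on the failure retry described above:
 *
 *	static char verifier_log[1024 * 1024]; // buffer size is an arbitrary choice
 *
 *	bpf_program__set_log_buf(prog, verifier_log, sizeof(verifier_log));
 *	bpf_program__set_log_level(prog, 0);
 *	err = bpf_object__load(obj);
 */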
6970
6971static char *find_prev_line(char *buf, char *cur)
6972{
6973        char *p;
6974
6975        if (cur == buf) /* reached the start of the log buf */
6976                return NULL;
6977
6978        p = cur - 1;
6979        while (p - 1 >= buf && *(p - 1) != '\n')
6980                p--;
6981
6982        return p;
6983}
6984
6985static void patch_log(char *buf, size_t buf_sz, size_t log_sz,
6986                      char *orig, size_t orig_sz, const char *patch)
6987{
6988        /* size of the remaining log content to the right from the to-be-replaced part */
6989        size_t rem_sz = (buf + log_sz) - (orig + orig_sz);
6990        size_t patch_sz = strlen(patch);
6991
6992        if (patch_sz != orig_sz) {
6993                /* If patch line(s) are longer than original piece of verifier log,
6994                 * shift log contents by (patch_sz - orig_sz) bytes to the right
6995                 * starting from after to-be-replaced part of the log.
6996                 *
6997                 * If patch line(s) are shorter than original piece of verifier log,
6998                 * shift log contents by (orig_sz - patch_sz) bytes to the left
6999                 * starting from after to-be-replaced part of the log
7000                 *
7001                 * We need to be careful about not overflowing available
7002                 * buf_sz capacity. If that's the case, we'll truncate the end
7003                 * of the original log, as necessary.
7004                 */
7005                if (patch_sz > orig_sz) {
7006                        if (orig + patch_sz >= buf + buf_sz) {
7007                                /* patch is big enough to cover remaining space completely */
7008                                patch_sz -= (orig + patch_sz) - (buf + buf_sz) + 1;
7009                                rem_sz = 0;
7010                        } else if (patch_sz - orig_sz > buf_sz - log_sz) {
7011                                /* patch causes part of remaining log to be truncated */
7012                                rem_sz -= (patch_sz - orig_sz) - (buf_sz - log_sz);
7013                        }
7014                }
7015                /* shift remaining log to the right by calculated amount */
7016                memmove(orig + patch_sz, orig + orig_sz, rem_sz);
7017        }
7018
7019        memcpy(orig, patch, patch_sz);
7020}
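
/* Worked example (illustrative): patching "XYZ" with "12345" in a log buffer
 * holding "abcXYZdef" first memmove()s the tail "def" right by
 * patch_sz - orig_sz = 2 bytes, then memcpy()s the patch over the original
 * span, yielding "abc12345def". If the shifted tail would not fit within
 * buf_sz, the end of the log is truncated instead.
 */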
7021
7022static void fixup_log_failed_core_relo(struct bpf_program *prog,
7023                                       char *buf, size_t buf_sz, size_t log_sz,
7024                                       char *line1, char *line2, char *line3)
7025{
7026        /* Expected log for failed and not properly guarded CO-RE relocation:
7027         * line1 -> 123: (85) call unknown#195896080
7028         * line2 -> invalid func unknown#195896080
7029         * line3 -> <anything else or end of buffer>
7030         *
7031         * "123" is the index of the instruction that was poisoned. We extract
7032         * instruction index to find corresponding CO-RE relocation and
7033         * replace this part of the log with more relevant information about
7034         * failed CO-RE relocation.
7035         */
7036        const struct bpf_core_relo *relo;
7037        struct bpf_core_spec spec;
7038        char patch[512], spec_buf[256];
7039        int insn_idx, err, spec_len;
7040
7041        if (sscanf(line1, "%d: (%*d) call unknown#195896080\n", &insn_idx) != 1)
7042                return;
7043
7044        relo = find_relo_core(prog, insn_idx);
7045        if (!relo)
7046                return;
7047
7048        err = bpf_core_parse_spec(prog->name, prog->obj->btf, relo, &spec);
7049        if (err)
7050                return;
7051
7052        spec_len = bpf_core_format_spec(spec_buf, sizeof(spec_buf), &spec);
7053        snprintf(patch, sizeof(patch),
7054                 "%d: <invalid CO-RE relocation>\n"
7055                 "failed to resolve CO-RE relocation %s%s\n",
7056                 insn_idx, spec_buf, spec_len >= sizeof(spec_buf) ? "..." : "");
7057
7058        patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch);
7059}
7060
7061static void fixup_log_missing_map_load(struct bpf_program *prog,
7062                                       char *buf, size_t buf_sz, size_t log_sz,
7063                                       char *line1, char *line2, char *line3)
7064{
7065        /* Expected log for a failed and not properly guarded BPF map reference:
7066         * line1 -> 123: (85) call unknown#2001000345
7067         * line2 -> invalid func unknown#2001000345
7068         * line3 -> <anything else or end of buffer>
7069         *
7070         * "123" is the index of the instruction that was poisoned.
7071         * "345" in "2001000345" is the map index in obj->maps used to fetch the map name.
7072         */
7073        struct bpf_object *obj = prog->obj;
7074        const struct bpf_map *map;
7075        int insn_idx, map_idx;
7076        char patch[128];
7077
7078        if (sscanf(line1, "%d: (%*d) call unknown#%d\n", &insn_idx, &map_idx) != 2)
7079                return;
7080
7081        map_idx -= MAP_LDIMM64_POISON_BASE;
7082        if (map_idx < 0 || map_idx >= obj->nr_maps)
7083                return;
7084        map = &obj->maps[map_idx];
7085
7086        snprintf(patch, sizeof(patch),
7087                 "%d: <invalid BPF map reference>\n"
7088                 "BPF map '%s' is referenced but wasn't created\n",
7089                 insn_idx, map->name);
7090
7091        patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch);
7092}
7093
7094static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz)
7095{
7096        /* look for familiar error patterns in last N lines of the log */
7097        const size_t max_last_line_cnt = 10;
7098        char *prev_line, *cur_line, *next_line;
7099        size_t log_sz;
7100        int i;
7101
7102        if (!buf)
7103                return;
7104
7105        log_sz = strlen(buf) + 1;
7106        next_line = buf + log_sz - 1;
7107
7108        for (i = 0; i < max_last_line_cnt; i++, next_line = cur_line) {
7109                cur_line = find_prev_line(buf, next_line);
7110                if (!cur_line)
7111                        return;
7112
7113                /* failed CO-RE relocation case */
7114                if (str_has_pfx(cur_line, "invalid func unknown#195896080\n")) {
7115                        prev_line = find_prev_line(buf, cur_line);
7116                        if (!prev_line)
7117                                continue;
7118
7119                        fixup_log_failed_core_relo(prog, buf, buf_sz, log_sz,
7120                                                   prev_line, cur_line, next_line);
7121                        return;
7122                } else if (str_has_pfx(cur_line, "invalid func unknown#"MAP_LDIMM64_POISON_PFX)) {
7123                        prev_line = find_prev_line(buf, cur_line);
7124                        if (!prev_line)
7125                                continue;
7126
7127                        fixup_log_missing_map_load(prog, buf, buf_sz, log_sz,
7128                                                   prev_line, cur_line, next_line);
7129                        return;
7130                }
7131        }
7132}
7133
7134static int bpf_program_record_relos(struct bpf_program *prog)
7135{
7136        struct bpf_object *obj = prog->obj;
7137        int i;
7138
7139        for (i = 0; i < prog->nr_reloc; i++) {
7140                struct reloc_desc *relo = &prog->reloc_desc[i];
7141                struct extern_desc *ext = &obj->externs[relo->sym_off];
7142
7143                switch (relo->type) {
7144                case RELO_EXTERN_VAR:
7145                        if (ext->type != EXT_KSYM)
7146                                continue;
7147                        bpf_gen__record_extern(obj->gen_loader, ext->name,
7148                                               ext->is_weak, !ext->ksym.type_id,
7149                                               BTF_KIND_VAR, relo->insn_idx);
7150                        break;
7151                case RELO_EXTERN_FUNC:
7152                        bpf_gen__record_extern(obj->gen_loader, ext->name,
7153                                               ext->is_weak, false, BTF_KIND_FUNC,
7154                                               relo->insn_idx);
7155                        break;
7156                case RELO_CORE: {
7157                        struct bpf_core_relo cr = {
7158                                .insn_off = relo->insn_idx * 8,
7159                                .type_id = relo->core_relo->type_id,
7160                                .access_str_off = relo->core_relo->access_str_off,
7161                                .kind = relo->core_relo->kind,
7162                        };
7163
7164                        bpf_gen__record_relo_core(obj->gen_loader, &cr);
7165                        break;
7166                }
7167                default:
7168                        continue;
7169                }
7170        }
7171        return 0;
7172}
7173
7174static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog,
7175                                const char *license, __u32 kern_ver)
7176{
7177        int err = 0, fd, i;
7178
7179        if (obj->loaded) {
7180                pr_warn("prog '%s': can't load after object was loaded\n", prog->name);
7181                return libbpf_err(-EINVAL);
7182        }
7183
7184        if (prog->instances.nr < 0 || !prog->instances.fds) {
7185                if (prog->preprocessor) {
7186                        pr_warn("Internal error: can't load program '%s'\n",
7187                                prog->name);
7188                        return libbpf_err(-LIBBPF_ERRNO__INTERNAL);
7189                }
7190
7191                prog->instances.fds = malloc(sizeof(int));
7192                if (!prog->instances.fds) {
7193                        pr_warn("Not enough memory for BPF fds\n");
7194                        return libbpf_err(-ENOMEM);
7195                }
7196                prog->instances.nr = 1;
7197                prog->instances.fds[0] = -1;
7198        }
7199
7200        if (!prog->preprocessor) {
7201                if (prog->instances.nr != 1) {
7202                        pr_warn("prog '%s': inconsistent nr(%d) != 1\n",
7203                                prog->name, prog->instances.nr);
7204                }
7205                if (obj->gen_loader)
7206                        bpf_program_record_relos(prog);
7207                err = bpf_object_load_prog_instance(obj, prog,
7208                                                    prog->insns, prog->insns_cnt,
7209                                                    license, kern_ver, &fd);
7210                if (!err)
7211                        prog->instances.fds[0] = fd;
7212                goto out;
7213        }
7214
7215        for (i = 0; i < prog->instances.nr; i++) {
7216                struct bpf_prog_prep_result result;
7217                bpf_program_prep_t preprocessor = prog->preprocessor;
7218
7219                memset(&result, 0, sizeof(result));
7220                err = preprocessor(prog, i, prog->insns,
7221                                   prog->insns_cnt, &result);
7222                if (err) {
7223                        pr_warn("Preprocessing the %dth instance of program '%s' failed\n",
7224                                i, prog->name);
7225                        goto out;
7226                }
7227
7228                if (!result.new_insn_ptr || !result.new_insn_cnt) {
7229                        pr_debug("Skip loading the %dth instance of program '%s'\n",
7230                                 i, prog->name);
7231                        prog->instances.fds[i] = -1;
7232                        if (result.pfd)
7233                                *result.pfd = -1;
7234                        continue;
7235                }
7236
7237                err = bpf_object_load_prog_instance(obj, prog,
7238                                                    result.new_insn_ptr, result.new_insn_cnt,
7239                                                    license, kern_ver, &fd);
7240                if (err) {
7241                        pr_warn("Loading the %dth instance of program '%s' failed\n",
7242                                i, prog->name);
7243                        goto out;
7244                }
7245
7246                if (result.pfd)
7247                        *result.pfd = fd;
7248                prog->instances.fds[i] = fd;
7249        }
7250out:
7251        if (err)
7252                pr_warn("failed to load program '%s'\n", prog->name);
7253        return libbpf_err(err);
7254}
7255
7256int bpf_program__load(struct bpf_program *prog, const char *license, __u32 kern_ver)
7257{
7258        return bpf_object_load_prog(prog->obj, prog, license, kern_ver);
7259}
7260
7261static int
7262bpf_object__load_progs(struct bpf_object *obj, int log_level)
7263{
7264        struct bpf_program *prog;
7265        size_t i;
7266        int err;
7267
7268        for (i = 0; i < obj->nr_programs; i++) {
7269                prog = &obj->programs[i];
7270                err = bpf_object__sanitize_prog(obj, prog);
7271                if (err)
7272                        return err;
7273        }
7274
7275        for (i = 0; i < obj->nr_programs; i++) {
7276                prog = &obj->programs[i];
7277                if (prog_is_subprog(obj, prog))
7278                        continue;
7279                if (!prog->autoload) {
7280                        pr_debug("prog '%s': skipped loading\n", prog->name);
7281                        continue;
7282                }
7283                prog->log_level |= log_level;
7284                err = bpf_object_load_prog(obj, prog, obj->license, obj->kern_version);
7285                if (err)
7286                        return err;
7287        }
7288
7289        bpf_object__free_relocs(obj);
7290        return 0;
7291}
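
/* Usage sketch (illustrative; program name is hypothetical): individual
 * programs can be excluded from loading by clearing their autoload flag
 * between open and load:
 *
 *	bpf_object__for_each_program(prog, obj) {
 *		if (strcmp(bpf_program__name(prog), "optional_probe") == 0)
 *			bpf_program__set_autoload(prog, false);
 *	}
 *	err = bpf_object__load(obj);
 */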
7292
7293static const struct bpf_sec_def *find_sec_def(const char *sec_name);
7294
7295static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object_open_opts *opts)
7296{
7297        struct bpf_program *prog;
7298        int err;
7299
7300        bpf_object__for_each_program(prog, obj) {
7301                prog->sec_def = find_sec_def(prog->sec_name);
7302                if (!prog->sec_def) {
7303                        /* couldn't guess, but user might manually specify */
7304                        pr_debug("prog '%s': unrecognized ELF section name '%s'\n",
7305                                prog->name, prog->sec_name);
7306                        continue;
7307                }
7308
7309                prog->type = prog->sec_def->prog_type;
7310                prog->expected_attach_type = prog->sec_def->expected_attach_type;
7311
7312#pragma GCC diagnostic push
7313#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
7314                if (prog->sec_def->prog_type == BPF_PROG_TYPE_TRACING ||
7315                    prog->sec_def->prog_type == BPF_PROG_TYPE_EXT)
7316                        prog->attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
7317#pragma GCC diagnostic pop
7318
7319                /* sec_def can have custom callback which should be called
7320                 * after bpf_program is initialized to adjust its properties
7321                 */
7322                if (prog->sec_def->prog_setup_fn) {
7323                        err = prog->sec_def->prog_setup_fn(prog, prog->sec_def->cookie);
7324                        if (err < 0) {
7325                                pr_warn("prog '%s': failed to initialize: %d\n",
7326                                        prog->name, err);
7327                                return err;
7328                        }
7329                }
7330        }
7331
7332        return 0;
7333}
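
/* Usage sketch (illustrative; names are hypothetical): when a program sits in
 * an ELF section that find_sec_def() doesn't recognize, its type can still be
 * set manually after open, before load:
 *
 *	struct bpf_program *p = bpf_object__find_program_by_name(obj, "my_prog");
 *
 *	bpf_program__set_type(p, BPF_PROG_TYPE_TRACEPOINT);
 *	bpf_program__set_expected_attach_type(p, 0);
 */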
7334
7335static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf, size_t obj_buf_sz,
7336                                          const struct bpf_object_open_opts *opts)
7337{
7338        const char *obj_name, *kconfig, *btf_tmp_path;
7339        struct bpf_object *obj;
7340        char tmp_name[64];
7341        int err;
7342        char *log_buf;
7343        size_t log_size;
7344        __u32 log_level;
7345
7346        if (elf_version(EV_CURRENT) == EV_NONE) {
7347                pr_warn("failed to init libelf for %s\n",
7348                        path ? : "(mem buf)");
7349                return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
7350        }
7351
7352        if (!OPTS_VALID(opts, bpf_object_open_opts))
7353                return ERR_PTR(-EINVAL);
7354
7355        obj_name = OPTS_GET(opts, object_name, NULL);
7356        if (obj_buf) {
7357                if (!obj_name) {
7358                        snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
7359                                 (unsigned long)obj_buf,
7360                                 (unsigned long)obj_buf_sz);
7361                        obj_name = tmp_name;
7362                }
7363                path = obj_name;
7364                pr_debug("loading object '%s' from buffer\n", obj_name);
7365        }
7366
7367        log_buf = OPTS_GET(opts, kernel_log_buf, NULL);
7368        log_size = OPTS_GET(opts, kernel_log_size, 0);
7369        log_level = OPTS_GET(opts, kernel_log_level, 0);
7370        if (log_size > UINT_MAX)
7371                return ERR_PTR(-EINVAL);
7372        if (log_size && !log_buf)
7373                return ERR_PTR(-EINVAL);
7374
7375        obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
7376        if (IS_ERR(obj))
7377                return obj;
7378
7379        obj->log_buf = log_buf;
7380        obj->log_size = log_size;
7381        obj->log_level = log_level;
7382
7383        btf_tmp_path = OPTS_GET(opts, btf_custom_path, NULL);
7384        if (btf_tmp_path) {
7385                if (strlen(btf_tmp_path) >= PATH_MAX) {
7386                        err = -ENAMETOOLONG;
7387                        goto out;
7388                }
7389                obj->btf_custom_path = strdup(btf_tmp_path);
7390                if (!obj->btf_custom_path) {
7391                        err = -ENOMEM;
7392                        goto out;
7393                }
7394        }
7395
7396        kconfig = OPTS_GET(opts, kconfig, NULL);
7397        if (kconfig) {
7398                obj->kconfig = strdup(kconfig);
7399                if (!obj->kconfig) {
7400                        err = -ENOMEM;
7401                        goto out;
7402                }
7403        }
7404
7405        err = bpf_object__elf_init(obj);
7406        err = err ? : bpf_object__check_endianness(obj);
7407        err = err ? : bpf_object__elf_collect(obj);
7408        err = err ? : bpf_object__collect_externs(obj);
7409        err = err ? : bpf_object__finalize_btf(obj);
7410        err = err ? : bpf_object__init_maps(obj, opts);
7411        err = err ? : bpf_object_init_progs(obj, opts);
7412        err = err ? : bpf_object__collect_relos(obj);
7413        if (err)
7414                goto out;
7415
7416        bpf_object__elf_finish(obj);
7417
7418        return obj;
7419out:
7420        bpf_object__close(obj);
7421        return ERR_PTR(err);
7422}
7423
7424static struct bpf_object *
7425__bpf_object__open_xattr(struct bpf_object_open_attr *attr, int flags)
7426{
7427        DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
7428                .relaxed_maps = flags & MAPS_RELAX_COMPAT,
7429        );
7430
7431        /* param validation */
7432        if (!attr->file)
7433                return NULL;
7434
7435        pr_debug("loading %s\n", attr->file);
7436        return bpf_object_open(attr->file, NULL, 0, &opts);
7437}
7438
7439struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
7440{
7441        return libbpf_ptr(__bpf_object__open_xattr(attr, 0));
7442}
7443
7444struct bpf_object *bpf_object__open(const char *path)
7445{
7446        struct bpf_object_open_attr attr = {
7447                .file           = path,
7448                .prog_type      = BPF_PROG_TYPE_UNSPEC,
7449        };
7450
7451        return libbpf_ptr(__bpf_object__open_xattr(&attr, 0));
7452}
7453
7454struct bpf_object *
7455bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts)
7456{
7457        if (!path)
7458                return libbpf_err_ptr(-EINVAL);
7459
7460        pr_debug("loading %s\n", path);
7461
7462        return libbpf_ptr(bpf_object_open(path, NULL, 0, opts));
7463}
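
/* Usage sketch (illustrative; file name is hypothetical):
 *
 *	LIBBPF_OPTS(bpf_object_open_opts, opts, .kernel_log_level = 1);
 *	struct bpf_object *obj;
 *
 *	obj = bpf_object__open_file("prog.bpf.o", &opts);
 *	if (!obj)
 *		return -errno; // errno is set by libbpf_ptr() on failure
 */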
7464
7465struct bpf_object *
7466bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
7467                     const struct bpf_object_open_opts *opts)
7468{
7469        if (!obj_buf || obj_buf_sz == 0)
7470                return libbpf_err_ptr(-EINVAL);
7471
7472        return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, opts));
7473}
7474
7475struct bpf_object *
7476bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
7477                        const char *name)
7478{
7479        DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
7480                .object_name = name,
7481                /* wrong default, but backwards-compatible */
7482                .relaxed_maps = true,
7483        );
7484
7485        /* returning NULL is wrong, but backwards-compatible */
7486        if (!obj_buf || obj_buf_sz == 0)
7487                return errno = EINVAL, NULL;
7488
7489        return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, &opts));
7490}
7491
7492static int bpf_object_unload(struct bpf_object *obj)
7493{
7494        size_t i;
7495
7496        if (!obj)
7497                return libbpf_err(-EINVAL);
7498
7499        for (i = 0; i < obj->nr_maps; i++) {
7500                zclose(obj->maps[i].fd);
7501                if (obj->maps[i].st_ops)
7502                        zfree(&obj->maps[i].st_ops->kern_vdata);
7503        }
7504
7505        for (i = 0; i < obj->nr_programs; i++)
7506                bpf_program__unload(&obj->programs[i]);
7507
7508        return 0;
7509}
7510
7511int bpf_object__unload(struct bpf_object *obj) __attribute__((alias("bpf_object_unload")));
7512
7513static int bpf_object__sanitize_maps(struct bpf_object *obj)
7514{
7515        struct bpf_map *m;
7516
7517        bpf_object__for_each_map(m, obj) {
7518                if (!bpf_map__is_internal(m))
7519                        continue;
7520                if (!kernel_supports(obj, FEAT_ARRAY_MMAP))
7521                        m->def.map_flags &= ~BPF_F_MMAPABLE; /* clear, don't toggle */
7522        }
7523
7524        return 0;
7525}
7526
7527int libbpf_kallsyms_parse(kallsyms_cb_t cb, void *ctx)
7528{
7529        char sym_type, sym_name[500];
7530        unsigned long long sym_addr;
7531        int ret, err = 0;
7532        FILE *f;
7533
7534        f = fopen("/proc/kallsyms", "r");
7535        if (!f) {
7536                err = -errno;
7537                pr_warn("failed to open /proc/kallsyms: %d\n", err);
7538                return err;
7539        }
7540
7541        while (true) {
7542                ret = fscanf(f, "%llx %c %499s%*[^\n]\n",
7543                             &sym_addr, &sym_type, sym_name);
7544                if (ret == EOF && feof(f))
7545                        break;
7546                if (ret != 3) {
7547                        pr_warn("failed to read kallsyms entry: %d\n", ret);
7548                        err = -EINVAL;
7549                        break;
7550                }
7551
7552                err = cb(sym_addr, sym_type, sym_name, ctx);
7553                if (err)
7554                        break;
7555        }
7556
7557        fclose(f);
7558        return err;
7559}
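
/* Usage sketch (illustrative; the looked-up symbol is just an example): a
 * callback that grabs one symbol's address and stops iteration early by
 * returning a non-zero value, which libbpf_kallsyms_parse() passes through:
 *
 *	static int find_sym_cb(unsigned long long sym_addr, char sym_type,
 *			       const char *sym_name, void *ctx)
 *	{
 *		unsigned long long *addr = ctx;
 *
 *		if (strcmp(sym_name, "bpf_prog_put") != 0)
 *			return 0;
 *		*addr = sym_addr;
 *		return 1; // stop scanning /proc/kallsyms
 *	}
 */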
7560
7561static int kallsyms_cb(unsigned long long sym_addr, char sym_type,
7562                       const char *sym_name, void *ctx)
7563{
7564        struct bpf_object *obj = ctx;
7565        const struct btf_type *t;
7566        struct extern_desc *ext;
7567
7568        ext = find_extern_by_name(obj, sym_name);
7569        if (!ext || ext->type != EXT_KSYM)
7570                return 0;
7571
7572        t = btf__type_by_id(obj->btf, ext->btf_id);
7573        if (!btf_is_var(t))
7574                return 0;
7575
7576        if (ext->is_set && ext->ksym.addr != sym_addr) {
7577                pr_warn("extern (ksym) '%s' resolution is ambiguous: 0x%llx or 0x%llx\n",
7578                        sym_name, ext->ksym.addr, sym_addr);
7579                return -EINVAL;
7580        }
7581        if (!ext->is_set) {
7582                ext->is_set = true;
7583                ext->ksym.addr = sym_addr;
7584                pr_debug("extern (ksym) %s=0x%llx\n", sym_name, sym_addr);
7585        }
7586        return 0;
7587}
7588
7589static int bpf_object__read_kallsyms_file(struct bpf_object *obj)
7590{
7591        return libbpf_kallsyms_parse(kallsyms_cb, obj);
7592}
7593
7594static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
7595                            __u16 kind, struct btf **res_btf,
7596                            struct module_btf **res_mod_btf)
7597{
7598        struct module_btf *mod_btf;
7599        struct btf *btf;
7600        int i, id, err;
7601
7602        btf = obj->btf_vmlinux;
7603        mod_btf = NULL;
7604        id = btf__find_by_name_kind(btf, ksym_name, kind);
7605
7606        if (id == -ENOENT) {
7607                err = load_module_btfs(obj);
7608                if (err)
7609                        return err;
7610
7611                for (i = 0; i < obj->btf_module_cnt; i++) {
7612                        /* we assume module_btf's BTF FD is always >0 */
7613                        mod_btf = &obj->btf_modules[i];
7614                        btf = mod_btf->btf;
7615                        id = btf__find_by_name_kind_own(btf, ksym_name, kind);
7616                        if (id != -ENOENT)
7617                                break;
7618                }
7619        }
7620        if (id <= 0)
7621                return -ESRCH;
7622
7623        *res_btf = btf;
7624        *res_mod_btf = mod_btf;
7625        return id;
7626}
7627
7628static int bpf_object__resolve_ksym_var_btf_id(struct bpf_object *obj,
7629                                               struct extern_desc *ext)
7630{
7631        const struct btf_type *targ_var, *targ_type;
7632        __u32 targ_type_id, local_type_id;
7633        struct module_btf *mod_btf = NULL;
7634        const char *targ_var_name;
7635        struct btf *btf = NULL;
7636        int id, err;
7637
7638        id = find_ksym_btf_id(obj, ext->name, BTF_KIND_VAR, &btf, &mod_btf);
7639        if (id < 0) {
7640                if (id == -ESRCH && ext->is_weak)
7641                        return 0;
7642                pr_warn("extern (var ksym) '%s': not found in kernel BTF\n",
7643                        ext->name);
7644                return id;
7645        }
7646
7647        /* find local type_id */
7648        local_type_id = ext->ksym.type_id;
7649
7650        /* find target type_id */
7651        targ_var = btf__type_by_id(btf, id);
7652        targ_var_name = btf__name_by_offset(btf, targ_var->name_off);
7653        targ_type = skip_mods_and_typedefs(btf, targ_var->type, &targ_type_id);
7654
7655        err = bpf_core_types_are_compat(obj->btf, local_type_id,
7656                                        btf, targ_type_id);
7657        if (err <= 0) {
7658                const struct btf_type *local_type;
7659                const char *targ_name, *local_name;
7660
7661                local_type = btf__type_by_id(obj->btf, local_type_id);
7662                local_name = btf__name_by_offset(obj->btf, local_type->name_off);
7663                targ_name = btf__name_by_offset(btf, targ_type->name_off);
7664
7665                pr_warn("extern (var ksym) '%s': incompatible types, expected [%d] %s %s, but kernel has [%d] %s %s\n",
7666                        ext->name, local_type_id,
7667                        btf_kind_str(local_type), local_name, targ_type_id,
7668                        btf_kind_str(targ_type), targ_name);
7669                return -EINVAL;
7670        }
7671
7672        ext->is_set = true;
7673        ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0;
7674        ext->ksym.kernel_btf_id = id;
7675        pr_debug("extern (var ksym) '%s': resolved to [%d] %s %s\n",
7676                 ext->name, id, btf_kind_str(targ_var), targ_var_name);
7677
7678        return 0;
7679}
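
/* BPF-side sketch (illustrative; the variable is just an example): this is
 * the resolution step behind typed ksym externs declared in BPF C code as:
 *
 *	extern const struct rq runqueues __ksym;
 *
 * A __weak ksym extern may stay unresolved, in which case it reads as NULL/0.
 */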
7680
7681static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj,
7682                                                struct extern_desc *ext)
7683{
7684        int local_func_proto_id, kfunc_proto_id, kfunc_id;
7685        struct module_btf *mod_btf = NULL;
7686        const struct btf_type *kern_func;
7687        struct btf *kern_btf = NULL;
7688        int ret;
7689
7690        local_func_proto_id = ext->ksym.type_id;
7691
7692        kfunc_id = find_ksym_btf_id(obj, ext->name, BTF_KIND_FUNC, &kern_btf, &mod_btf);
7693        if (kfunc_id < 0) {
7694                if (kfunc_id == -ESRCH && ext->is_weak)
7695                        return 0;
7696                pr_warn("extern (func ksym) '%s': not found in kernel or module BTFs\n",
7697                        ext->name);
7698                return kfunc_id;
7699        }
7700
7701        kern_func = btf__type_by_id(kern_btf, kfunc_id);
7702        kfunc_proto_id = kern_func->type;
7703
7704        ret = bpf_core_types_are_compat(obj->btf, local_func_proto_id,
7705                                        kern_btf, kfunc_proto_id);
7706        if (ret <= 0) {
7707                pr_warn("extern (func ksym) '%s': func_proto [%d] incompatible with kernel [%d]\n",
7708                        ext->name, local_func_proto_id, kfunc_proto_id);
7709                return -EINVAL;
7710        }
7711
7712        /* set index for module BTF fd in fd_array, if unset */
7713        if (mod_btf && !mod_btf->fd_array_idx) {
7714                /* insn->off is s16 */
7715                if (obj->fd_array_cnt == INT16_MAX) {
7716                        pr_warn("extern (func ksym) '%s': module BTF fd index %d too big to fit in bpf_insn offset\n",
7717                                ext->name, mod_btf->fd_array_idx);
7718                        return -E2BIG;
7719                }
7720                /* Cannot use index 0 for module BTF fd */
7721                if (!obj->fd_array_cnt)
7722                        obj->fd_array_cnt = 1;
7723
7724                ret = libbpf_ensure_mem((void **)&obj->fd_array, &obj->fd_array_cap, sizeof(int),
7725                                        obj->fd_array_cnt + 1);
7726                if (ret)
7727                        return ret;
7728                mod_btf->fd_array_idx = obj->fd_array_cnt;
7729                /* we assume module BTF FD is always >0 */
7730                obj->fd_array[obj->fd_array_cnt++] = mod_btf->fd;
7731        }
7732
7733        ext->is_set = true;
7734        ext->ksym.kernel_btf_id = kfunc_id;
7735        ext->ksym.btf_fd_idx = mod_btf ? mod_btf->fd_array_idx : 0;
7736        pr_debug("extern (func ksym) '%s': resolved to kernel [%d]\n",
7737                 ext->name, kfunc_id);
7738
7739        return 0;
7740}
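
/* BPF-side sketch (illustrative; the kfunc name is hypothetical): the same
 * mechanism resolves kernel function (kfunc) externs declared as:
 *
 *	extern void my_module_kfunc(struct task_struct *p) __ksym;
 *
 * For kfuncs coming from module BTF, the module's BTF FD is passed to the
 * kernel via fd_array, indexed through insn->off as set up above.
 */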
7741
7742static int bpf_object__resolve_ksyms_btf_id(struct bpf_object *obj)
7743{
7744        const struct btf_type *t;
7745        struct extern_desc *ext;
7746        int i, err;
7747
7748        for (i = 0; i < obj->nr_extern; i++) {
7749                ext = &obj->externs[i];
7750                if (ext->type != EXT_KSYM || !ext->ksym.type_id)
7751                        continue;
7752
7753                if (obj->gen_loader) {
7754                        ext->is_set = true;
7755                        ext->ksym.kernel_btf_obj_fd = 0;
7756                        ext->ksym.kernel_btf_id = 0;
7757                        continue;
7758                }
7759                t = btf__type_by_id(obj->btf, ext->btf_id);
7760                if (btf_is_var(t))
7761                        err = bpf_object__resolve_ksym_var_btf_id(obj, ext);
7762                else
7763                        err = bpf_object__resolve_ksym_func_btf_id(obj, ext);
7764                if (err)
7765                        return err;
7766        }
7767        return 0;
7768}
7769
7770static int bpf_object__resolve_externs(struct bpf_object *obj,
7771                                       const char *extra_kconfig)
7772{
7773        bool need_config = false, need_kallsyms = false;
7774        bool need_vmlinux_btf = false;
7775        struct extern_desc *ext;
7776        void *kcfg_data = NULL;
7777        int err, i;
7778
7779        if (obj->nr_extern == 0)
7780                return 0;
7781
7782        if (obj->kconfig_map_idx >= 0)
7783                kcfg_data = obj->maps[obj->kconfig_map_idx].mmaped;
7784
7785        for (i = 0; i < obj->nr_extern; i++) {
7786                ext = &obj->externs[i];
7787
7788                if (ext->type == EXT_KCFG &&
7789                    strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) {
7790                        void *ext_val = kcfg_data + ext->kcfg.data_off;
7791                        __u32 kver = get_kernel_version();
7792
7793                        if (!kver) {
7794                                pr_warn("failed to get kernel version\n");
7795                                return -EINVAL;
7796                        }
7797                        err = set_kcfg_value_num(ext, ext_val, kver);
7798                        if (err)
7799                                return err;
7800                        pr_debug("extern (kcfg) %s=0x%x\n", ext->name, kver);
7801                } else if (ext->type == EXT_KCFG && str_has_pfx(ext->name, "CONFIG_")) {
7802                        need_config = true;
7803                } else if (ext->type == EXT_KSYM) {
7804                        if (ext->ksym.type_id)
7805                                need_vmlinux_btf = true;
7806                        else
7807                                need_kallsyms = true;
7808                } else {
7809                        pr_warn("unrecognized extern '%s'\n", ext->name);
7810                        return -EINVAL;
7811                }
7812        }
7813        if (need_config && extra_kconfig) {
7814                err = bpf_object__read_kconfig_mem(obj, extra_kconfig, kcfg_data);
7815                if (err)
7816                        return -EINVAL;
7817                need_config = false;
7818                for (i = 0; i < obj->nr_extern; i++) {
7819                        ext = &obj->externs[i];
7820                        if (ext->type == EXT_KCFG && !ext->is_set) {
7821                                need_config = true;
7822                                break;
7823                        }
7824                }
7825        }
7826        if (need_config) {
7827                err = bpf_object__read_kconfig_file(obj, kcfg_data);
7828                if (err)
7829                        return -EINVAL;
7830        }
7831        if (need_kallsyms) {
7832                err = bpf_object__read_kallsyms_file(obj);
7833                if (err)
7834                        return -EINVAL;
7835        }
7836        if (need_vmlinux_btf) {
7837                err = bpf_object__resolve_ksyms_btf_id(obj);
7838                if (err)
7839                        return -EINVAL;
7840        }
7841        for (i = 0; i < obj->nr_extern; i++) {
7842                ext = &obj->externs[i];
7843
7844                if (!ext->is_set && !ext->is_weak) {
7845                        pr_warn("extern %s (strong) not resolved\n", ext->name);
7846                        return -ESRCH;
7847                } else if (!ext->is_set) {
7848                        pr_debug("extern %s (weak) not resolved, defaulting to zero\n",
7849                                 ext->name);
7850                }
7851        }
7852
7853        return 0;
7854}
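
/* BPF-side sketch (illustrative): the externs resolved here are declared in
 * BPF C code with the __kconfig/__ksym attributes, e.g.:
 *
 *	extern unsigned int LINUX_KERNEL_VERSION __kconfig;
 *	extern unsigned long CONFIG_HZ __kconfig __weak;
 *
 *	if (LINUX_KERNEL_VERSION >= 0x050f00) { ... } // 5.15.0, KERNEL_VERSION encoding
 *
 * Strong externs must resolve or the load fails with -ESRCH, as above.
 */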
7855
7856static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const char *target_btf_path)
7857{
7858        int err, i;
7859
7860        if (!obj)
7861                return libbpf_err(-EINVAL);
7862
7863        if (obj->loaded) {
7864                pr_warn("object '%s': load can't be attempted twice\n", obj->name);
7865                return libbpf_err(-EINVAL);
7866        }
7867
7868        if (obj->gen_loader)
7869                bpf_gen__init(obj->gen_loader, extra_log_level, obj->nr_programs, obj->nr_maps);
7870
7871        err = bpf_object__probe_loading(obj);
7872        err = err ? : bpf_object__load_vmlinux_btf(obj, false);
7873        err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
7874        err = err ? : bpf_object__sanitize_and_load_btf(obj);
7875        err = err ? : bpf_object__sanitize_maps(obj);
7876        err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
7877        err = err ? : bpf_object__create_maps(obj);
7878        err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path);
7879        err = err ? : bpf_object__load_progs(obj, extra_log_level);
7880        err = err ? : bpf_object_init_prog_arrays(obj);
7881
7882        if (obj->gen_loader) {
7883                /* reset FDs */
7884                if (obj->btf)
7885                        btf__set_fd(obj->btf, -1);
7886                for (i = 0; i < obj->nr_maps; i++)
7887                        obj->maps[i].fd = -1;
7888                if (!err)
7889                        err = bpf_gen__finish(obj->gen_loader, obj->nr_programs, obj->nr_maps);
7890        }
7891
7892        /* clean up fd_array */
7893        zfree(&obj->fd_array);
7894
7895        /* clean up module BTFs */
7896        for (i = 0; i < obj->btf_module_cnt; i++) {
7897                close(obj->btf_modules[i].fd);
7898                btf__free(obj->btf_modules[i].btf);
7899                free(obj->btf_modules[i].name);
7900        }
7901        free(obj->btf_modules);
7902
7903        /* clean up vmlinux BTF */
7904        btf__free(obj->btf_vmlinux);
7905        obj->btf_vmlinux = NULL;
7906
7907        obj->loaded = true; /* doesn't matter if successfully or not */
7908
7909        if (err)
7910                goto out;
7911
7912        return 0;
7913out:
7914        /* unpin any maps that were auto-pinned during load */
7915        for (i = 0; i < obj->nr_maps; i++)
7916                if (obj->maps[i].pinned && !obj->maps[i].reused)
7917                        bpf_map__unpin(&obj->maps[i], NULL);
7918
7919        bpf_object_unload(obj);
7920        pr_warn("failed to load object '%s'\n", obj->path);
7921        return libbpf_err(err);
7922}
7923
7924int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
7925{
7926        return bpf_object_load(attr->obj, attr->log_level, attr->target_btf_path);
7927}
7928
7929int bpf_object__load(struct bpf_object *obj)
7930{
7931        return bpf_object_load(obj, 0, NULL);
7932}
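
/* Usage sketch (illustrative; names are hypothetical) of the overall flow:
 *
 *	obj = bpf_object__open_file("prog.bpf.o", NULL);
 *	if (!obj)
 *		return -errno;
 *	err = bpf_object__load(obj);
 *	if (err)
 *		goto cleanup;
 *	link = bpf_program__attach(bpf_object__find_program_by_name(obj, "handler"));
 */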
7933
7934static int make_parent_dir(const char *path)
7935{
7936        char *cp, errmsg[STRERR_BUFSIZE];
7937        char *dname, *dir;
7938        int err = 0;
7939
7940        dname = strdup(path);
7941        if (dname == NULL)
7942                return -ENOMEM;
7943
7944        dir = dirname(dname);
7945        if (mkdir(dir, 0700) && errno != EEXIST)
7946                err = -errno;
7947
7948        free(dname);
7949        if (err) {
7950                cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
7951                pr_warn("failed to mkdir %s: %s\n", path, cp);
7952        }
7953        return err;
7954}
7955
7956static int check_path(const char *path)
7957{
7958        char *cp, errmsg[STRERR_BUFSIZE];
7959        struct statfs st_fs;
7960        char *dname, *dir;
7961        int err = 0;
7962
7963        if (path == NULL)
7964                return -EINVAL;
7965
7966        dname = strdup(path);
7967        if (dname == NULL)
7968                return -ENOMEM;
7969
7970        dir = dirname(dname);
7971        if (statfs(dir, &st_fs)) {
7972                cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
7973                pr_warn("failed to statfs %s: %s\n", dir, cp);
7974                err = -errno;
7975        }
7976        free(dname);
7977
7978        if (!err && st_fs.f_type != BPF_FS_MAGIC) {
7979                pr_warn("specified path %s is not on BPF FS\n", path);
7980                err = -EINVAL;
7981        }
7982
7983        return err;
7984}
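
/* Note: pin/unpin paths must live on a mounted BPF filesystem, which is what
 * the f_type check above enforces; e.g. (from a shell):
 *
 *	mount -t bpf bpf /sys/fs/bpf
 */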
7985
7986static int bpf_program_pin_instance(struct bpf_program *prog, const char *path, int instance)
7987{
7988        char *cp, errmsg[STRERR_BUFSIZE];
7989        int err;
7990
7991        err = make_parent_dir(path);
7992        if (err)
7993                return libbpf_err(err);
7994
7995        err = check_path(path);
7996        if (err)
7997                return libbpf_err(err);
7998
7999        if (prog == NULL) {
8000                pr_warn("invalid program pointer\n");
8001                return libbpf_err(-EINVAL);
8002        }
8003
8004        if (instance < 0 || instance >= prog->instances.nr) {
8005                pr_warn("invalid prog instance %d of prog %s (max %d)\n",
8006                        instance, prog->name, prog->instances.nr);
8007                return libbpf_err(-EINVAL);
8008        }
8009
8010        if (bpf_obj_pin(prog->instances.fds[instance], path)) {
8011                err = -errno;
8012                cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
8013                pr_warn("failed to pin program: %s\n", cp);
8014                return libbpf_err(err);
8015        }
8016        pr_debug("pinned program '%s'\n", path);
8017
8018        return 0;
8019}
8020
8021static int bpf_program_unpin_instance(struct bpf_program *prog, const char *path, int instance)
8022{
8023        int err;
8024
8025        err = check_path(path);
8026        if (err)
8027                return libbpf_err(err);
8028
8029        if (prog == NULL) {
8030                pr_warn("invalid program pointer\n");
8031                return libbpf_err(-EINVAL);
8032        }
8033
8034        if (instance < 0 || instance >= prog->instances.nr) {
8035                pr_warn("invalid prog instance %d of prog %s (max %d)\n",
8036                        instance, prog->name, prog->instances.nr);
8037                return libbpf_err(-EINVAL);
8038        }
8039
8040        err = unlink(path);
8041        if (err != 0)
8042                return libbpf_err(-errno);
8043
8044        pr_debug("unpinned program '%s'\n", path);
8045
8046        return 0;
8047}
8048
8049__attribute__((alias("bpf_program_pin_instance")))
8050int bpf_program__pin_instance(struct bpf_program *prog, const char *path, int instance);
8051
8052__attribute__((alias("bpf_program_unpin_instance")))
8053int bpf_program__unpin_instance(struct bpf_program *prog, const char *path, int instance);
8054
8055int bpf_program__pin(struct bpf_program *prog, const char *path)
8056{
8057        int i, err;
8058
8059        err = make_parent_dir(path);
8060        if (err)
8061                return libbpf_err(err);
8062
8063        err = check_path(path);
8064        if (err)
8065                return libbpf_err(err);
8066
8067        if (prog == NULL) {
8068                pr_warn("invalid program pointer\n");
8069                return libbpf_err(-EINVAL);
8070        }
8071
8072        if (prog->instances.nr <= 0) {
8073                pr_warn("no instances of prog %s to pin\n", prog->name);
8074                return libbpf_err(-EINVAL);
8075        }
8076
8077        if (prog->instances.nr == 1) {
8078                /* don't create subdirs when pinning single instance */
8079                return bpf_program_pin_instance(prog, path, 0);
8080        }
8081
8082        for (i = 0; i < prog->instances.nr; i++) {
8083                char buf[PATH_MAX];
8084                int len;
8085
8086                len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
8087                if (len < 0) {
8088                        err = -EINVAL;
8089                        goto err_unpin;
8090                } else if (len >= PATH_MAX) {
8091                        err = -ENAMETOOLONG;
8092                        goto err_unpin;
8093                }
8094
8095                err = bpf_program_pin_instance(prog, buf, i);
8096                if (err)
8097                        goto err_unpin;
8098        }
8099
8100        return 0;
8101
8102err_unpin:
8103        for (i = i - 1; i >= 0; i--) {
8104                char buf[PATH_MAX];
8105                int len;
8106
8107                len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
8108                if (len < 0)
8109                        continue;
8110                else if (len >= PATH_MAX)
8111                        continue;
8112
8113                bpf_program_unpin_instance(prog, buf, i);
8114        }
8115
8116        rmdir(path);
8117
8118        return libbpf_err(err);
8119}
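
/* Usage sketch (illustrative; the path is hypothetical):
 *
 *	err = bpf_program__pin(prog, "/sys/fs/bpf/my_prog");
 *	...
 *	err = bpf_program__unpin(prog, "/sys/fs/bpf/my_prog");
 *
 * With a single instance this pins one file; with multiple instances a
 * directory with one pin per instance index is created, as above.
 */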
8120
8121int bpf_program__unpin(struct bpf_program *prog, const char *path)
8122{
8123        int i, err;
8124
8125        err = check_path(path);
8126        if (err)
8127                return libbpf_err(err);
8128
8129        if (prog == NULL) {
8130                pr_warn("invalid program pointer\n");
8131                return libbpf_err(-EINVAL);
8132        }
8133
8134        if (prog->instances.nr <= 0) {
8135                pr_warn("no instances of prog %s to unpin\n", prog->name);
8136                return libbpf_err(-EINVAL);
8137        }
8138
8139        if (prog->instances.nr == 1) {
8140                /* don't create subdirs when pinning single instance */
8141                return bpf_program_unpin_instance(prog, path, 0);
8142        }
8143
8144        for (i = 0; i < prog->instances.nr; i++) {
8145                char buf[PATH_MAX];
8146                int len;
8147
8148                len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
8149                if (len < 0)
8150                        return libbpf_err(-EINVAL);
8151                else if (len >= PATH_MAX)
8152                        return libbpf_err(-ENAMETOOLONG);
8153
8154                err = bpf_program_unpin_instance(prog, buf, i);
8155                if (err)
8156                        return err;
8157        }
8158
8159        err = rmdir(path);
8160        if (err)
8161                return libbpf_err(-errno);
8162
8163        return 0;
8164}
8165
8166int bpf_map__pin(struct bpf_map *map, const char *path)
8167{
8168        char *cp, errmsg[STRERR_BUFSIZE];
8169        int err;
8170
8171        if (map == NULL) {
8172                pr_warn("invalid map pointer\n");
8173                return libbpf_err(-EINVAL);
8174        }
8175
8176        if (map->pin_path) {
8177                if (path && strcmp(path, map->pin_path)) {
8178                        pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
8179                                bpf_map__name(map), map->pin_path, path);
8180                        return libbpf_err(-EINVAL);
8181                } else if (map->pinned) {
8182                        pr_debug("map '%s' already pinned at '%s'; not re-pinning\n",
8183                                 bpf_map__name(map), map->pin_path);
8184                        return 0;
8185                }
8186        } else {
8187                if (!path) {
8188                        pr_warn("missing a path to pin map '%s' at\n",
8189                                bpf_map__name(map));
8190                        return libbpf_err(-EINVAL);
8191                } else if (map->pinned) {
8192                        pr_warn("map '%s' already pinned\n", bpf_map__name(map));
8193                        return libbpf_err(-EEXIST);
8194                }
8195
8196                map->pin_path = strdup(path);
8197                if (!map->pin_path) {
8198                        err = -errno;
8199                        goto out_err;
8200                }
8201        }
8202
8203        err = make_parent_dir(map->pin_path);
8204        if (err)
8205                return libbpf_err(err);
8206
8207        err = check_path(map->pin_path);
8208        if (err)
8209                return libbpf_err(err);
8210
8211        if (bpf_obj_pin(map->fd, map->pin_path)) {
8212                err = -errno;
8213                goto out_err;
8214        }
8215
8216        map->pinned = true;
8217        pr_debug("pinned map '%s'\n", map->pin_path);
8218
8219        return 0;
8220
8221out_err:
8222        cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
8223        pr_warn("failed to pin map: %s\n", cp);
8224        return libbpf_err(err);
8225}
8226
8227int bpf_map__unpin(struct bpf_map *map, const char *path)
8228{
8229        int err;
8230
8231        if (map == NULL) {
8232                pr_warn("invalid map pointer\n");
8233                return libbpf_err(-EINVAL);
8234        }
8235
8236        if (map->pin_path) {
8237                if (path && strcmp(path, map->pin_path)) {
8238                        pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
8239                                bpf_map__name(map), map->pin_path, path);
8240                        return libbpf_err(-EINVAL);
8241                }
8242                path = map->pin_path;
8243        } else if (!path) {
8244                pr_warn("no path to unpin map '%s' from\n",
8245                        bpf_map__name(map));
8246                return libbpf_err(-EINVAL);
8247        }
8248
8249        err = check_path(path);
8250        if (err)
8251                return libbpf_err(err);
8252
8253        err = unlink(path);
8254        if (err != 0)
8255                return libbpf_err(-errno);
8256
8257        map->pinned = false;
8258        pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path);
8259
8260        return 0;
8261}
8262
8263int bpf_map__set_pin_path(struct bpf_map *map, const char *path)
8264{
8265        char *new = NULL;
8266
8267        if (path) {
8268                new = strdup(path);
8269                if (!new)
8270                        return libbpf_err(-errno);
8271        }
8272
8273        free(map->pin_path);
8274        map->pin_path = new;
8275        return 0;
8276}
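
/* Usage sketch (illustrative; map name and path are hypothetical): setting a
 * pin path before load lets bpf_object__load() reuse a compatible pinned map
 * or auto-pin the newly created one, same as LIBBPF_PIN_BY_NAME definitions:
 *
 *	struct bpf_map *map = bpf_object__find_map_by_name(obj, "my_map");
 *
 *	err = bpf_map__set_pin_path(map, "/sys/fs/bpf/my_map");
 *	err = err ? : bpf_object__load(obj);
 */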
8277
8278__alias(bpf_map__pin_path)
8279const char *bpf_map__get_pin_path(const struct bpf_map *map);
8280
8281const char *bpf_map__pin_path(const struct bpf_map *map)
8282{
8283        return map->pin_path;
8284}
8285
8286bool bpf_map__is_pinned(const struct bpf_map *map)
8287{
8288        return map->pinned;
8289}
8290
8291static void sanitize_pin_path(char *s)
8292{
8293        /* bpffs disallows periods in path names */
8294        while (*s) {
8295                if (*s == '.')
8296                        *s = '_';
8297                s++;
8298        }
8299}
8300
8301int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
8302{
8303        struct bpf_map *map;
8304        int err;
8305
8306        if (!obj)
8307                return libbpf_err(-ENOENT);
8308
8309        if (!obj->loaded) {
8310                pr_warn("object not yet loaded; load it first\n");
8311                return libbpf_err(-ENOENT);
8312        }
8313
8314        bpf_object__for_each_map(map, obj) {
8315                char *pin_path = NULL;
8316                char buf[PATH_MAX];
8317
8318                if (!map->autocreate)
8319                        continue;
8320
8321                if (path) {
8322                        int len;
8323
8324                        len = snprintf(buf, PATH_MAX, "%s/%s", path,
8325                                       bpf_map__name(map));
8326                        if (len < 0) {
8327                                err = -EINVAL;
8328                                goto err_unpin_maps;
8329                        } else if (len >= PATH_MAX) {
8330                                err = -ENAMETOOLONG;
8331                                goto err_unpin_maps;
8332                        }
8333                        sanitize_pin_path(buf);
8334                        pin_path = buf;
8335                } else if (!map->pin_path) {
8336                        continue;
8337                }
8338
8339                err = bpf_map__pin(map, pin_path);
8340                if (err)
8341                        goto err_unpin_maps;
8342        }
8343
8344        return 0;
8345
8346err_unpin_maps:
8347        while ((map = bpf_object__prev_map(obj, map))) {
8348                if (!map->pin_path)
8349                        continue;
8350
8351                bpf_map__unpin(map, NULL);
8352        }
8353
8354        return libbpf_err(err);
8355}
8356
8357int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
8358{
8359        struct bpf_map *map;
8360        int err;
8361
8362        if (!obj)
8363                return libbpf_err(-ENOENT);
8364
8365        bpf_object__for_each_map(map, obj) {
8366                char *pin_path = NULL;
8367                char buf[PATH_MAX];
8368
8369                if (path) {
8370                        int len;
8371
8372                        len = snprintf(buf, PATH_MAX, "%s/%s", path,
8373                                       bpf_map__name(map));
8374                        if (len < 0)
8375                                return libbpf_err(-EINVAL);
8376                        else if (len >= PATH_MAX)
8377                                return libbpf_err(-ENAMETOOLONG);
8378                        sanitize_pin_path(buf);
8379                        pin_path = buf;
8380                } else if (!map->pin_path) {
8381                        continue;
8382                }
8383
8384                err = bpf_map__unpin(map, pin_path);
8385                if (err)
8386                        return libbpf_err(err);
8387        }
8388
8389        return 0;
8390}
8391
8392int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
8393{
8394        struct bpf_program *prog;
8395        int err;
8396
8397        if (!obj)
8398                return libbpf_err(-ENOENT);
8399
8400        if (!obj->loaded) {
8401                pr_warn("object not yet loaded; load it first\n");
8402                return libbpf_err(-ENOENT);
8403        }
8404
8405        bpf_object__for_each_program(prog, obj) {
8406                char buf[PATH_MAX];
8407                int len;
8408
8409                len = snprintf(buf, PATH_MAX, "%s/%s", path,
8410                               prog->pin_name);
8411                if (len < 0) {
8412                        err = -EINVAL;
8413                        goto err_unpin_programs;
8414                } else if (len >= PATH_MAX) {
8415                        err = -ENAMETOOLONG;
8416                        goto err_unpin_programs;
8417                }
8418
8419                err = bpf_program__pin(prog, buf);
8420                if (err)
8421                        goto err_unpin_programs;
8422        }
8423
8424        return 0;
8425
8426err_unpin_programs:
8427        while ((prog = bpf_object__prev_program(obj, prog))) {
8428                char buf[PATH_MAX];
8429                int len;
8430
8431                len = snprintf(buf, PATH_MAX, "%s/%s", path,
8432                               prog->pin_name);
8433                if (len < 0)
8434                        continue;
8435                else if (len >= PATH_MAX)
8436                        continue;
8437
8438                bpf_program__unpin(prog, buf);
8439        }
8440
8441        return libbpf_err(err);
8442}
8443
8444int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
8445{
8446        struct bpf_program *prog;
8447        int err;
8448
8449        if (!obj)
8450                return libbpf_err(-ENOENT);
8451
8452        bpf_object__for_each_program(prog, obj) {
8453                char buf[PATH_MAX];
8454                int len;
8455
8456                len = snprintf(buf, PATH_MAX, "%s/%s", path,
8457                               prog->pin_name);
8458                if (len < 0)
8459                        return libbpf_err(-EINVAL);
8460                else if (len >= PATH_MAX)
8461                        return libbpf_err(-ENAMETOOLONG);
8462
8463                err = bpf_program__unpin(prog, buf);
8464                if (err)
8465                        return libbpf_err(err);
8466        }
8467
8468        return 0;
8469}
8470
8471int bpf_object__pin(struct bpf_object *obj, const char *path)
8472{
8473        int err;
8474
8475        err = bpf_object__pin_maps(obj, path);
8476        if (err)
8477                return libbpf_err(err);
8478
8479        err = bpf_object__pin_programs(obj, path);
8480        if (err) {
8481                bpf_object__unpin_maps(obj, path);
8482                return libbpf_err(err);
8483        }
8484
8485        return 0;
8486}
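
/* Usage sketch (hypothetical caller code): load an object and pin all of
 * its maps and programs under one bpffs directory:
 *
 *        struct bpf_object *obj = bpf_object__open("prog.bpf.o");
 *
 *        if (libbpf_get_error(obj))
 *                return -EINVAL;
 *        if (bpf_object__load(obj) ||
 *            bpf_object__pin(obj, "/sys/fs/bpf/myapp"))
 *                goto cleanup;
 *        ...
 *        bpf_object__unpin_programs(obj, "/sys/fs/bpf/myapp");
 *        bpf_object__unpin_maps(obj, "/sys/fs/bpf/myapp");
 *        bpf_object__close(obj);
 */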
8487
8488static void bpf_map__destroy(struct bpf_map *map)
8489{
8490        if (map->clear_priv)
8491                map->clear_priv(map, map->priv);
8492        map->priv = NULL;
8493        map->clear_priv = NULL;
8494
8495        if (map->inner_map) {
8496                bpf_map__destroy(map->inner_map);
8497                zfree(&map->inner_map);
8498        }
8499
8500        zfree(&map->init_slots);
8501        map->init_slots_sz = 0;
8502
8503        if (map->mmaped) {
8504                munmap(map->mmaped, bpf_map_mmap_sz(map));
8505                map->mmaped = NULL;
8506        }
8507
8508        if (map->st_ops) {
8509                zfree(&map->st_ops->data);
8510                zfree(&map->st_ops->progs);
8511                zfree(&map->st_ops->kern_func_off);
8512                zfree(&map->st_ops);
8513        }
8514
8515        zfree(&map->name);
8516        zfree(&map->real_name);
8517        zfree(&map->pin_path);
8518
8519        if (map->fd >= 0)
8520                zclose(map->fd);
8521}
8522
8523void bpf_object__close(struct bpf_object *obj)
8524{
8525        size_t i;
8526
8527        if (IS_ERR_OR_NULL(obj))
8528                return;
8529
8530        if (obj->clear_priv)
8531                obj->clear_priv(obj, obj->priv);
8532
8533        usdt_manager_free(obj->usdt_man);
8534        obj->usdt_man = NULL;
8535
8536        bpf_gen__free(obj->gen_loader);
8537        bpf_object__elf_finish(obj);
8538        bpf_object_unload(obj);
8539        btf__free(obj->btf);
8540        btf_ext__free(obj->btf_ext);
8541
8542        for (i = 0; i < obj->nr_maps; i++)
8543                bpf_map__destroy(&obj->maps[i]);
8544
8545        zfree(&obj->btf_custom_path);
8546        zfree(&obj->kconfig);
8547        zfree(&obj->externs);
8548        obj->nr_extern = 0;
8549
8550        zfree(&obj->maps);
8551        obj->nr_maps = 0;
8552
8553        if (obj->programs && obj->nr_programs) {
8554                for (i = 0; i < obj->nr_programs; i++)
8555                        bpf_program__exit(&obj->programs[i]);
8556        }
8557        zfree(&obj->programs);
8558
8559        list_del(&obj->list);
8560        free(obj);
8561}
8562
8563struct bpf_object *
8564bpf_object__next(struct bpf_object *prev)
8565{
8566        struct bpf_object *next;
8567        bool strict = (libbpf_mode & LIBBPF_STRICT_NO_OBJECT_LIST);
8568
8569        if (strict)
8570                return NULL;
8571
8572        if (!prev)
8573                next = list_first_entry(&bpf_objects_list,
8574                                        struct bpf_object,
8575                                        list);
8576        else
8577                next = list_next_entry(prev, list);
8578
8579        /* An empty list is detected here, so no check is needed on entry. */
8580        if (&next->list == &bpf_objects_list)
8581                return NULL;
8582
8583        return next;
8584}
8585
8586const char *bpf_object__name(const struct bpf_object *obj)
8587{
8588        return obj ? obj->name : libbpf_err_ptr(-EINVAL);
8589}
8590
8591unsigned int bpf_object__kversion(const struct bpf_object *obj)
8592{
8593        return obj ? obj->kern_version : 0;
8594}
8595
8596struct btf *bpf_object__btf(const struct bpf_object *obj)
8597{
8598        return obj ? obj->btf : NULL;
8599}
8600
8601int bpf_object__btf_fd(const struct bpf_object *obj)
8602{
8603        return obj->btf ? btf__fd(obj->btf) : -1;
8604}
8605
8606int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version)
8607{
8608        if (obj->loaded)
8609                return libbpf_err(-EINVAL);
8610
8611        obj->kern_version = kern_version;
8612
8613        return 0;
8614}
8615
8616int bpf_object__set_priv(struct bpf_object *obj, void *priv,
8617                         bpf_object_clear_priv_t clear_priv)
8618{
8619        if (obj->priv && obj->clear_priv)
8620                obj->clear_priv(obj, obj->priv);
8621
8622        obj->priv = priv;
8623        obj->clear_priv = clear_priv;
8624        return 0;
8625}
8626
8627void *bpf_object__priv(const struct bpf_object *obj)
8628{
8629        return obj ? obj->priv : libbpf_err_ptr(-EINVAL);
8630}
8631
8632int bpf_object__gen_loader(struct bpf_object *obj, struct gen_loader_opts *opts)
8633{
8634        struct bpf_gen *gen;
8635
8636        if (!opts)
8637                return -EFAULT;
8638        if (!OPTS_VALID(opts, gen_loader_opts))
8639                return -EINVAL;
8640        gen = calloc(1, sizeof(*gen));
8641        if (!gen)
8642                return -ENOMEM;
8643        gen->opts = opts;
8644        obj->gen_loader = gen;
8645        return 0;
8646}
8647
8648static struct bpf_program *
8649__bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
8650                    bool forward)
8651{
8652        size_t nr_programs = obj->nr_programs;
8653        ssize_t idx;
8654
8655        if (!nr_programs)
8656                return NULL;
8657
8658        if (!p)
8659                /* Iterate from the beginning */
8660                return forward ? &obj->programs[0] :
8661                        &obj->programs[nr_programs - 1];
8662
8663        if (p->obj != obj) {
8664                pr_warn("error: program handler doesn't match object\n");
8665                return errno = EINVAL, NULL;
8666        }
8667
8668        idx = (p - obj->programs) + (forward ? 1 : -1);
8669        if (idx >= obj->nr_programs || idx < 0)
8670                return NULL;
8671        return &obj->programs[idx];
8672}
8673
8674struct bpf_program *
8675bpf_program__next(struct bpf_program *prev, const struct bpf_object *obj)
8676{
8677        return bpf_object__next_program(obj, prev);
8678}
8679
8680struct bpf_program *
8681bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prev)
8682{
8683        struct bpf_program *prog = prev;
8684
8685        do {
8686                prog = __bpf_program__iter(prog, obj, true);
8687        } while (prog && prog_is_subprog(obj, prog));
8688
8689        return prog;
8690}
8691
8692struct bpf_program *
8693bpf_program__prev(struct bpf_program *next, const struct bpf_object *obj)
8694{
8695        return bpf_object__prev_program(obj, next);
8696}
8697
8698struct bpf_program *
8699bpf_object__prev_program(const struct bpf_object *obj, struct bpf_program *next)
8700{
8701        struct bpf_program *prog = next;
8702
8703        do {
8704                prog = __bpf_program__iter(prog, obj, false);
8705        } while (prog && prog_is_subprog(obj, prog));
8706
8707        return prog;
8708}
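
/* Usage sketch: enumerate all entry-point programs in an object (subprogs
 * are skipped by this iterator); bpf_object__for_each_program() is a
 * convenience macro around the same logic:
 *
 *        struct bpf_program *prog = NULL;
 *
 *        while ((prog = bpf_object__next_program(obj, prog)))
 *                printf("%s (SEC %s)\n", bpf_program__name(prog),
 *                       bpf_program__section_name(prog));
 */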
8709
8710int bpf_program__set_priv(struct bpf_program *prog, void *priv,
8711                          bpf_program_clear_priv_t clear_priv)
8712{
8713        if (prog->priv && prog->clear_priv)
8714                prog->clear_priv(prog, prog->priv);
8715
8716        prog->priv = priv;
8717        prog->clear_priv = clear_priv;
8718        return 0;
8719}
8720
8721void *bpf_program__priv(const struct bpf_program *prog)
8722{
8723        return prog ? prog->priv : libbpf_err_ptr(-EINVAL);
8724}
8725
8726void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
8727{
8728        prog->prog_ifindex = ifindex;
8729}
8730
8731const char *bpf_program__name(const struct bpf_program *prog)
8732{
8733        return prog->name;
8734}
8735
8736const char *bpf_program__section_name(const struct bpf_program *prog)
8737{
8738        return prog->sec_name;
8739}
8740
8741const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy)
8742{
8743        const char *title;
8744
8745        title = prog->sec_name;
8746        if (needs_copy) {
8747                title = strdup(title);
8748                if (!title) {
8749                        pr_warn("failed to strdup program title\n");
8750                        return libbpf_err_ptr(-ENOMEM);
8751                }
8752        }
8753
8754        return title;
8755}
8756
8757bool bpf_program__autoload(const struct bpf_program *prog)
8758{
8759        return prog->autoload;
8760}
8761
8762int bpf_program__set_autoload(struct bpf_program *prog, bool autoload)
8763{
8764        if (prog->obj->loaded)
8765                return libbpf_err(-EINVAL);
8766
8767        prog->autoload = autoload;
8768        return 0;
8769}
8770
8771static int bpf_program_nth_fd(const struct bpf_program *prog, int n);
8772
8773int bpf_program__fd(const struct bpf_program *prog)
8774{
8775        return bpf_program_nth_fd(prog, 0);
8776}
8777
8778size_t bpf_program__size(const struct bpf_program *prog)
8779{
8780        return prog->insns_cnt * BPF_INSN_SZ;
8781}
8782
8783const struct bpf_insn *bpf_program__insns(const struct bpf_program *prog)
8784{
8785        return prog->insns;
8786}
8787
8788size_t bpf_program__insn_cnt(const struct bpf_program *prog)
8789{
8790        return prog->insns_cnt;
8791}
8792
8793int bpf_program__set_insns(struct bpf_program *prog,
8794                           struct bpf_insn *new_insns, size_t new_insn_cnt)
8795{
8796        struct bpf_insn *insns;
8797
8798        if (prog->obj->loaded)
8799                return -EBUSY;
8800
8801        insns = libbpf_reallocarray(prog->insns, new_insn_cnt, sizeof(*insns));
8802        if (!insns) {
8803                pr_warn("prog '%s': failed to realloc prog code\n", prog->name);
8804                return -ENOMEM;
8805        }
8806        memcpy(insns, new_insns, new_insn_cnt * sizeof(*insns));
8807
8808        prog->insns = insns;
8809        prog->insns_cnt = new_insn_cnt;
8810        return 0;
8811}
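
/* Sketch of an (advanced and rarely needed) use of bpf_program__set_insns():
 * patch a copy of the program's instructions before load. The tweaked field
 * is purely illustrative and error handling is omitted:
 *
 *        size_t cnt = bpf_program__insn_cnt(prog);
 *        struct bpf_insn *insns;
 *
 *        insns = malloc(cnt * sizeof(*insns));
 *        memcpy(insns, bpf_program__insns(prog), cnt * sizeof(*insns));
 *        insns[0].imm = 42;
 *        err = bpf_program__set_insns(prog, insns, cnt);
 *        free(insns);
 *
 * (set_insns copies the instructions internally, so the caller's buffer can
 * be freed right away)
 */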
8812
8813int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
8814                          bpf_program_prep_t prep)
8815{
8816        int *instances_fds;
8817
8818        if (nr_instances <= 0 || !prep)
8819                return libbpf_err(-EINVAL);
8820
8821        if (prog->instances.nr > 0 || prog->instances.fds) {
8822                pr_warn("Can't set pre-processor after loading\n");
8823                return libbpf_err(-EINVAL);
8824        }
8825
8826        instances_fds = malloc(sizeof(int) * nr_instances);
8827        if (!instances_fds) {
8828                pr_warn("failed to allocate memory for fds\n");
8829                return libbpf_err(-ENOMEM);
8830        }
8831
8832        /* fill all fds with -1 */
8833        memset(instances_fds, -1, sizeof(int) * nr_instances);
8834
8835        prog->instances.nr = nr_instances;
8836        prog->instances.fds = instances_fds;
8837        prog->preprocessor = prep;
8838        return 0;
8839}
8840
8841__attribute__((alias("bpf_program_nth_fd")))
8842int bpf_program__nth_fd(const struct bpf_program *prog, int n);
8843
8844static int bpf_program_nth_fd(const struct bpf_program *prog, int n)
8845{
8846        int fd;
8847
8848        if (!prog)
8849                return libbpf_err(-EINVAL);
8850
8851        if (n >= prog->instances.nr || n < 0) {
8852                pr_warn("Can't get the %dth fd from program %s: only %d instances\n",
8853                        n, prog->name, prog->instances.nr);
8854                return libbpf_err(-EINVAL);
8855        }
8856
8857        fd = prog->instances.fds[n];
8858        if (fd < 0) {
8859                pr_warn("%dth instance of program '%s' is invalid\n",
8860                        n, prog->name);
8861                return libbpf_err(-ENOENT);
8862        }
8863
8864        return fd;
8865}
8866
8867__alias(bpf_program__type)
8868enum bpf_prog_type bpf_program__get_type(const struct bpf_program *prog);
8869
8870enum bpf_prog_type bpf_program__type(const struct bpf_program *prog)
8871{
8872        return prog->type;
8873}
8874
8875int bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
8876{
8877        if (prog->obj->loaded)
8878                return libbpf_err(-EBUSY);
8879
8880        prog->type = type;
8881        return 0;
8882}
8883
8884static bool bpf_program__is_type(const struct bpf_program *prog,
8885                                 enum bpf_prog_type type)
8886{
8887        return prog ? (prog->type == type) : false;
8888}
8889
8890#define BPF_PROG_TYPE_FNS(NAME, TYPE)                           \
8891int bpf_program__set_##NAME(struct bpf_program *prog)           \
8892{                                                               \
8893        if (!prog)                                              \
8894                return libbpf_err(-EINVAL);                     \
8895        return bpf_program__set_type(prog, TYPE);                       \
8896}                                                               \
8897                                                                \
8898bool bpf_program__is_##NAME(const struct bpf_program *prog)     \
8899{                                                               \
8900        return bpf_program__is_type(prog, TYPE);                \
8901}                                                               \
8902
8903BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
8904BPF_PROG_TYPE_FNS(lsm, BPF_PROG_TYPE_LSM);
8905BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
8906BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
8907BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
8908BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
8909BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
8910BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
8911BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
8912BPF_PROG_TYPE_FNS(tracing, BPF_PROG_TYPE_TRACING);
8913BPF_PROG_TYPE_FNS(struct_ops, BPF_PROG_TYPE_STRUCT_OPS);
8914BPF_PROG_TYPE_FNS(extension, BPF_PROG_TYPE_EXT);
8915BPF_PROG_TYPE_FNS(sk_lookup, BPF_PROG_TYPE_SK_LOOKUP);
8916
8917__alias(bpf_program__expected_attach_type)
8918enum bpf_attach_type bpf_program__get_expected_attach_type(const struct bpf_program *prog);
8919
8920enum bpf_attach_type bpf_program__expected_attach_type(const struct bpf_program *prog)
8921{
8922        return prog->expected_attach_type;
8923}
8924
8925int bpf_program__set_expected_attach_type(struct bpf_program *prog,
8926                                           enum bpf_attach_type type)
8927{
8928        if (prog->obj->loaded)
8929                return libbpf_err(-EBUSY);
8930
8931        prog->expected_attach_type = type;
8932        return 0;
8933}
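
/* Usage sketch: override a program's type and expected attach type after
 * open but before load (both setters return -EBUSY once the object is
 * loaded):
 *
 *        bpf_program__set_type(prog, BPF_PROG_TYPE_XDP);
 *        bpf_program__set_expected_attach_type(prog, BPF_XDP);
 */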
8934
8935__u32 bpf_program__flags(const struct bpf_program *prog)
8936{
8937        return prog->prog_flags;
8938}
8939
8940int bpf_program__set_flags(struct bpf_program *prog, __u32 flags)
8941{
8942        if (prog->obj->loaded)
8943                return libbpf_err(-EBUSY);
8944
8945        prog->prog_flags = flags;
8946        return 0;
8947}
8948
8949__u32 bpf_program__log_level(const struct bpf_program *prog)
8950{
8951        return prog->log_level;
8952}
8953
8954int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_level)
8955{
8956        if (prog->obj->loaded)
8957                return libbpf_err(-EBUSY);
8958
8959        prog->log_level = log_level;
8960        return 0;
8961}
8962
8963const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_size)
8964{
8965        *log_size = prog->log_size;
8966        return prog->log_buf;
8967}
8968
8969int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size)
8970{
8971        if (log_size && !log_buf)
8972                return -EINVAL;
8973        if (log_size > UINT_MAX)
8974                return -EINVAL;
8975        if (prog->obj->loaded)
8976                return -EBUSY;
8977
8978        prog->log_buf = log_buf;
8979        prog->log_size = log_size;
8980        return 0;
8981}
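
/* Usage sketch: route the verifier log for one program into a caller-owned
 * buffer (the buffer size is illustrative; log level 2 requests a verbose
 * verifier log):
 *
 *        static char vlog[1024 * 1024];
 *
 *        bpf_program__set_log_buf(prog, vlog, sizeof(vlog));
 *        bpf_program__set_log_level(prog, 2);
 *        if (bpf_object__load(obj))
 *                fprintf(stderr, "verifier log:\n%s\n", vlog);
 */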
8982
8983#define SEC_DEF(sec_pfx, ptype, atype, flags, ...) {                        \
8984        .sec = (char *)sec_pfx,                                             \
8985        .prog_type = BPF_PROG_TYPE_##ptype,                                 \
8986        .expected_attach_type = atype,                                      \
8987        .cookie = (long)(flags),                                            \
8988        .prog_prepare_load_fn = libbpf_prepare_prog_load,                   \
8989        __VA_ARGS__                                                         \
8990}
8991
8992static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link);
8993static int attach_uprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link);
8994static int attach_usdt(const struct bpf_program *prog, long cookie, struct bpf_link **link);
8995static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link);
8996static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link);
8997static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link);
8998static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link);
8999static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9000static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9001
9002static const struct bpf_sec_def section_defs[] = {
9003        SEC_DEF("socket",               SOCKET_FILTER, 0, SEC_NONE | SEC_SLOPPY_PFX),
9004        SEC_DEF("sk_reuseport/migrate", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
9005        SEC_DEF("sk_reuseport",         SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
9006        SEC_DEF("kprobe+",              KPROBE, 0, SEC_NONE, attach_kprobe),
9007        SEC_DEF("uprobe+",              KPROBE, 0, SEC_NONE, attach_uprobe),
9008        SEC_DEF("kretprobe+",           KPROBE, 0, SEC_NONE, attach_kprobe),
9009        SEC_DEF("uretprobe+",           KPROBE, 0, SEC_NONE, attach_uprobe),
9010        SEC_DEF("kprobe.multi+",        KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi),
9011        SEC_DEF("kretprobe.multi+",     KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi),
9012        SEC_DEF("usdt+",                KPROBE, 0, SEC_NONE, attach_usdt),
9013        SEC_DEF("tc",                   SCHED_CLS, 0, SEC_NONE),
9014        SEC_DEF("classifier",           SCHED_CLS, 0, SEC_NONE | SEC_SLOPPY_PFX | SEC_DEPRECATED),
9015        SEC_DEF("action",               SCHED_ACT, 0, SEC_NONE | SEC_SLOPPY_PFX),
9016        SEC_DEF("tracepoint+",          TRACEPOINT, 0, SEC_NONE, attach_tp),
9017        SEC_DEF("tp+",                  TRACEPOINT, 0, SEC_NONE, attach_tp),
9018        SEC_DEF("raw_tracepoint+",      RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp),
9019        SEC_DEF("raw_tp+",              RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp),
9020        SEC_DEF("raw_tracepoint.w+",    RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp),
9021        SEC_DEF("raw_tp.w+",            RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp),
9022        SEC_DEF("tp_btf+",              TRACING, BPF_TRACE_RAW_TP, SEC_ATTACH_BTF, attach_trace),
9023        SEC_DEF("fentry+",              TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF, attach_trace),
9024        SEC_DEF("fmod_ret+",            TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF, attach_trace),
9025        SEC_DEF("fexit+",               TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF, attach_trace),
9026        SEC_DEF("fentry.s+",            TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
9027        SEC_DEF("fmod_ret.s+",          TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
9028        SEC_DEF("fexit.s+",             TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
9029        SEC_DEF("freplace+",            EXT, 0, SEC_ATTACH_BTF, attach_trace),
9030        SEC_DEF("lsm+",                 LSM, BPF_LSM_MAC, SEC_ATTACH_BTF, attach_lsm),
9031        SEC_DEF("lsm.s+",               LSM, BPF_LSM_MAC, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_lsm),
9032        SEC_DEF("iter+",                TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF, attach_iter),
9033        SEC_DEF("iter.s+",              TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_iter),
9034        SEC_DEF("syscall",              SYSCALL, 0, SEC_SLEEPABLE),
9035        SEC_DEF("xdp.frags/devmap",     XDP, BPF_XDP_DEVMAP, SEC_XDP_FRAGS),
9036        SEC_DEF("xdp/devmap",           XDP, BPF_XDP_DEVMAP, SEC_ATTACHABLE),
9037        SEC_DEF("xdp_devmap/",          XDP, BPF_XDP_DEVMAP, SEC_ATTACHABLE | SEC_DEPRECATED),
9038        SEC_DEF("xdp.frags/cpumap",     XDP, BPF_XDP_CPUMAP, SEC_XDP_FRAGS),
9039        SEC_DEF("xdp/cpumap",           XDP, BPF_XDP_CPUMAP, SEC_ATTACHABLE),
9040        SEC_DEF("xdp_cpumap/",          XDP, BPF_XDP_CPUMAP, SEC_ATTACHABLE | SEC_DEPRECATED),
9041        SEC_DEF("xdp.frags",            XDP, BPF_XDP, SEC_XDP_FRAGS),
9042        SEC_DEF("xdp",                  XDP, BPF_XDP, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
9043        SEC_DEF("perf_event",           PERF_EVENT, 0, SEC_NONE | SEC_SLOPPY_PFX),
9044        SEC_DEF("lwt_in",               LWT_IN, 0, SEC_NONE | SEC_SLOPPY_PFX),
9045        SEC_DEF("lwt_out",              LWT_OUT, 0, SEC_NONE | SEC_SLOPPY_PFX),
9046        SEC_DEF("lwt_xmit",             LWT_XMIT, 0, SEC_NONE | SEC_SLOPPY_PFX),
9047        SEC_DEF("lwt_seg6local",        LWT_SEG6LOCAL, 0, SEC_NONE | SEC_SLOPPY_PFX),
9048        SEC_DEF("cgroup_skb/ingress",   CGROUP_SKB, BPF_CGROUP_INET_INGRESS, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
9049        SEC_DEF("cgroup_skb/egress",    CGROUP_SKB, BPF_CGROUP_INET_EGRESS, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
9050        SEC_DEF("cgroup/skb",           CGROUP_SKB, 0, SEC_NONE | SEC_SLOPPY_PFX),
9051        SEC_DEF("cgroup/sock_create",   CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
9052        SEC_DEF("cgroup/sock_release",  CGROUP_SOCK, BPF_CGROUP_INET_SOCK_RELEASE, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
9053        SEC_DEF("cgroup/sock",          CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
9054        SEC_DEF("cgroup/post_bind4",    CGROUP_SOCK, BPF_CGROUP_INET4_POST_BIND, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
9055        SEC_DEF("cgroup/post_bind6",    CGROUP_SOCK, BPF_CGROUP_INET6_POST_BIND, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
9056        SEC_DEF("cgroup/dev",           CGROUP_DEVICE, BPF_CGROUP_DEVICE, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
9057        SEC_DEF("sockops",              SOCK_OPS, BPF_CGROUP_SOCK_OPS, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
9058        SEC_DEF("sk_skb/stream_parser", SK_SKB, BPF_SK_SKB_STREAM_PARSER, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
9059        SEC_DEF("sk_skb/stream_verdict",SK_SKB, BPF_SK_SKB_STREAM_VERDICT, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
9060        SEC_DEF("sk_skb",               SK_SKB, 0, SEC_NONE | SEC_SLOPPY_PFX),
9061        SEC_DEF("sk_msg",               SK_MSG, BPF_SK_MSG_VERDICT, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
9062        SEC_DEF("lirc_mode2",           LIRC_MODE2, BPF_LIRC_MODE2, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
9063        SEC_DEF("flow_dissector",       FLOW_DISSECTOR, BPF_FLOW_DISSECTOR, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
9064        SEC_DEF("cgroup/bind4",         CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_BIND, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
9065        SEC_DEF("cgroup/bind6",         CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_BIND, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
9066        SEC_DEF("cgroup/connect4",      CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_CONNECT, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
9067        SEC_DEF("cgroup/connect6",      CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_CONNECT, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
9068        SEC_DEF("cgroup/sendmsg4",      CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_SENDMSG, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
9069        SEC_DEF("cgroup/sendmsg6",      CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_SENDMSG, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
9070        SEC_DEF("cgroup/recvmsg4",      CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_RECVMSG, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
9071        SEC_DEF("cgroup/recvmsg6",      CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_RECVMSG, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
9072        SEC_DEF("cgroup/getpeername4",  CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETPEERNAME, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
9073        SEC_DEF("cgroup/getpeername6",  CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETPEERNAME, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
9074        SEC_DEF("cgroup/getsockname4",  CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETSOCKNAME, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
9075        SEC_DEF("cgroup/getsockname6",  CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETSOCKNAME, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
9076        SEC_DEF("cgroup/sysctl",        CGROUP_SYSCTL, BPF_CGROUP_SYSCTL, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
9077        SEC_DEF("cgroup/getsockopt",    CGROUP_SOCKOPT, BPF_CGROUP_GETSOCKOPT, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
9078        SEC_DEF("cgroup/setsockopt",    CGROUP_SOCKOPT, BPF_CGROUP_SETSOCKOPT, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
9079        SEC_DEF("struct_ops+",          STRUCT_OPS, 0, SEC_NONE),
9080        SEC_DEF("sk_lookup",            SK_LOOKUP, BPF_SK_LOOKUP, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
9081};
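
/* A few examples of how SEC() names in BPF C source resolve against this
 * table (illustrative):
 *
 *        SEC("kprobe/do_unlinkat") -> KPROBE prog, auto-attached by attach_kprobe
 *        SEC("xdp")                -> XDP prog, expected attach type BPF_XDP
 *        SEC("cgroup/connect4")    -> CGROUP_SOCK_ADDR prog,
 *                                     BPF_CGROUP_INET4_CONNECT
 */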
9082
9083static size_t custom_sec_def_cnt;
9084static struct bpf_sec_def *custom_sec_defs;
9085static struct bpf_sec_def custom_fallback_def;
9086static bool has_custom_fallback_def;
9087
9088static int last_custom_sec_def_handler_id;
9089
9090int libbpf_register_prog_handler(const char *sec,
9091                                 enum bpf_prog_type prog_type,
9092                                 enum bpf_attach_type exp_attach_type,
9093                                 const struct libbpf_prog_handler_opts *opts)
9094{
9095        struct bpf_sec_def *sec_def;
9096
9097        if (!OPTS_VALID(opts, libbpf_prog_handler_opts))
9098                return libbpf_err(-EINVAL);
9099
9100        if (last_custom_sec_def_handler_id == INT_MAX) /* prevent overflow */
9101                return libbpf_err(-E2BIG);
9102
9103        if (sec) {
9104                sec_def = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt + 1,
9105                                              sizeof(*sec_def));
9106                if (!sec_def)
9107                        return libbpf_err(-ENOMEM);
9108
9109                custom_sec_defs = sec_def;
9110                sec_def = &custom_sec_defs[custom_sec_def_cnt];
9111        } else {
9112                if (has_custom_fallback_def)
9113                        return libbpf_err(-EBUSY);
9114
9115                sec_def = &custom_fallback_def;
9116        }
9117
9118        sec_def->sec = sec ? strdup(sec) : NULL;
9119        if (sec && !sec_def->sec)
9120                return libbpf_err(-ENOMEM);
9121
9122        sec_def->prog_type = prog_type;
9123        sec_def->expected_attach_type = exp_attach_type;
9124        sec_def->cookie = OPTS_GET(opts, cookie, 0);
9125
9126        sec_def->prog_setup_fn = OPTS_GET(opts, prog_setup_fn, NULL);
9127        sec_def->prog_prepare_load_fn = OPTS_GET(opts, prog_prepare_load_fn, NULL);
9128        sec_def->prog_attach_fn = OPTS_GET(opts, prog_attach_fn, NULL);
9129
9130        sec_def->handler_id = ++last_custom_sec_def_handler_id;
9131
9132        if (sec)
9133                custom_sec_def_cnt++;
9134        else
9135                has_custom_fallback_def = true;
9136
9137        return sec_def->handler_id;
9138}
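
/* Usage sketch: register a custom section handler so that, e.g., programs
 * annotated SEC("mykprobe/...") (a made-up prefix) are loaded as kprobes:
 *
 *        LIBBPF_OPTS(libbpf_prog_handler_opts, opts);
 *        int id;
 *
 *        id = libbpf_register_prog_handler("mykprobe+", BPF_PROG_TYPE_KPROBE,
 *                                          0, &opts);
 *        if (id < 0)
 *                return id;
 *        ...
 *        libbpf_unregister_prog_handler(id);
 */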
9139
9140int libbpf_unregister_prog_handler(int handler_id)
9141{
9142        struct bpf_sec_def *sec_defs;
9143        int i;
9144
9145        if (handler_id <= 0)
9146                return libbpf_err(-EINVAL);
9147
9148        if (has_custom_fallback_def && custom_fallback_def.handler_id == handler_id) {
9149                memset(&custom_fallback_def, 0, sizeof(custom_fallback_def));
9150                has_custom_fallback_def = false;
9151                return 0;
9152        }
9153
9154        for (i = 0; i < custom_sec_def_cnt; i++) {
9155                if (custom_sec_defs[i].handler_id == handler_id)
9156                        break;
9157        }
9158
9159        if (i == custom_sec_def_cnt)
9160                return libbpf_err(-ENOENT);
9161
9162        free(custom_sec_defs[i].sec);
9163        for (i = i + 1; i < custom_sec_def_cnt; i++)
9164                custom_sec_defs[i - 1] = custom_sec_defs[i];
9165        custom_sec_def_cnt--;
9166
9167        /* try to shrink the array, but it's OK if we can't */
9168        sec_defs = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt, sizeof(*sec_defs));
9169        if (sec_defs)
9170                custom_sec_defs = sec_defs;
9171
9172        return 0;
9173}
9174
9175static bool sec_def_matches(const struct bpf_sec_def *sec_def, const char *sec_name,
9176                            bool allow_sloppy)
9177{
9178        size_t len = strlen(sec_def->sec);
9179
9180        /* "type/" always has to have proper SEC("type/extras") form */
9181        if (sec_def->sec[len - 1] == '/') {
9182                if (str_has_pfx(sec_name, sec_def->sec))
9183                        return true;
9184                return false;
9185        }
9186
9187        /* "type+" means it can be either exact SEC("type") or
9188         * well-formed SEC("type/extras") with proper '/' separator
9189         */
9190        if (sec_def->sec[len - 1] == '+') {
9191                len--;
9192                /* not even a prefix */
9193                if (strncmp(sec_name, sec_def->sec, len) != 0)
9194                        return false;
9195                /* exact match or has '/' separator */
9196                if (sec_name[len] == '\0' || sec_name[len] == '/')
9197                        return true;
9198                return false;
9199        }
9200
9201        /* SEC_SLOPPY_PFX definitions are allowed to be just prefix
9202         * matches, unless strict section name mode
9203         * (LIBBPF_STRICT_SEC_NAME) is enabled, in which case the
9204         * match has to be exact.
9205         */
9206        if (allow_sloppy && str_has_pfx(sec_name, sec_def->sec))
9207                return true;
9208
9209        /* Definitions not marked SEC_SLOPPY_PFX (e.g.,
9210         * SEC("syscall")) are exact matches in both modes.
9211         */
9212        return strcmp(sec_name, sec_def->sec) == 0;
9213}
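
/* A few concrete matches under these rules (illustrative):
 *   - "kprobe+" matches "kprobe" and "kprobe/sys_nanosleep", but not
 *     "kprobes" (a prefix without the '/' separator);
 *   - "xdp_devmap/" matches only well-formed "xdp_devmap/<extras>";
 *   - "xdp" (marked SEC_SLOPPY_PFX) matches even "xdpfoo" in legacy mode,
 *     but must be exactly "xdp" once LIBBPF_STRICT_SEC_NAME is enabled.
 */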
9214
9215static const struct bpf_sec_def *find_sec_def(const char *sec_name)
9216{
9217        const struct bpf_sec_def *sec_def;
9218        int i, n;
9219        bool strict = libbpf_mode & LIBBPF_STRICT_SEC_NAME, allow_sloppy;
9220
9221        n = custom_sec_def_cnt;
9222        for (i = 0; i < n; i++) {
9223                sec_def = &custom_sec_defs[i];
9224                if (sec_def_matches(sec_def, sec_name, false))
9225                        return sec_def;
9226        }
9227
9228        n = ARRAY_SIZE(section_defs);
9229        for (i = 0; i < n; i++) {
9230                sec_def = &section_defs[i];
9231                allow_sloppy = (sec_def->cookie & SEC_SLOPPY_PFX) && !strict;
9232                if (sec_def_matches(sec_def, sec_name, allow_sloppy))
9233                        return sec_def;
9234        }
9235
9236        if (has_custom_fallback_def)
9237                return &custom_fallback_def;
9238
9239        return NULL;
9240}
9241
9242#define MAX_TYPE_NAME_SIZE 32
9243
9244static char *libbpf_get_type_names(bool attach_type)
9245{
9246        int i, len = ARRAY_SIZE(section_defs) * MAX_TYPE_NAME_SIZE;
9247        char *buf;
9248
9249        buf = malloc(len);
9250        if (!buf)
9251                return NULL;
9252
9253        buf[0] = '\0';
9254        /* Build a string buffer with all available names */
9255        for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
9256                const struct bpf_sec_def *sec_def = &section_defs[i];
9257
9258                if (attach_type) {
9259                        if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load)
9260                                continue;
9261
9262                        if (!(sec_def->cookie & SEC_ATTACHABLE))
9263                                continue;
9264                }
9265
9266                if (strlen(buf) + strlen(section_defs[i].sec) + 2 > len) {
9267                        free(buf);
9268                        return NULL;
9269                }
9270                strcat(buf, " ");
9271                strcat(buf, section_defs[i].sec);
9272        }
9273
9274        return buf;
9275}
9276
9277int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
9278                             enum bpf_attach_type *expected_attach_type)
9279{
9280        const struct bpf_sec_def *sec_def;
9281        char *type_names;
9282
9283        if (!name)
9284                return libbpf_err(-EINVAL);
9285
9286        sec_def = find_sec_def(name);
9287        if (sec_def) {
9288                *prog_type = sec_def->prog_type;
9289                *expected_attach_type = sec_def->expected_attach_type;
9290                return 0;
9291        }
9292
9293        pr_debug("failed to guess program type from ELF section '%s'\n", name);
9294        type_names = libbpf_get_type_names(false);
9295        if (type_names != NULL) {
9296                pr_debug("supported section(type) names are:%s\n", type_names);
9297                free(type_names);
9298        }
9299
9300        return libbpf_err(-ESRCH);
9301}
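
/* Usage sketch:
 *
 *        enum bpf_prog_type ptype;
 *        enum bpf_attach_type atype;
 *
 *        err = libbpf_prog_type_by_name("tp_btf/sched_switch", &ptype, &atype);
 *
 * (on success: ptype == BPF_PROG_TYPE_TRACING, atype == BPF_TRACE_RAW_TP)
 */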
9302
9303static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
9304                                                     size_t offset)
9305{
9306        struct bpf_map *map;
9307        size_t i;
9308
9309        for (i = 0; i < obj->nr_maps; i++) {
9310                map = &obj->maps[i];
9311                if (!bpf_map__is_struct_ops(map))
9312                        continue;
9313                if (map->sec_offset <= offset &&
9314                    offset - map->sec_offset < map->def.value_size)
9315                        return map;
9316        }
9317
9318        return NULL;
9319}
9320
9321/* Collect the reloc from ELF and populate the st_ops->progs[] */
9322static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
9323                                            Elf64_Shdr *shdr, Elf_Data *data)
9324{
9325        const struct btf_member *member;
9326        struct bpf_struct_ops *st_ops;
9327        struct bpf_program *prog;
9328        unsigned int shdr_idx;
9329        const struct btf *btf;
9330        struct bpf_map *map;
9331        unsigned int moff, insn_idx;
9332        const char *name;
9333        __u32 member_idx;
9334        Elf64_Sym *sym;
9335        Elf64_Rel *rel;
9336        int i, nrels;
9337
9338        btf = obj->btf;
9339        nrels = shdr->sh_size / shdr->sh_entsize;
9340        for (i = 0; i < nrels; i++) {
9341                rel = elf_rel_by_idx(data, i);
9342                if (!rel) {
9343                        pr_warn("struct_ops reloc: failed to get %d reloc\n", i);
9344                        return -LIBBPF_ERRNO__FORMAT;
9345                }
9346
9347                sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info));
9348                if (!sym) {
9349                        pr_warn("struct_ops reloc: symbol %zx not found\n",
9350                                (size_t)ELF64_R_SYM(rel->r_info));
9351                        return -LIBBPF_ERRNO__FORMAT;
9352                }
9353
9354                name = elf_sym_str(obj, sym->st_name) ?: "<?>";
9355                map = find_struct_ops_map_by_offset(obj, rel->r_offset);
9356                if (!map) {
9357                        pr_warn("struct_ops reloc: cannot find map at rel->r_offset %zu\n",
9358                                (size_t)rel->r_offset);
9359                        return -EINVAL;
9360                }
9361
9362                moff = rel->r_offset - map->sec_offset;
9363                shdr_idx = sym->st_shndx;
9364                st_ops = map->st_ops;
9365                pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel->r_offset %zu map->sec_offset %zu name %d ('%s')\n",
9366                         map->name,
9367                         (long long)(rel->r_info >> 32),
9368                         (long long)sym->st_value,
9369                         shdr_idx, (size_t)rel->r_offset,
9370                         map->sec_offset, sym->st_name, name);
9371
9372                if (shdr_idx >= SHN_LORESERVE) {
9373                        pr_warn("struct_ops reloc %s: rel->r_offset %zu shdr_idx %u unsupported non-static function\n",
9374                                map->name, (size_t)rel->r_offset, shdr_idx);
9375                        return -LIBBPF_ERRNO__RELOC;
9376                }
9377                if (sym->st_value % BPF_INSN_SZ) {
9378                        pr_warn("struct_ops reloc %s: invalid target program offset %llu\n",
9379                                map->name, (unsigned long long)sym->st_value);
9380                        return -LIBBPF_ERRNO__FORMAT;
9381                }
9382                insn_idx = sym->st_value / BPF_INSN_SZ;
9383
9384                member = find_member_by_offset(st_ops->type, moff * 8);
9385                if (!member) {
9386                        pr_warn("struct_ops reloc %s: cannot find member at moff %u\n",
9387                                map->name, moff);
9388                        return -EINVAL;
9389                }
9390                member_idx = member - btf_members(st_ops->type);
9391                name = btf__name_by_offset(btf, member->name_off);
9392
9393                if (!resolve_func_ptr(btf, member->type, NULL)) {
9394                        pr_warn("struct_ops reloc %s: cannot relocate non func ptr %s\n",
9395                                map->name, name);
9396                        return -EINVAL;
9397                }
9398
9399                prog = find_prog_by_sec_insn(obj, shdr_idx, insn_idx);
9400                if (!prog) {
9401                        pr_warn("struct_ops reloc %s: cannot find prog at shdr_idx %u to relocate func ptr %s\n",
9402                                map->name, shdr_idx, name);
9403                        return -EINVAL;
9404                }
9405
9406                /* prevent the use of BPF prog with invalid type */
9407                if (prog->type != BPF_PROG_TYPE_STRUCT_OPS) {
9408                        pr_warn("struct_ops reloc %s: prog %s is not struct_ops BPF program\n",
9409                                map->name, prog->name);
9410                        return -EINVAL;
9411                }
9412
9413                /* if we haven't yet processed this BPF program, record proper
9414                 * attach_btf_id and member_idx
9415                 */
9416                if (!prog->attach_btf_id) {
9417                        prog->attach_btf_id = st_ops->type_id;
9418                        prog->expected_attach_type = member_idx;
9419                }
9420
9421                /* struct_ops BPF prog can be re-used between multiple
9422                 * .struct_ops as long as it's the same struct_ops struct
9423                 * definition and the same function pointer field
9424                 */
9425                if (prog->attach_btf_id != st_ops->type_id ||
9426                    prog->expected_attach_type != member_idx) {
9427                        pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n",
9428                                map->name, prog->name, prog->sec_name, prog->type,
9429                                prog->attach_btf_id, prog->expected_attach_type, name);
9430                        return -EINVAL;
9431                }
9432
9433                st_ops->progs[member_idx] = prog;
9434        }
9435
9436        return 0;
9437}
9438
9439#define BTF_TRACE_PREFIX "btf_trace_"
9440#define BTF_LSM_PREFIX "bpf_lsm_"
9441#define BTF_ITER_PREFIX "bpf_iter_"
9442#define BTF_MAX_NAME_SIZE 128
9443
9444void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type,
9445                                const char **prefix, int *kind)
9446{
9447        switch (attach_type) {
9448        case BPF_TRACE_RAW_TP:
9449                *prefix = BTF_TRACE_PREFIX;
9450                *kind = BTF_KIND_TYPEDEF;
9451                break;
9452        case BPF_LSM_MAC:
9453                *prefix = BTF_LSM_PREFIX;
9454                *kind = BTF_KIND_FUNC;
9455                break;
9456        case BPF_TRACE_ITER:
9457                *prefix = BTF_ITER_PREFIX;
9458                *kind = BTF_KIND_FUNC;
9459                break;
9460        default:
9461                *prefix = "";
9462                *kind = BTF_KIND_FUNC;
9463        }
9464}
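
/* For example, attaching BPF_TRACE_RAW_TP to "sched_switch" looks up the
 * "btf_trace_sched_switch" typedef, while a BPF_LSM_MAC attach to
 * "file_open" looks up the "bpf_lsm_file_open" function.
 */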
9465
9466static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
9467                                   const char *name, __u32 kind)
9468{
9469        char btf_type_name[BTF_MAX_NAME_SIZE];
9470        int ret;
9471
9472        ret = snprintf(btf_type_name, sizeof(btf_type_name),
9473                       "%s%s", prefix, name);
9474        /* snprintf returns the number of characters written excluding the
9475         * terminating null. So, if >= BTF_MAX_NAME_SIZE are written, it
9476         * indicates truncation.
9477         */
9478        if (ret < 0 || ret >= sizeof(btf_type_name))
9479                return -ENAMETOOLONG;
9480        return btf__find_by_name_kind(btf, btf_type_name, kind);
9481}
9482
9483static inline int find_attach_btf_id(struct btf *btf, const char *name,
9484                                     enum bpf_attach_type attach_type)
9485{
9486        const char *prefix;
9487        int kind;
9488
9489        btf_get_kernel_prefix_kind(attach_type, &prefix, &kind);
9490        return find_btf_by_prefix_kind(btf, prefix, name, kind);
9491}
9492
9493int libbpf_find_vmlinux_btf_id(const char *name,
9494                               enum bpf_attach_type attach_type)
9495{
9496        struct btf *btf;
9497        int err;
9498
9499        btf = btf__load_vmlinux_btf();
9500        err = libbpf_get_error(btf);
9501        if (err) {
9502                pr_warn("vmlinux BTF is not found\n");
9503                return libbpf_err(err);
9504        }
9505
9506        err = find_attach_btf_id(btf, name, attach_type);
9507        if (err <= 0)
9508                pr_warn("%s is not found in vmlinux BTF\n", name);
9509
9510        btf__free(btf);
9511        return libbpf_err(err);
9512}
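
/* Usage sketch: resolve the vmlinux BTF ID of an fentry attach target:
 *
 *        int btf_id = libbpf_find_vmlinux_btf_id("tcp_v4_connect",
 *                                                BPF_TRACE_FENTRY);
 *        if (btf_id < 0)
 *                return btf_id;
 */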
9513
9514static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
9515{
9516        struct bpf_prog_info info = {};
9517        __u32 info_len = sizeof(info);
9518        struct btf *btf;
9519        int err;
9520
9521        err = bpf_obj_get_info_by_fd(attach_prog_fd, &info, &info_len);
9522        if (err) {
9523                pr_warn("failed bpf_obj_get_info_by_fd for FD %d: %d\n",
9524                        attach_prog_fd, err);
9525                return err;
9526        }
9527
9528        err = -EINVAL;
9529        if (!info.btf_id) {
9530                pr_warn("The target program doesn't have BTF\n");
9531                goto out;
9532        }
9533        btf = btf__load_from_kernel_by_id(info.btf_id);
9534        err = libbpf_get_error(btf);
9535        if (err) {
9536                pr_warn("Failed to get BTF %d of the program: %d\n", info.btf_id, err);
9537                goto out;
9538        }
9539        err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
9540        btf__free(btf);
9541        if (err <= 0) {
9542                pr_warn("%s is not found in prog's BTF\n", name);
9543                goto out;
9544        }
9545out:
9546        return err;
9547}
9548
9549static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name,
9550                              enum bpf_attach_type attach_type,
9551                              int *btf_obj_fd, int *btf_type_id)
9552{
9553        int ret, i;
9554
9555        ret = find_attach_btf_id(obj->btf_vmlinux, attach_name, attach_type);
9556        if (ret > 0) {
9557                *btf_obj_fd = 0; /* vmlinux BTF */
9558                *btf_type_id = ret;
9559                return 0;
9560        }
9561        if (ret != -ENOENT)
9562                return ret;
9563
9564        ret = load_module_btfs(obj);
9565        if (ret)
9566                return ret;
9567
9568        for (i = 0; i < obj->btf_module_cnt; i++) {
9569                const struct module_btf *mod = &obj->btf_modules[i];
9570
9571                ret = find_attach_btf_id(mod->btf, attach_name, attach_type);
9572                if (ret > 0) {
9573                        *btf_obj_fd = mod->fd;
9574                        *btf_type_id = ret;
9575                        return 0;
9576                }
9577                if (ret == -ENOENT)
9578                        continue;
9579
9580                return ret;
9581        }
9582
9583        return -ESRCH;
9584}
9585
9586static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name,
9587                                     int *btf_obj_fd, int *btf_type_id)
9588{
9589        enum bpf_attach_type attach_type = prog->expected_attach_type;
9590        __u32 attach_prog_fd = prog->attach_prog_fd;
9591        int err = 0;
9592
9593        /* BPF program's BTF ID */
9594        if (attach_prog_fd) {
9595                err = libbpf_find_prog_btf_id(attach_name, attach_prog_fd);
9596                if (err < 0) {
9597                        pr_warn("failed to find BPF program (FD %d) BTF ID for '%s': %d\n",
9598                                 attach_prog_fd, attach_name, err);
9599                        return err;
9600                }
9601                *btf_obj_fd = 0;
9602                *btf_type_id = err;
9603                return 0;
9604        }
9605
9606        /* kernel/module BTF ID */
9607        if (prog->obj->gen_loader) {
9608                bpf_gen__record_attach_target(prog->obj->gen_loader, attach_name, attach_type);
9609                *btf_obj_fd = 0;
9610                *btf_type_id = 1;
9611        } else {
9612                err = find_kernel_btf_id(prog->obj, attach_name, attach_type, btf_obj_fd, btf_type_id);
9613        }
9614        if (err) {
9615                pr_warn("failed to find kernel BTF type ID of '%s': %d\n", attach_name, err);
9616                return err;
9617        }
9618        return 0;
9619}
9620
9621int libbpf_attach_type_by_name(const char *name,
9622                               enum bpf_attach_type *attach_type)
9623{
9624        char *type_names;
9625        const struct bpf_sec_def *sec_def;
9626
9627        if (!name)
9628                return libbpf_err(-EINVAL);
9629
9630        sec_def = find_sec_def(name);
9631        if (!sec_def) {
9632                pr_debug("failed to guess attach type based on ELF section name '%s'\n", name);
9633                type_names = libbpf_get_type_names(true);
9634                if (type_names != NULL) {
9635                        pr_debug("attachable section(type) names are:%s\n", type_names);
9636                        free(type_names);
9637                }
9638
9639                return libbpf_err(-EINVAL);
9640        }
9641
9642        if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load)
9643                return libbpf_err(-EINVAL);
9644        if (!(sec_def->cookie & SEC_ATTACHABLE))
9645                return libbpf_err(-EINVAL);
9646
9647        *attach_type = sec_def->expected_attach_type;
9648        return 0;
9649}
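
/* Usage sketch:
 *
 *        enum bpf_attach_type atype;
 *
 *        if (!libbpf_attach_type_by_name("cgroup/connect4", &atype))
 *                ...
 *
 * (on success, atype == BPF_CGROUP_INET4_CONNECT)
 */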
9650
9651int bpf_map__fd(const struct bpf_map *map)
9652{
9653        return map ? map->fd : libbpf_err(-EINVAL);
9654}
9655
9656const struct bpf_map_def *bpf_map__def(const struct bpf_map *map)
9657{
9658        return map ? &map->def : libbpf_err_ptr(-EINVAL);
9659}
9660
9661static bool map_uses_real_name(const struct bpf_map *map)
9662{
9663        /* Since libbpf started to support custom .data.* and .rodata.* maps,
9664         * their user-visible name differs from kernel-visible name. Users see
9665         * such map's corresponding ELF section name as a map name.
9666         * This check distinguishes .data/.rodata from .data.* and .rodata.*
9667         * maps to know which name has to be returned to the user.
9668         */
9669        if (map->libbpf_type == LIBBPF_MAP_DATA && strcmp(map->real_name, DATA_SEC) != 0)
9670                return true;
9671        if (map->libbpf_type == LIBBPF_MAP_RODATA && strcmp(map->real_name, RODATA_SEC) != 0)
9672                return true;
9673        return false;
9674}
9675
9676const char *bpf_map__name(const struct bpf_map *map)
9677{
9678        if (!map)
9679                return NULL;
9680
9681        if (map_uses_real_name(map))
9682                return map->real_name;
9683
9684        return map->name;
9685}
9686
9687enum bpf_map_type bpf_map__type(const struct bpf_map *map)
9688{
9689        return map->def.type;
9690}
9691
9692int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type)
9693{
9694        if (map->fd >= 0)
9695                return libbpf_err(-EBUSY);
9696        map->def.type = type;
9697        return 0;
9698}
9699
9700__u32 bpf_map__map_flags(const struct bpf_map *map)
9701{
9702        return map->def.map_flags;
9703}
9704
9705int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags)
9706{
9707        if (map->fd >= 0)
9708                return libbpf_err(-EBUSY);
9709        map->def.map_flags = flags;
9710        return 0;
9711}
9712
9713__u64 bpf_map__map_extra(const struct bpf_map *map)
9714{
9715        return map->map_extra;
9716}
9717
9718int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra)
9719{
9720        if (map->fd >= 0)
9721                return libbpf_err(-EBUSY);
9722        map->map_extra = map_extra;
9723        return 0;
9724}
9725
9726__u32 bpf_map__numa_node(const struct bpf_map *map)
9727{
9728        return map->numa_node;
9729}
9730
9731int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node)
9732{
9733        if (map->fd >= 0)
9734                return libbpf_err(-EBUSY);
9735        map->numa_node = numa_node;
9736        return 0;
9737}
9738
9739__u32 bpf_map__key_size(const struct bpf_map *map)
9740{
9741        return map->def.key_size;
9742}
9743
9744int bpf_map__set_key_size(struct bpf_map *map, __u32 size)
9745{
9746        if (map->fd >= 0)
9747                return libbpf_err(-EBUSY);
9748        map->def.key_size = size;
9749        return 0;
9750}
9751
9752__u32 bpf_map__value_size(const struct bpf_map *map)
9753{
9754        return map->def.value_size;
9755}
9756
9757int bpf_map__set_value_size(struct bpf_map *map, __u32 size)
9758{
9759        if (map->fd >= 0)
9760                return libbpf_err(-EBUSY);
9761        map->def.value_size = size;
9762        return 0;
9763}
9764
9765__u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
9766{
9767        return map ? map->btf_key_type_id : 0;
9768}
9769
9770__u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
9771{
9772        return map ? map->btf_value_type_id : 0;
9773}
9774
9775int bpf_map__set_priv(struct bpf_map *map, void *priv,
9776                     bpf_map_clear_priv_t clear_priv)
9777{
9778        if (!map)
9779                return libbpf_err(-EINVAL);
9780
9781        if (map->priv) {
9782                if (map->clear_priv)
9783                        map->clear_priv(map, map->priv);
9784        }
9785
9786        map->priv = priv;
9787        map->clear_priv = clear_priv;
9788        return 0;
9789}
9790
9791void *bpf_map__priv(const struct bpf_map *map)
9792{
9793        return map ? map->priv : libbpf_err_ptr(-EINVAL);
9794}
9795
9796int bpf_map__set_initial_value(struct bpf_map *map,
9797                               const void *data, size_t size)
9798{
9799        if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG ||
9800            size != map->def.value_size || map->fd >= 0)
9801                return libbpf_err(-EINVAL);
9802
9803        memcpy(map->mmaped, data, size);
9804        return 0;
9805}
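
/* Illustrative sketch (the struct layout is hypothetical): initial values
 * can only be set after bpf_object__open() and before bpf_object__load(),
 * and the size must equal the map's value_size, i.e. the full data section
 * size:
 *
 *	struct { __u32 verbose; __u32 debug; } cfg = { .verbose = 1 };
 *	struct bpf_map *rodata = bpf_object__find_map_by_name(obj, ".rodata");
 *
 *	err = bpf_map__set_initial_value(rodata, &cfg, sizeof(cfg));
 *	// -EINVAL if the size mismatches or the map was already created
 */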
9806
9807const void *bpf_map__initial_value(struct bpf_map *map, size_t *psize)
9808{
9809        if (!map->mmaped)
9810                return NULL;
9811        *psize = map->def.value_size;
9812        return map->mmaped;
9813}
9814
9815bool bpf_map__is_offload_neutral(const struct bpf_map *map)
9816{
9817        return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
9818}
9819
9820bool bpf_map__is_internal(const struct bpf_map *map)
9821{
9822        return map->libbpf_type != LIBBPF_MAP_UNSPEC;
9823}
9824
9825__u32 bpf_map__ifindex(const struct bpf_map *map)
9826{
9827        return map->map_ifindex;
9828}
9829
9830int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
9831{
9832        if (map->fd >= 0)
9833                return libbpf_err(-EBUSY);
9834        map->map_ifindex = ifindex;
9835        return 0;
9836}
9837
9838int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
9839{
9840        if (!bpf_map_type__is_map_in_map(map->def.type)) {
9841                pr_warn("error: unsupported map type\n");
9842                return libbpf_err(-EINVAL);
9843        }
9844        if (map->inner_map_fd != -1) {
9845                pr_warn("error: inner_map_fd already specified\n");
9846                return libbpf_err(-EINVAL);
9847        }
9848        if (map->inner_map) {
9849                bpf_map__destroy(map->inner_map);
9850                zfree(&map->inner_map);
9851        }
9852        map->inner_map_fd = fd;
9853        return 0;
9854}
9855
9856static struct bpf_map *
9857__bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
9858{
9859        ssize_t idx;
9860        struct bpf_map *s, *e;
9861
9862        if (!obj || !obj->maps)
9863                return errno = EINVAL, NULL;
9864
9865        s = obj->maps;
9866        e = obj->maps + obj->nr_maps;
9867
9868        if ((m < s) || (m >= e)) {
9869                pr_warn("error in %s: map handler doesn't belong to object\n",
9870                         __func__);
9871                return errno = EINVAL, NULL;
9872        }
9873
9874        idx = (m - obj->maps) + i;
9875        if (idx >= obj->nr_maps || idx < 0)
9876                return NULL;
9877        return &obj->maps[idx];
9878}
9879
9880struct bpf_map *
9881bpf_map__next(const struct bpf_map *prev, const struct bpf_object *obj)
9882{
9883        return bpf_object__next_map(obj, prev);
9884}
9885
9886struct bpf_map *
9887bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
9888{
9889        if (prev == NULL)
9890                return obj->maps;
9891
9892        return __bpf_map__iter(prev, obj, 1);
9893}
9894
9895struct bpf_map *
9896bpf_map__prev(const struct bpf_map *next, const struct bpf_object *obj)
9897{
9898        return bpf_object__prev_map(obj, next);
9899}
9900
9901struct bpf_map *
9902bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *next)
9903{
9904        if (next == NULL) {
9905                if (!obj->nr_maps)
9906                        return NULL;
9907                return obj->maps + obj->nr_maps - 1;
9908        }
9909
9910        return __bpf_map__iter(next, obj, -1);
9911}
9912
9913struct bpf_map *
9914bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name)
9915{
9916        struct bpf_map *pos;
9917
9918        bpf_object__for_each_map(pos, obj) {
9919                /* if it's a special internal map name (which always starts
9920                 * with a dot) then check if that special name matches the
9921                 * real map name (ELF section name)
9922                 */
9923                if (name[0] == '.') {
9924                        if (pos->real_name && strcmp(pos->real_name, name) == 0)
9925                                return pos;
9926                        continue;
9927                }
9928                /* otherwise map name has to be an exact match */
9929                if (map_uses_real_name(pos)) {
9930                        if (strcmp(pos->real_name, name) == 0)
9931                                return pos;
9932                        continue;
9933                }
9934                if (strcmp(pos->name, name) == 0)
9935                        return pos;
9936        }
9937        return errno = ENOENT, NULL;
9938}
9939
9940int
9941bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name)
9942{
9943        return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
9944}
9945
9946struct bpf_map *
9947bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
9948{
9949        return libbpf_err_ptr(-ENOTSUP);
9950}
9951
9952static int validate_map_op(const struct bpf_map *map, size_t key_sz,
9953                           size_t value_sz, bool check_value_sz)
9954{
9955        if (map->fd <= 0)
9956                return -ENOENT;
9957
9958        if (map->def.key_size != key_sz) {
9959                pr_warn("map '%s': unexpected key size %zu provided, expected %u\n",
9960                        map->name, key_sz, map->def.key_size);
9961                return -EINVAL;
9962        }
9963
9964        if (!check_value_sz)
9965                return 0;
9966
9967        switch (map->def.type) {
9968        case BPF_MAP_TYPE_PERCPU_ARRAY:
9969        case BPF_MAP_TYPE_PERCPU_HASH:
9970        case BPF_MAP_TYPE_LRU_PERCPU_HASH:
9971        case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: {
9972                int num_cpu = libbpf_num_possible_cpus();
9973                size_t elem_sz = roundup(map->def.value_size, 8);
9974
9975                if (value_sz != num_cpu * elem_sz) {
9976                        pr_warn("map '%s': unexpected value size %zu provided for per-CPU map, expected %d * %zu = %zd\n",
9977                                map->name, value_sz, num_cpu, elem_sz, num_cpu * elem_sz);
9978                        return -EINVAL;
9979                }
9980                break;
9981        }
9982        default:
9983                if (map->def.value_size != value_sz) {
9984                        pr_warn("map '%s': unexpected value size %zu provided, expected %u\n",
9985                                map->name, value_sz, map->def.value_size);
9986                        return -EINVAL;
9987                }
9988                break;
9989        }
9990        return 0;
9991}
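
/* Worked example of the per-CPU sizing rule above (illustrative numbers):
 * for a BPF_MAP_TYPE_PERCPU_ARRAY with value_size = 4 on a system where
 * libbpf_num_possible_cpus() returns 8, each per-CPU slot is padded to
 * roundup(4, 8) = 8 bytes, so callers must pass value_sz = 8 * 8 = 64 and
 * treat the value as an array of 8 eight-byte elements.
 */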
9992
9993int bpf_map__lookup_elem(const struct bpf_map *map,
9994                         const void *key, size_t key_sz,
9995                         void *value, size_t value_sz, __u64 flags)
9996{
9997        int err;
9998
9999        err = validate_map_op(map, key_sz, value_sz, true);
10000        if (err)
10001                return libbpf_err(err);
10002
10003        return bpf_map_lookup_elem_flags(map->fd, key, value, flags);
10004}
10005
10006int bpf_map__update_elem(const struct bpf_map *map,
10007                         const void *key, size_t key_sz,
10008                         const void *value, size_t value_sz, __u64 flags)
10009{
10010        int err;
10011
10012        err = validate_map_op(map, key_sz, value_sz, true);
10013        if (err)
10014                return libbpf_err(err);
10015
10016        return bpf_map_update_elem(map->fd, key, value, flags);
10017}
10018
10019int bpf_map__delete_elem(const struct bpf_map *map,
10020                         const void *key, size_t key_sz, __u64 flags)
10021{
10022        int err;
10023
10024        err = validate_map_op(map, key_sz, 0, false /* check_value_sz */);
10025        if (err)
10026                return libbpf_err(err);
10027
10028        return bpf_map_delete_elem_flags(map->fd, key, flags);
10029}
10030
10031int bpf_map__lookup_and_delete_elem(const struct bpf_map *map,
10032                                    const void *key, size_t key_sz,
10033                                    void *value, size_t value_sz, __u64 flags)
10034{
10035        int err;
10036
10037        err = validate_map_op(map, key_sz, value_sz, true);
10038        if (err)
10039                return libbpf_err(err);
10040
10041        return bpf_map_lookup_and_delete_elem_flags(map->fd, key, value, flags);
10042}
10043
10044int bpf_map__get_next_key(const struct bpf_map *map,
10045                          const void *cur_key, void *next_key, size_t key_sz)
10046{
10047        int err;
10048
10049        err = validate_map_op(map, key_sz, 0, false /* check_value_sz */);
10050        if (err)
10051                return libbpf_err(err);
10052
10053        return bpf_map_get_next_key(map->fd, cur_key, next_key);
10054}
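
/* Illustrative usage sketch of the size-validated accessors above ("map" and
 * struct stats are hypothetical):
 *
 *	__u32 key = 0, next_key;
 *	struct stats val;
 *	int err;
 *
 *	err = bpf_map__lookup_elem(map, &key, sizeof(key), &val, sizeof(val), 0);
 *	err = bpf_map__update_elem(map, &key, sizeof(key), &val, sizeof(val), BPF_ANY);
 *	err = bpf_map__get_next_key(map, &key, &next_key, sizeof(key));
 */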
10055
10056long libbpf_get_error(const void *ptr)
10057{
10058        if (!IS_ERR_OR_NULL(ptr))
10059                return 0;
10060
10061        if (IS_ERR(ptr))
10062                errno = -PTR_ERR(ptr);
10063
10064        /* If ptr == NULL, then errno should be already set by the failing
10065         * API, because libbpf never returns NULL on success and it now always
10066         * sets errno on error. So no extra errno handling for ptr == NULL
10067         * case.
10068         */
10069        return -errno;
10070}
10071
10072__attribute__((alias("bpf_prog_load_xattr2")))
10073int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
10074                        struct bpf_object **pobj, int *prog_fd);
10075
10076static int bpf_prog_load_xattr2(const struct bpf_prog_load_attr *attr,
10077                                struct bpf_object **pobj, int *prog_fd)
10078{
10079        struct bpf_object_open_attr open_attr = {};
10080        struct bpf_program *prog, *first_prog = NULL;
10081        struct bpf_object *obj;
10082        struct bpf_map *map;
10083        int err;
10084
10085        if (!attr)
10086                return libbpf_err(-EINVAL);
10087        if (!attr->file)
10088                return libbpf_err(-EINVAL);
10089
10090        open_attr.file = attr->file;
10091        open_attr.prog_type = attr->prog_type;
10092
10093        obj = __bpf_object__open_xattr(&open_attr, 0);
10094        err = libbpf_get_error(obj);
10095        if (err)
10096                return libbpf_err(-ENOENT);
10097
10098        bpf_object__for_each_program(prog, obj) {
10099                enum bpf_attach_type attach_type = attr->expected_attach_type;
10100                /*
10101                 * to preserve backwards compatibility, bpf_prog_load treats
10102                 * attr->prog_type, if specified, as an override to whatever
10103                 * bpf_object__open guessed
10104                 */
10105                if (attr->prog_type != BPF_PROG_TYPE_UNSPEC) {
10106                        prog->type = attr->prog_type;
10107                        prog->expected_attach_type = attach_type;
10108                }
10109                if (bpf_program__type(prog) == BPF_PROG_TYPE_UNSPEC) {
10110                        /*
10111                         * we haven't guessed from section name and user
10112                         * didn't provide a fallback type, too bad...
10113                         */
10114                        bpf_object__close(obj);
10115                        return libbpf_err(-EINVAL);
10116                }
10117
10118                prog->prog_ifindex = attr->ifindex;
10119                prog->log_level = attr->log_level;
10120                prog->prog_flags |= attr->prog_flags;
10121                if (!first_prog)
10122                        first_prog = prog;
10123        }
10124
10125        bpf_object__for_each_map(map, obj) {
10126                if (map->def.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
10127                        map->map_ifindex = attr->ifindex;
10128        }
10129
10130        if (!first_prog) {
10131                pr_warn("object file doesn't contain bpf program\n");
10132                bpf_object__close(obj);
10133                return libbpf_err(-ENOENT);
10134        }
10135
10136        err = bpf_object__load(obj);
10137        if (err) {
10138                bpf_object__close(obj);
10139                return libbpf_err(err);
10140        }
10141
10142        *pobj = obj;
10143        *prog_fd = bpf_program__fd(first_prog);
10144        return 0;
10145}
10146
10147COMPAT_VERSION(bpf_prog_load_deprecated, bpf_prog_load, LIBBPF_0.0.1)
10148int bpf_prog_load_deprecated(const char *file, enum bpf_prog_type type,
10149                             struct bpf_object **pobj, int *prog_fd)
10150{
10151        struct bpf_prog_load_attr attr;
10152
10153        memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
10154        attr.file = file;
10155        attr.prog_type = type;
10156        attr.expected_attach_type = 0;
10157
10158        return bpf_prog_load_xattr2(&attr, pobj, prog_fd);
10159}
10160
10161/* Replace link's underlying BPF program with the new one */
10162int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog)
10163{
10164        int ret;
10165
10166        ret = bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), NULL);
10167        return libbpf_err_errno(ret);
10168}
10169
10170/* Release "ownership" of underlying BPF resource (typically, BPF program
10171 * attached to some BPF hook, e.g., tracepoint, kprobe, etc). A disconnected
10172 * link, when destroyed through a bpf_link__destroy() call, won't attempt to
10173 * detach/unregister that BPF resource. This is useful in situations where,
10174 * say, the attached BPF program has to outlive the userspace program that
10175 * attached it. Depending on the type of BPF program, though, there might be
10176 * additional steps (like pinning BPF program in BPF FS) necessary to ensure
10177 * exit of userspace program doesn't trigger automatic detachment and clean up
10178 * inside the kernel.
10179 */
10180void bpf_link__disconnect(struct bpf_link *link)
10181{
10182        link->disconnected = true;
10183}
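
/* Illustrative sketch (the pin path is hypothetical): keep an attachment
 * alive past process exit by pinning the link, then disconnecting it before
 * destroying the userspace handle:
 *
 *	struct bpf_link *link = bpf_program__attach(prog);
 *
 *	err = bpf_link__pin(link, "/sys/fs/bpf/my_link");
 *	bpf_link__disconnect(link);
 *	bpf_link__destroy(link);	// frees memory, does not detach
 */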
10184
10185int bpf_link__destroy(struct bpf_link *link)
10186{
10187        int err = 0;
10188
10189        if (IS_ERR_OR_NULL(link))
10190                return 0;
10191
10192        if (!link->disconnected && link->detach)
10193                err = link->detach(link);
10194        if (link->pin_path)
10195                free(link->pin_path);
10196        if (link->dealloc)
10197                link->dealloc(link);
10198        else
10199                free(link);
10200
10201        return libbpf_err(err);
10202}
10203
10204int bpf_link__fd(const struct bpf_link *link)
10205{
10206        return link->fd;
10207}
10208
10209const char *bpf_link__pin_path(const struct bpf_link *link)
10210{
10211        return link->pin_path;
10212}
10213
10214static int bpf_link__detach_fd(struct bpf_link *link)
10215{
10216        return libbpf_err_errno(close(link->fd));
10217}
10218
10219struct bpf_link *bpf_link__open(const char *path)
10220{
10221        struct bpf_link *link;
10222        int fd;
10223
10224        fd = bpf_obj_get(path);
10225        if (fd < 0) {
10226                fd = -errno;
10227                pr_warn("failed to open link at %s: %d\n", path, fd);
10228                return libbpf_err_ptr(fd);
10229        }
10230
10231        link = calloc(1, sizeof(*link));
10232        if (!link) {
10233                close(fd);
10234                return libbpf_err_ptr(-ENOMEM);
10235        }
10236        link->detach = &bpf_link__detach_fd;
10237        link->fd = fd;
10238
10239        link->pin_path = strdup(path);
10240        if (!link->pin_path) {
10241                bpf_link__destroy(link);
10242                return libbpf_err_ptr(-ENOMEM);
10243        }
10244
10245        return link;
10246}
10247
10248int bpf_link__detach(struct bpf_link *link)
10249{
10250        return bpf_link_detach(link->fd) ? -errno : 0;
10251}
10252
10253int bpf_link__pin(struct bpf_link *link, const char *path)
10254{
10255        int err;
10256
10257        if (link->pin_path)
10258                return libbpf_err(-EBUSY);
10259        err = make_parent_dir(path);
10260        if (err)
10261                return libbpf_err(err);
10262        err = check_path(path);
10263        if (err)
10264                return libbpf_err(err);
10265
10266        link->pin_path = strdup(path);
10267        if (!link->pin_path)
10268                return libbpf_err(-ENOMEM);
10269
10270        if (bpf_obj_pin(link->fd, link->pin_path)) {
10271                err = -errno;
10272                zfree(&link->pin_path);
10273                return libbpf_err(err);
10274        }
10275
10276        pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path);
10277        return 0;
10278}
10279
10280int bpf_link__unpin(struct bpf_link *link)
10281{
10282        int err;
10283
10284        if (!link->pin_path)
10285                return libbpf_err(-EINVAL);
10286
10287        err = unlink(link->pin_path);
10288        if (err != 0)
10289                return -errno;
10290
10291        pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path);
10292        zfree(&link->pin_path);
10293        return 0;
10294}
10295
10296struct bpf_link_perf {
10297        struct bpf_link link;
10298        int perf_event_fd;
10299        /* legacy kprobe support: keep track of probe identifier and type */
10300        char *legacy_probe_name;
10301        bool legacy_is_kprobe;
10302        bool legacy_is_retprobe;
10303};
10304
10305static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe);
10306static int remove_uprobe_event_legacy(const char *probe_name, bool retprobe);
10307
10308static int bpf_link_perf_detach(struct bpf_link *link)
10309{
10310        struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
10311        int err = 0;
10312
10313        if (ioctl(perf_link->perf_event_fd, PERF_EVENT_IOC_DISABLE, 0) < 0)
10314                err = -errno;
10315
10316        if (perf_link->perf_event_fd != link->fd)
10317                close(perf_link->perf_event_fd);
10318        close(link->fd);
10319
10320        /* legacy uprobe/kprobe needs to be removed after perf event fd closure */
10321        if (perf_link->legacy_probe_name) {
10322                if (perf_link->legacy_is_kprobe) {
10323                        err = remove_kprobe_event_legacy(perf_link->legacy_probe_name,
10324                                                         perf_link->legacy_is_retprobe);
10325                } else {
10326                        err = remove_uprobe_event_legacy(perf_link->legacy_probe_name,
10327                                                         perf_link->legacy_is_retprobe);
10328                }
10329        }
10330
10331        return err;
10332}
10333
10334static void bpf_link_perf_dealloc(struct bpf_link *link)
10335{
10336        struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
10337
10338        free(perf_link->legacy_probe_name);
10339        free(perf_link);
10340}
10341
10342struct bpf_link *bpf_program__attach_perf_event_opts(const struct bpf_program *prog, int pfd,
10343                                                     const struct bpf_perf_event_opts *opts)
10344{
10345        char errmsg[STRERR_BUFSIZE];
10346        struct bpf_link_perf *link;
10347        int prog_fd, link_fd = -1, err;
10348
10349        if (!OPTS_VALID(opts, bpf_perf_event_opts))
10350                return libbpf_err_ptr(-EINVAL);
10351
10352        if (pfd < 0) {
10353                pr_warn("prog '%s': invalid perf event FD %d\n",
10354                        prog->name, pfd);
10355                return libbpf_err_ptr(-EINVAL);
10356        }
10357        prog_fd = bpf_program__fd(prog);
10358        if (prog_fd < 0) {
10359                pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n",
10360                        prog->name);
10361                return libbpf_err_ptr(-EINVAL);
10362        }
10363
10364        link = calloc(1, sizeof(*link));
10365        if (!link)
10366                return libbpf_err_ptr(-ENOMEM);
10367        link->link.detach = &bpf_link_perf_detach;
10368        link->link.dealloc = &bpf_link_perf_dealloc;
10369        link->perf_event_fd = pfd;
10370
10371        if (kernel_supports(prog->obj, FEAT_PERF_LINK)) {
10372                DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_opts,
10373                        .perf_event.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0));
10374
10375                link_fd = bpf_link_create(prog_fd, pfd, BPF_PERF_EVENT, &link_opts);
10376                if (link_fd < 0) {
10377                        err = -errno;
10378                        pr_warn("prog '%s': failed to create BPF link for perf_event FD %d: %d (%s)\n",
10379                                prog->name, pfd,
10380                                err, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10381                        goto err_out;
10382                }
10383                link->link.fd = link_fd;
10384        } else {
10385                if (OPTS_GET(opts, bpf_cookie, 0)) {
10386                        pr_warn("prog '%s': user context value is not supported\n", prog->name);
10387                        err = -EOPNOTSUPP;
10388                        goto err_out;
10389                }
10390
10391                if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
10392                        err = -errno;
10393                        pr_warn("prog '%s': failed to attach to perf_event FD %d: %s\n",
10394                                prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10395                        if (err == -EPROTO)
10396                                pr_warn("prog '%s': try adding PERF_SAMPLE_CALLCHAIN to or removing exclude_callchain_[kernel|user] from pfd %d\n",
10397                                        prog->name, pfd);
10398                        goto err_out;
10399                }
10400                link->link.fd = pfd;
10401        }
10402        if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
10403                err = -errno;
10404                pr_warn("prog '%s': failed to enable perf_event FD %d: %s\n",
10405                        prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10406                goto err_out;
10407        }
10408
10409        return &link->link;
10410err_out:
10411        if (link_fd >= 0)
10412                close(link_fd);
10413        free(link);
10414        return libbpf_err_ptr(err);
10415}
10416
10417struct bpf_link *bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd)
10418{
10419        return bpf_program__attach_perf_event_opts(prog, pfd, NULL);
10420}
10421
10422/*
10423 * This function is expected to parse an integer in the range of [0, 2^31-1]
10424 * from the given file using scanf format string fmt. If the actual parsed
10425 * value is negative, the result might be indistinguishable from an error.
10426 */
10427static int parse_uint_from_file(const char *file, const char *fmt)
10428{
10429        char buf[STRERR_BUFSIZE];
10430        int err, ret;
10431        FILE *f;
10432
10433        f = fopen(file, "r");
10434        if (!f) {
10435                err = -errno;
10436                pr_debug("failed to open '%s': %s\n", file,
10437                         libbpf_strerror_r(err, buf, sizeof(buf)));
10438                return err;
10439        }
10440        err = fscanf(f, fmt, &ret);
10441        if (err != 1) {
10442                err = err == EOF ? -EIO : -errno;
10443                pr_debug("failed to parse '%s': %s\n", file,
10444                        libbpf_strerror_r(err, buf, sizeof(buf)));
10445                fclose(f);
10446                return err;
10447        }
10448        fclose(f);
10449        return ret;
10450}
10451
10452static int determine_kprobe_perf_type(void)
10453{
10454        const char *file = "/sys/bus/event_source/devices/kprobe/type";
10455
10456        return parse_uint_from_file(file, "%d\n");
10457}
10458
10459static int determine_uprobe_perf_type(void)
10460{
10461        const char *file = "/sys/bus/event_source/devices/uprobe/type";
10462
10463        return parse_uint_from_file(file, "%d\n");
10464}
10465
10466static int determine_kprobe_retprobe_bit(void)
10467{
10468        const char *file = "/sys/bus/event_source/devices/kprobe/format/retprobe";
10469
10470        return parse_uint_from_file(file, "config:%d\n");
10471}
10472
10473static int determine_uprobe_retprobe_bit(void)
10474{
10475        const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe";
10476
10477        return parse_uint_from_file(file, "config:%d\n");
10478}
10479
10480#define PERF_UPROBE_REF_CTR_OFFSET_BITS 32
10481#define PERF_UPROBE_REF_CTR_OFFSET_SHIFT 32
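
/* Layout note (illustrative): the uprobe reference counter offset occupies
 * the upper 32 bits of perf_event_attr.config, e.g. ref_ctr_off = 0x10
 * becomes attr.config |= 0x10ULL << PERF_UPROBE_REF_CTR_OFFSET_SHIFT, while
 * the low bits carry flags such as the sysfs-reported retprobe bit.
 */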
10482
10483static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
10484                                 uint64_t offset, int pid, size_t ref_ctr_off)
10485{
10486        struct perf_event_attr attr = {};
10487        char errmsg[STRERR_BUFSIZE];
10488        int type, pfd, err;
10489
10490        if (ref_ctr_off >= (1ULL << PERF_UPROBE_REF_CTR_OFFSET_BITS))
10491                return -EINVAL;
10492
10493        type = uprobe ? determine_uprobe_perf_type()
10494                      : determine_kprobe_perf_type();
10495        if (type < 0) {
10496                pr_warn("failed to determine %s perf type: %s\n",
10497                        uprobe ? "uprobe" : "kprobe",
10498                        libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
10499                return type;
10500        }
10501        if (retprobe) {
10502                int bit = uprobe ? determine_uprobe_retprobe_bit()
10503                                 : determine_kprobe_retprobe_bit();
10504
10505                if (bit < 0) {
10506                        pr_warn("failed to determine %s retprobe bit: %s\n",
10507                                uprobe ? "uprobe" : "kprobe",
10508                                libbpf_strerror_r(bit, errmsg, sizeof(errmsg)));
10509                        return bit;
10510                }
10511                attr.config |= 1 << bit;
10512        }
10513        attr.size = sizeof(attr);
10514        attr.type = type;
10515        attr.config |= (__u64)ref_ctr_off << PERF_UPROBE_REF_CTR_OFFSET_SHIFT;
10516        attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */
10517        attr.config2 = offset;           /* kprobe_addr or probe_offset */
10518
10519        /* pid filter is meaningful only for uprobes */
10520        pfd = syscall(__NR_perf_event_open, &attr,
10521                      pid < 0 ? -1 : pid /* pid */,
10522                      pid == -1 ? 0 : -1 /* cpu */,
10523                      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
10524        if (pfd < 0) {
10525                err = -errno;
10526                pr_warn("%s perf_event_open() failed: %s\n",
10527                        uprobe ? "uprobe" : "kprobe",
10528                        libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10529                return err;
10530        }
10531        return pfd;
10532}
10533
10534static int append_to_file(const char *file, const char *fmt, ...)
10535{
10536        int fd, n, err = 0;
10537        va_list ap;
10538
10539        fd = open(file, O_WRONLY | O_APPEND | O_CLOEXEC, 0);
10540        if (fd < 0)
10541                return -errno;
10542
10543        va_start(ap, fmt);
10544        n = vdprintf(fd, fmt, ap);
10545        va_end(ap);
10546
10547        if (n < 0)
10548                err = -errno;
10549
10550        close(fd);
10551        return err;
10552}
10553
10554static void gen_kprobe_legacy_event_name(char *buf, size_t buf_sz,
10555                                         const char *kfunc_name, size_t offset)
10556{
10557        static int index = 0;
10558
10559        snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx_%d", getpid(), kfunc_name, offset,
10560                 __sync_fetch_and_add(&index, 1));
10561}
10562
10563static int add_kprobe_event_legacy(const char *probe_name, bool retprobe,
10564                                   const char *kfunc_name, size_t offset)
10565{
10566        const char *file = "/sys/kernel/debug/tracing/kprobe_events";
10567
10568        return append_to_file(file, "%c:%s/%s %s+0x%zx",
10569                              retprobe ? 'r' : 'p',
10570                              retprobe ? "kretprobes" : "kprobes",
10571                              probe_name, kfunc_name, offset);
10572}
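
/* For example (the PID and index in the generated probe name are
 * illustrative), a kprobe on do_unlinkat+0x0 appends a line like
 *
 *	p:kprobes/libbpf_1234_do_unlinkat_0x0_0 do_unlinkat+0x0
 *
 * to kprobe_events, and remove_kprobe_event_legacy() below appends
 * "-:kprobes/libbpf_1234_do_unlinkat_0x0_0" to delete it again.
 */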
10573
10574static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe)
10575{
10576        const char *file = "/sys/kernel/debug/tracing/kprobe_events";
10577
10578        return append_to_file(file, "-:%s/%s", retprobe ? "kretprobes" : "kprobes", probe_name);
10579}
10580
10581static int determine_kprobe_perf_type_legacy(const char *probe_name, bool retprobe)
10582{
10583        char file[256];
10584
10585        snprintf(file, sizeof(file),
10586                 "/sys/kernel/debug/tracing/events/%s/%s/id",
10587                 retprobe ? "kretprobes" : "kprobes", probe_name);
10588
10589        return parse_uint_from_file(file, "%d\n");
10590}
10591
10592static int perf_event_kprobe_open_legacy(const char *probe_name, bool retprobe,
10593                                         const char *kfunc_name, size_t offset, int pid)
10594{
10595        struct perf_event_attr attr = {};
10596        char errmsg[STRERR_BUFSIZE];
10597        int type, pfd, err;
10598
10599        err = add_kprobe_event_legacy(probe_name, retprobe, kfunc_name, offset);
10600        if (err < 0) {
10601                pr_warn("failed to add legacy kprobe event for '%s+0x%zx': %s\n",
10602                        kfunc_name, offset,
10603                        libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10604                return err;
10605        }
10606        type = determine_kprobe_perf_type_legacy(probe_name, retprobe);
10607        if (type < 0) {
10608                pr_warn("failed to determine legacy kprobe event id for '%s+0x%zx': %s\n",
10609                        kfunc_name, offset,
10610                        libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
10611                return type;
10612        }
10613        attr.size = sizeof(attr);
10614        attr.config = type;
10615        attr.type = PERF_TYPE_TRACEPOINT;
10616
10617        pfd = syscall(__NR_perf_event_open, &attr,
10618                      pid < 0 ? -1 : pid, /* pid */
10619                      pid == -1 ? 0 : -1, /* cpu */
10620                      -1 /* group_fd */,  PERF_FLAG_FD_CLOEXEC);
10621        if (pfd < 0) {
10622                err = -errno;
10623                pr_warn("legacy kprobe perf_event_open() failed: %s\n",
10624                        libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10625                return err;
10626        }
10627        return pfd;
10628}
10629
10630struct bpf_link *
10631bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
10632                                const char *func_name,
10633                                const struct bpf_kprobe_opts *opts)
10634{
10635        DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
10636        char errmsg[STRERR_BUFSIZE];
10637        char *legacy_probe = NULL;
10638        struct bpf_link *link;
10639        size_t offset;
10640        bool retprobe, legacy;
10641        int pfd, err;
10642
10643        if (!OPTS_VALID(opts, bpf_kprobe_opts))
10644                return libbpf_err_ptr(-EINVAL);
10645
10646        retprobe = OPTS_GET(opts, retprobe, false);
10647        offset = OPTS_GET(opts, offset, 0);
10648        pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
10649
10650        legacy = determine_kprobe_perf_type() < 0;
10651        if (!legacy) {
10652                pfd = perf_event_open_probe(false /* uprobe */, retprobe,
10653                                            func_name, offset,
10654                                            -1 /* pid */, 0 /* ref_ctr_off */);
10655        } else {
10656                char probe_name[256];
10657
10658                gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name),
10659                                             func_name, offset);
10660
10661                legacy_probe = strdup(probe_name);
10662                if (!legacy_probe)
10663                        return libbpf_err_ptr(-ENOMEM);
10664
10665                pfd = perf_event_kprobe_open_legacy(legacy_probe, retprobe, func_name,
10666                                                    offset, -1 /* pid */);
10667        }
10668        if (pfd < 0) {
10669                err = pfd; /* probe helpers return negative error codes directly */
10670                pr_warn("prog '%s': failed to create %s '%s+0x%zx' perf event: %s\n",
10671                        prog->name, retprobe ? "kretprobe" : "kprobe",
10672                        func_name, offset,
10673                        libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10674                goto err_out;
10675        }
10676        link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
10677        err = libbpf_get_error(link);
10678        if (err) {
10679                close(pfd);
10680                pr_warn("prog '%s': failed to attach to %s '%s+0x%zx': %s\n",
10681                        prog->name, retprobe ? "kretprobe" : "kprobe",
10682                        func_name, offset,
10683                        libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10684                goto err_out;
10685        }
10686        if (legacy) {
10687                struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
10688
10689                perf_link->legacy_probe_name = legacy_probe;
10690                perf_link->legacy_is_kprobe = true;
10691                perf_link->legacy_is_retprobe = retprobe;
10692        }
10693
10694        return link;
10695err_out:
10696        free(legacy_probe);
10697        return libbpf_err_ptr(err);
10698}
10699
10700struct bpf_link *bpf_program__attach_kprobe(const struct bpf_program *prog,
10701                                            bool retprobe,
10702                                            const char *func_name)
10703{
10704        DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts,
10705                .retprobe = retprobe,
10706        );
10707
10708        return bpf_program__attach_kprobe_opts(prog, func_name, &opts);
10709}
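
/* Illustrative usage sketch (function name and offset are hypothetical):
 *
 *	DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts,
 *		.retprobe = false,
 *		.offset = 0x10,
 *	);
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_kprobe_opts(prog, "do_unlinkat", &opts);
 *	err = libbpf_get_error(link);
 *
 * On kernels without the kprobe PMU this transparently falls back to the
 * legacy tracefs kprobe_events interface, as implemented above.
 */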
10710
10711/* Adapted from perf/util/string.c */
10712static bool glob_match(const char *str, const char *pat)
10713{
10714        while (*str && *pat && *pat != '*') {
10715                if (*pat == '?') {      /* Matches any single character */
10716                        str++;
10717                        pat++;
10718                        continue;
10719                }
10720                if (*str != *pat)
10721                        return false;
10722                str++;
10723                pat++;
10724        }
10725        /* Check wild card */
10726        if (*pat == '*') {
10727                while (*pat == '*')
10728                        pat++;
10729                if (!*pat) /* Tail wild card matches all */
10730                        return true;
10731                while (*str)
10732                        if (glob_match(str++, pat))
10733                                return true;
10734        }
10735        return !*str && !*pat;
10736}
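
/* Examples of the matching rules above:
 *	glob_match("bpf_map_update_elem", "bpf_*")      -> true
 *	glob_match("bpf_map_update_elem", "bpf_*_elem") -> true
 *	glob_match("sys_open", "sys_?pen")              -> true
 *	glob_match("sys_open", "sys_close")             -> false
 */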
10737
10738struct kprobe_multi_resolve {
10739        const char *pattern;
10740        unsigned long *addrs;
10741        size_t cap;
10742        size_t cnt;
10743};
10744
10745static int
10746resolve_kprobe_multi_cb(unsigned long long sym_addr, char sym_type,
10747                        const char *sym_name, void *ctx)
10748{
10749        struct kprobe_multi_resolve *res = ctx;
10750        int err;
10751
10752        if (!glob_match(sym_name, res->pattern))
10753                return 0;
10754
10755        err = libbpf_ensure_mem((void **) &res->addrs, &res->cap, sizeof(unsigned long),
10756                                res->cnt + 1);
10757        if (err)
10758                return err;
10759
10760        res->addrs[res->cnt++] = (unsigned long) sym_addr;
10761        return 0;
10762}
10763
10764struct bpf_link *
10765bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
10766                                      const char *pattern,
10767                                      const struct bpf_kprobe_multi_opts *opts)
10768{
10769        LIBBPF_OPTS(bpf_link_create_opts, lopts);
10770        struct kprobe_multi_resolve res = {
10771                .pattern = pattern,
10772        };
10773        struct bpf_link *link = NULL;
10774        char errmsg[STRERR_BUFSIZE];
10775        const unsigned long *addrs;
10776        int err, link_fd, prog_fd;
10777        const __u64 *cookies;
10778        const char **syms;
10779        bool retprobe;
10780        size_t cnt;
10781
10782        if (!OPTS_VALID(opts, bpf_kprobe_multi_opts))
10783                return libbpf_err_ptr(-EINVAL);
10784
10785        syms    = OPTS_GET(opts, syms, false);
10786        addrs   = OPTS_GET(opts, addrs, false);
10787        cnt     = OPTS_GET(opts, cnt, false);
10788        cookies = OPTS_GET(opts, cookies, false);
10789
10790        if (!pattern && !addrs && !syms)
10791                return libbpf_err_ptr(-EINVAL);
10792        if (pattern && (addrs || syms || cookies || cnt))
10793                return libbpf_err_ptr(-EINVAL);
10794        if (!pattern && !cnt)
10795                return libbpf_err_ptr(-EINVAL);
10796        if (addrs && syms)
10797                return libbpf_err_ptr(-EINVAL);
10798
10799        if (pattern) {
10800                err = libbpf_kallsyms_parse(resolve_kprobe_multi_cb, &res);
10801                if (err)
10802                        goto error;
10803                if (!res.cnt) {
10804                        err = -ENOENT;
10805                        goto error;
10806                }
10807                addrs = res.addrs;
10808                cnt = res.cnt;
10809        }
10810
10811        retprobe = OPTS_GET(opts, retprobe, false);
10812
10813        lopts.kprobe_multi.syms = syms;
10814        lopts.kprobe_multi.addrs = addrs;
10815        lopts.kprobe_multi.cookies = cookies;
10816        lopts.kprobe_multi.cnt = cnt;
10817        lopts.kprobe_multi.flags = retprobe ? BPF_F_KPROBE_MULTI_RETURN : 0;
10818
10819        link = calloc(1, sizeof(*link));
10820        if (!link) {
10821                err = -ENOMEM;
10822                goto error;
10823        }
10824        link->detach = &bpf_link__detach_fd;
10825
10826        prog_fd = bpf_program__fd(prog);
10827        link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, &lopts);
10828        if (link_fd < 0) {
10829                err = -errno;
10830                pr_warn("prog '%s': failed to attach: %s\n",
10831                        prog->name, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10832                goto error;
10833        }
10834        link->fd = link_fd;
10835        free(res.addrs);
10836        return link;
10837
10838error:
10839        free(link);
10840        free(res.addrs);
10841        return libbpf_err_ptr(err);
10842}
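
/* Illustrative usage sketch (the pattern is hypothetical): attach one
 * program to every kallsyms function matching a glob, resolved via the
 * pattern path above:
 *
 *	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts, .retprobe = false);
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_kprobe_multi_opts(prog, "tcp_*", &opts);
 *
 * Alternatively, pass explicit .syms or .addrs together with .cnt and leave
 * the pattern NULL; the two modes are mutually exclusive.
 */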
10843
10844static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link)
10845{
10846        DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts);
10847        unsigned long offset = 0;
10848        const char *func_name;
10849        char *func;
10850        int n;
10851
10852        *link = NULL;
10853
10854        /* no auto-attach for SEC("kprobe") and SEC("kretprobe") */
10855        if (strcmp(prog->sec_name, "kprobe") == 0 || strcmp(prog->sec_name, "kretprobe") == 0)
10856                return 0;
10857
10858        opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe/");
10859        if (opts.retprobe)
10860                func_name = prog->sec_name + sizeof("kretprobe/") - 1;
10861        else
10862                func_name = prog->sec_name + sizeof("kprobe/") - 1;
10863
10864        n = sscanf(func_name, "%m[a-zA-Z0-9_.]+%li", &func, &offset);
10865        if (n < 1) {
10866                pr_warn("kprobe name is invalid: %s\n", func_name);
10867                return -EINVAL;
10868        }
10869        if (opts.retprobe && offset != 0) {
10870                free(func);
10871                pr_warn("kretprobes do not support offset specification\n");
10872                return -EINVAL;
10873        }
10874
10875        opts.offset = offset;
10876        *link = bpf_program__attach_kprobe_opts(prog, func, &opts);
10877        free(func);
10878        return libbpf_get_error(*link);
10879}
10880
10881static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link)
10882{
10883        LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
10884        const char *spec;
10885        char *pattern;
10886        int n;
10887
10888        *link = NULL;
10889
10890        /* no auto-attach for SEC("kprobe.multi") and SEC("kretprobe.multi") */
10891        if (strcmp(prog->sec_name, "kprobe.multi") == 0 ||
10892            strcmp(prog->sec_name, "kretprobe.multi") == 0)
10893                return 0;
10894
10895        opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe.multi/");
10896        if (opts.retprobe)
10897                spec = prog->sec_name + sizeof("kretprobe.multi/") - 1;
10898        else
10899                spec = prog->sec_name + sizeof("kprobe.multi/") - 1;
10900
10901        n = sscanf(spec, "%m[a-zA-Z0-9_.*?]", &pattern);
10902        if (n < 1) {
10903                pr_warn("kprobe multi pattern is invalid: %s\n", spec);
10904                return -EINVAL;
10905        }
10906
10907        *link = bpf_program__attach_kprobe_multi_opts(prog, pattern, &opts);
10908        free(pattern);
10909        return libbpf_get_error(*link);
10910}
10911
10912static void gen_uprobe_legacy_event_name(char *buf, size_t buf_sz,
10913                                         const char *binary_path, uint64_t offset)
10914{
10915        int i;
10916
10917        snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx", getpid(), binary_path, (size_t)offset);
10918
10919        /* sanitize binary_path in the probe name */
10920        for (i = 0; buf[i]; i++) {
10921                if (!isalnum(buf[i]))
10922                        buf[i] = '_';
10923        }
10924}
10925
10926static inline int add_uprobe_event_legacy(const char *probe_name, bool retprobe,
10927                                          const char *binary_path, size_t offset)
10928{
10929        const char *file = "/sys/kernel/debug/tracing/uprobe_events";
10930
10931        return append_to_file(file, "%c:%s/%s %s:0x%zx",
10932                              retprobe ? 'r' : 'p',
10933                              retprobe ? "uretprobes" : "uprobes",
10934                              probe_name, binary_path, offset);
10935}
10936
10937static inline int remove_uprobe_event_legacy(const char *probe_name, bool retprobe)
10938{
10939        const char *file = "/sys/kernel/debug/tracing/uprobe_events";
10940
10941        return append_to_file(file, "-:%s/%s", retprobe ? "uretprobes" : "uprobes", probe_name);
10942}
10943
10944static int determine_uprobe_perf_type_legacy(const char *probe_name, bool retprobe)
10945{
10946        char file[512];
10947
10948        snprintf(file, sizeof(file),
10949                 "/sys/kernel/debug/tracing/events/%s/%s/id",
10950                 retprobe ? "uretprobes" : "uprobes", probe_name);
10951
10952        return parse_uint_from_file(file, "%d\n");
10953}
10954
10955static int perf_event_uprobe_open_legacy(const char *probe_name, bool retprobe,
10956                                         const char *binary_path, size_t offset, int pid)
10957{
10958        struct perf_event_attr attr;
10959        int type, pfd, err;
10960
10961        err = add_uprobe_event_legacy(probe_name, retprobe, binary_path, offset);
10962        if (err < 0) {
10963                pr_warn("failed to add legacy uprobe event for %s:0x%zx: %d\n",
10964                        binary_path, (size_t)offset, err);
10965                return err;
10966        }
10967        type = determine_uprobe_perf_type_legacy(probe_name, retprobe);
10968        if (type < 0) {
10969                pr_warn("failed to determine legacy uprobe event id for %s:0x%zx: %d\n",
10970                        binary_path, offset, type);
10971                return type;
10972        }
10973
10974        memset(&attr, 0, sizeof(attr));
10975        attr.size = sizeof(attr);
10976        attr.config = type;
10977        attr.type = PERF_TYPE_TRACEPOINT;
10978
10979        pfd = syscall(__NR_perf_event_open, &attr,
10980                      pid < 0 ? -1 : pid, /* pid */
10981                      pid == -1 ? 0 : -1, /* cpu */
10982                      -1 /* group_fd */,  PERF_FLAG_FD_CLOEXEC);
10983        if (pfd < 0) {
10984                err = -errno;
10985                pr_warn("legacy uprobe perf_event_open() failed: %d\n", err);
10986                return err;
10987        }
10988        return pfd;
10989}
10990
10991/* uprobes deal in relative offsets; subtract the base address associated with
10992 * the mapped binary.  See Documentation/trace/uprobetracer.rst for more
10993 * details.
10994 */
10995static long elf_find_relative_offset(const char *filename, Elf *elf, long addr)
10996{
10997        size_t n;
10998        int i;
10999
11000        if (elf_getphdrnum(elf, &n)) {
11001                pr_warn("elf: failed to find program headers for '%s': %s\n", filename,
11002                        elf_errmsg(-1));
11003                return -ENOENT;
11004        }
11005
11006        for (i = 0; i < n; i++) {
11007                long seg_start, seg_end, seg_offset;
11008                GElf_Phdr phdr;
11009
11010                if (!gelf_getphdr(elf, i, &phdr)) {
11011                        pr_warn("elf: failed to get program header %d from '%s': %s\n", i, filename,
11012                                elf_errmsg(-1));
11013                        return -ENOENT;
11014                }
11015                if (phdr.p_type != PT_LOAD || !(phdr.p_flags & PF_X))
11016                        continue;
11017
11018                seg_start = phdr.p_vaddr;
11019                seg_end = seg_start + phdr.p_memsz;
11020                seg_offset = phdr.p_offset;
11021                if (addr >= seg_start && addr < seg_end)
11022                        return addr - seg_start + seg_offset;
11023        }
11024        pr_warn("elf: failed to find prog header containing 0x%lx in '%s'\n", addr, filename);
11025        return -ENOENT;
11026}
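
/* Worked example (illustrative numbers): a symbol at virtual address
 * 0x401130 that falls inside an executable PT_LOAD segment with
 * p_vaddr = 0x400000 and p_offset = 0 maps to file-relative offset
 * 0x401130 - 0x400000 + 0 = 0x1130, which is the form uprobes expect.
 */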
11027
11028/* Return next ELF section of sh_type after scn, or first of that type if scn is NULL. */
11029static Elf_Scn *elf_find_next_scn_by_type(Elf *elf, int sh_type, Elf_Scn *scn)
11030{
11031        while ((scn = elf_nextscn(elf, scn)) != NULL) {
11032                GElf_Shdr sh;
11033
11034                if (!gelf_getshdr(scn, &sh))
11035                        continue;
11036                if (sh.sh_type == sh_type)
11037                        return scn;
11038        }
11039        return NULL;
11040}
11041
11042/* Find offset of function name in object specified by path.  "name" matches
11043 * symbol name or name@@LIB for library functions.
11044 */
11045static long elf_find_func_offset(const char *binary_path, const char *name)
11046{
11047        int fd, i, sh_types[2] = { SHT_DYNSYM, SHT_SYMTAB };
11048        bool is_shared_lib, is_name_qualified;
11049        char errmsg[STRERR_BUFSIZE];
11050        long ret = -ENOENT;
11051        size_t name_len;
11052        GElf_Ehdr ehdr;
11053        Elf *elf;
11054
11055        fd = open(binary_path, O_RDONLY | O_CLOEXEC);
11056        if (fd < 0) {
11057                ret = -errno;
11058                pr_warn("failed to open %s: %s\n", binary_path,
11059                        libbpf_strerror_r(ret, errmsg, sizeof(errmsg)));
11060                return ret;
11061        }
11062        elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
11063        if (!elf) {
11064                pr_warn("elf: could not read elf from %s: %s\n", binary_path, elf_errmsg(-1));
11065                close(fd);
11066                return -LIBBPF_ERRNO__FORMAT;
11067        }
11068        if (!gelf_getehdr(elf, &ehdr)) {
11069                pr_warn("elf: failed to get ehdr from %s: %s\n", binary_path, elf_errmsg(-1));
11070                ret = -LIBBPF_ERRNO__FORMAT;
11071                goto out;
11072        }
11073        /* for shared lib case, we do not need to calculate relative offset */
11074        is_shared_lib = ehdr.e_type == ET_DYN;
11075
11076        name_len = strlen(name);
11077        /* Does name specify "@@LIB"? */
11078        is_name_qualified = strstr(name, "@@") != NULL;
11079
11080        /* Search SHT_DYNSYM, SHT_SYMTAB for symbol.  This search order is used because if
11081         * a binary is stripped, it may only have SHT_DYNSYM, and a fully-statically
11082         * linked binary may not have SHT_DYNSYM, so absence of a section should not be
11083         * reported as a warning/error.
11084         */
11085        for (i = 0; i < ARRAY_SIZE(sh_types); i++) {
11086                size_t nr_syms, strtabidx, idx;
11087                Elf_Data *symbols = NULL;
11088                Elf_Scn *scn = NULL;
11089                int last_bind = -1;
11090                const char *sname;
11091                GElf_Shdr sh;
11092
11093                scn = elf_find_next_scn_by_type(elf, sh_types[i], NULL);
11094                if (!scn) {
11095                        pr_debug("elf: failed to find symbol table ELF sections in '%s'\n",
11096                                 binary_path);
11097                        continue;
11098                }
11099                if (!gelf_getshdr(scn, &sh))
11100                        continue;
11101                strtabidx = sh.sh_link;
11102                symbols = elf_getdata(scn, 0);
11103                if (!symbols) {
11104                        pr_warn("elf: failed to get symbols for symtab section in '%s': %s\n",
11105                                binary_path, elf_errmsg(-1));
11106                        ret = -LIBBPF_ERRNO__FORMAT;
11107                        goto out;
11108                }
11109                nr_syms = symbols->d_size / sh.sh_entsize;
11110
11111                for (idx = 0; idx < nr_syms; idx++) {
11112                        int curr_bind;
11113                        GElf_Sym sym;
11114
11115                        if (!gelf_getsym(symbols, idx, &sym))
11116                                continue;
11117
11118                        if (GELF_ST_TYPE(sym.st_info) != STT_FUNC)
11119                                continue;
11120
11121                        sname = elf_strptr(elf, strtabidx, sym.st_name);
11122                        if (!sname)
11123                                continue;
11124
11125                        curr_bind = GELF_ST_BIND(sym.st_info);
11126
11127                        /* User can specify func, func@@LIB or func@@LIB_VERSION. */
11128                        if (strncmp(sname, name, name_len) != 0)
11129                                continue;
11130                        /* ...but we don't want a search for "foo" to match "foo2" also, so any
11131                         * additional characters in sname should be of the form "@@LIB".
11132                         */
11133                        if (!is_name_qualified && sname[name_len] != '\0' && sname[name_len] != '@')
11134                                continue;
11135
11136                        if (ret >= 0) {
11137                                /* handle multiple matches */
11138                                if (last_bind != STB_WEAK && curr_bind != STB_WEAK) {
11139                                        /* Only accept one non-weak bind. */
11140                                        pr_warn("elf: ambiguous match for '%s', '%s' in '%s'\n",
11141                                                sname, name, binary_path);
11142                                        ret = -LIBBPF_ERRNO__FORMAT;
11143                                        goto out;
11144                                } else if (curr_bind == STB_WEAK) {
11145                                        /* already have a non-weak bind, and
11146                                         * this is a weak bind, so ignore.
11147                                         */
11148                                        continue;
11149                                }
11150                        }
11151                        ret = sym.st_value;
11152                        last_bind = curr_bind;
11153                }
11154                /* For binaries that are not shared libraries, we need relative offset */
11155                if (ret > 0 && !is_shared_lib)
11156                        ret = elf_find_relative_offset(binary_path, elf, ret);
11157                if (ret > 0)
11158                        break;
11159        }
11160
11161        if (ret > 0) {
11162                pr_debug("elf: symbol address match for '%s' in '%s': 0x%lx\n", name, binary_path,
11163                         ret);
11164        } else {
11165                if (ret == 0) {
11166                        pr_warn("elf: '%s' is 0 in symtab for '%s': %s\n", name, binary_path,
11167                                is_shared_lib ? "should not be 0 in a shared library" :
11168                                                "try using shared library path instead");
11169                        ret = -ENOENT;
11170                } else {
11171                        pr_warn("elf: failed to find symbol '%s' in '%s'\n", name, binary_path);
11172                }
11173        }
11174out:
11175        elf_end(elf);
11176        close(fd);
11177        return ret;
11178}
11179
11180static const char *arch_specific_lib_paths(void)
11181{
11182        /*
11183         * Based on https://packages.debian.org/sid/libc6.
11184         *
11185         * Assume that the traced program is built for the same architecture
11186         * as libbpf, which should cover the vast majority of cases.
11187         */
11188#if defined(__x86_64__)
11189        return "/lib/x86_64-linux-gnu";
11190#elif defined(__i386__)
11191        return "/lib/i386-linux-gnu";
11192#elif defined(__s390x__)
11193        return "/lib/s390x-linux-gnu";
11194#elif defined(__s390__)
11195        return "/lib/s390-linux-gnu";
11196#elif defined(__arm__) && defined(__SOFTFP__)
11197        return "/lib/arm-linux-gnueabi";
11198#elif defined(__arm__) && !defined(__SOFTFP__)
11199        return "/lib/arm-linux-gnueabihf";
11200#elif defined(__aarch64__)
11201        return "/lib/aarch64-linux-gnu";
11202#elif defined(__mips__) && defined(__MIPSEL__) && _MIPS_SZLONG == 64
11203        return "/lib/mips64el-linux-gnuabi64";
11204#elif defined(__mips__) && defined(__MIPSEL__) && _MIPS_SZLONG == 32
11205        return "/lib/mipsel-linux-gnu";
11206#elif defined(__powerpc64__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
11207        return "/lib/powerpc64le-linux-gnu";
11208#elif defined(__sparc__) && defined(__arch64__)
11209        return "/lib/sparc64-linux-gnu";
11210#elif defined(__riscv) && __riscv_xlen == 64
11211        return "/lib/riscv64-linux-gnu";
11212#else
11213        return NULL;
11214#endif
11215}
11216
11217/* Get full path to program/shared library. */
11218static int resolve_full_path(const char *file, char *result, size_t result_sz)
11219{
11220        const char *search_paths[3] = {};
11221        int i;
11222
11223        if (str_has_sfx(file, ".so") || strstr(file, ".so.")) {
11224                search_paths[0] = getenv("LD_LIBRARY_PATH");
11225                search_paths[1] = "/usr/lib64:/usr/lib";
11226                search_paths[2] = arch_specific_lib_paths();
11227        } else {
11228                search_paths[0] = getenv("PATH");
11229                search_paths[1] = "/usr/bin:/usr/sbin";
11230        }
11231
11232        for (i = 0; i < ARRAY_SIZE(search_paths); i++) {
11233                const char *s;
11234
11235                if (!search_paths[i])
11236                        continue;
11237                for (s = search_paths[i]; s != NULL; s = strchr(s, ':')) {
11238                        char *next_path;
11239                        int seg_len;
11240
11241                        if (s[0] == ':')
11242                                s++;
11243                        next_path = strchr(s, ':');
11244                        seg_len = next_path ? next_path - s : strlen(s);
11245                        if (!seg_len)
11246                                continue;
11247                        snprintf(result, result_sz, "%.*s/%s", seg_len, s, file);
11248                        /* ensure it is an executable file/link */
11249                        if (access(result, R_OK | X_OK) < 0)
11250                                continue;
11251                        pr_debug("resolved '%s' to '%s'\n", file, result);
11252                        return 0;
11253                }
11254        }
11255        return -ENOENT;
11256}
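
     /* Illustrative example (not part of the original source): with the
      * search order above, a bare shared-library name on x86-64 would
      * typically resolve through the arch-specific multiarch directory:
      *
      *         resolve_full_path("libc.so.6", buf, sizeof(buf));
      *         // buf: "/lib/x86_64-linux-gnu/libc.so.6" (assuming glibc
      *         // is installed there)
      */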
11257
11258LIBBPF_API struct bpf_link *
11259bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
11260                                const char *binary_path, size_t func_offset,
11261                                const struct bpf_uprobe_opts *opts)
11262{
11263        DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
11264        char errmsg[STRERR_BUFSIZE], *legacy_probe = NULL;
11265        char full_binary_path[PATH_MAX];
11266        struct bpf_link *link;
11267        size_t ref_ctr_off;
11268        int pfd, err;
11269        bool retprobe, legacy;
11270        const char *func_name;
11271
11272        if (!OPTS_VALID(opts, bpf_uprobe_opts))
11273                return libbpf_err_ptr(-EINVAL);
11274
11275        retprobe = OPTS_GET(opts, retprobe, false);
11276        ref_ctr_off = OPTS_GET(opts, ref_ctr_offset, 0);
11277        pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
11278
11279        if (binary_path && !strchr(binary_path, '/')) {
11280                err = resolve_full_path(binary_path, full_binary_path,
11281                                        sizeof(full_binary_path));
11282                if (err) {
11283                        pr_warn("prog '%s': failed to resolve full path for '%s': %d\n",
11284                                prog->name, binary_path, err);
11285                        return libbpf_err_ptr(err);
11286                }
11287                binary_path = full_binary_path;
11288        }
11289        func_name = OPTS_GET(opts, func_name, NULL);
11290        if (func_name) {
11291                long sym_off;
11292
11293                if (!binary_path) {
11294                        pr_warn("prog '%s': name-based attach requires binary_path\n",
11295                                prog->name);
11296                        return libbpf_err_ptr(-EINVAL);
11297                }
11298                sym_off = elf_find_func_offset(binary_path, func_name);
11299                if (sym_off < 0)
11300                        return libbpf_err_ptr(sym_off);
11301                func_offset += sym_off;
11302        }
11303
11304        legacy = determine_uprobe_perf_type() < 0;
11305        if (!legacy) {
11306                pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path,
11307                                            func_offset, pid, ref_ctr_off);
11308        } else {
11309                char probe_name[PATH_MAX + 64];
11310
11311                if (ref_ctr_off)
11312                        return libbpf_err_ptr(-EINVAL);
11313
11314                gen_uprobe_legacy_event_name(probe_name, sizeof(probe_name),
11315                                             binary_path, func_offset);
11316
11317                legacy_probe = strdup(probe_name);
11318                if (!legacy_probe)
11319                        return libbpf_err_ptr(-ENOMEM);
11320
11321                pfd = perf_event_uprobe_open_legacy(legacy_probe, retprobe,
11322                                                    binary_path, func_offset, pid);
11323        }
11324        if (pfd < 0) {
11325                err = -errno;
11326                pr_warn("prog '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
11327                        prog->name, retprobe ? "uretprobe" : "uprobe",
11328                        binary_path, func_offset,
11329                        libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
11330                goto err_out;
11331        }
11332
11333        link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
11334        err = libbpf_get_error(link);
11335        if (err) {
11336                close(pfd);
11337                pr_warn("prog '%s': failed to attach to %s '%s:0x%zx': %s\n",
11338                        prog->name, retprobe ? "uretprobe" : "uprobe",
11339                        binary_path, func_offset,
11340                        libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
11341                goto err_out;
11342        }
11343        if (legacy) {
11344                struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
11345
11346                perf_link->legacy_probe_name = legacy_probe;
11347                perf_link->legacy_is_kprobe = false;
11348                perf_link->legacy_is_retprobe = retprobe;
11349        }
11350        return link;
11351err_out:
11352        free(legacy_probe);
11353        return libbpf_err_ptr(err);
11355}
11356
11357/* Format of u[ret]probe section definition supporting auto-attach:
11358 * u[ret]probe/binary:function[+offset]
11359 *
11360 * binary can be an absolute/relative path or a filename; the latter is resolved to a
11361 * full binary path via bpf_program__attach_uprobe_opts.
11362 *
11363 * Specifying uprobe+ ensures we carry out strict matching; either "uprobe" must be
11364 * specified (and auto-attach is not possible) or the above format is specified for
11365 * auto-attach.
11366 */
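     /* Some section names the format above accepts; binary and function
      * names here are placeholders shown purely for illustration:
      *
      *         SEC("uprobe")                           // valid, no auto-attach
      *         SEC("uprobe//usr/lib/libc.so.6:malloc") // absolute path
      *         SEC("uretprobe/libc.so.6:malloc")       // resolved to full path
      *         SEC("uprobe/mybin:myfunc+0x10")         // with extra offset
      */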
11367static int attach_uprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link)
11368{
11369        DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts);
11370        char *probe_type = NULL, *binary_path = NULL, *func_name = NULL;
11371        int n, ret = -EINVAL;
11372        long offset = 0;
11373
11374        *link = NULL;
11375
11376        n = sscanf(prog->sec_name, "%m[^/]/%m[^:]:%m[a-zA-Z0-9_.]+%li",
11377                   &probe_type, &binary_path, &func_name, &offset);
11378        switch (n) {
11379        case 1:
11380                /* handle SEC("u[ret]probe") - format is valid, but auto-attach is impossible. */
11381                ret = 0;
11382                break;
11383        case 2:
11384                pr_warn("prog '%s': section '%s' missing ':function[+offset]' specification\n",
11385                        prog->name, prog->sec_name);
11386                break;
11387        case 3:
11388        case 4:
11389                opts.retprobe = strcmp(probe_type, "uretprobe") == 0;
11390                if (opts.retprobe && offset != 0) {
11391                        pr_warn("prog '%s': uretprobes do not support offset specification\n",
11392                                prog->name);
11393                        break;
11394                }
11395                opts.func_name = func_name;
11396                *link = bpf_program__attach_uprobe_opts(prog, -1, binary_path, offset, &opts);
11397                ret = libbpf_get_error(*link);
11398                break;
11399        default:
11400                pr_warn("prog '%s': invalid format of section definition '%s'\n", prog->name,
11401                        prog->sec_name);
11402                break;
11403        }
11404        free(probe_type);
11405        free(binary_path);
11406        free(func_name);
11407
11408        return ret;
11409}
11410
11411struct bpf_link *bpf_program__attach_uprobe(const struct bpf_program *prog,
11412                                            bool retprobe, pid_t pid,
11413                                            const char *binary_path,
11414                                            size_t func_offset)
11415{
11416        DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts, .retprobe = retprobe);
11417
11418        return bpf_program__attach_uprobe_opts(prog, pid, binary_path, func_offset, &opts);
11419}
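
     /* Minimal usage sketch for the uprobe attach APIs above; the binary
      * and function names are illustrative assumptions, not taken from
      * this file:
      *
      *         DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uopts,
      *                 .func_name = "malloc", // resolved via ELF symbol tables
      *         );
      *         struct bpf_link *link;
      *
      *         // pid -1 = any process; func_offset 0 is added to the
      *         // resolved symbol offset
      *         link = bpf_program__attach_uprobe_opts(prog, -1, "libc.so.6",
      *                                                0, &uopts);
      */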
11420
11421struct bpf_link *bpf_program__attach_usdt(const struct bpf_program *prog,
11422                                          pid_t pid, const char *binary_path,
11423                                          const char *usdt_provider, const char *usdt_name,
11424                                          const struct bpf_usdt_opts *opts)
11425{
11426        char resolved_path[PATH_MAX];
11427        struct bpf_object *obj = prog->obj;
11428        struct bpf_link *link;
11429        __u64 usdt_cookie;
11430        int err;
11431
11432        if (!OPTS_VALID(opts, bpf_usdt_opts))
11433                return libbpf_err_ptr(-EINVAL);
11434
11435        if (bpf_program__fd(prog) < 0) {
11436                pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n",
11437                        prog->name);
11438                return libbpf_err_ptr(-EINVAL);
11439        }
11440
11441        if (!strchr(binary_path, '/')) {
11442                err = resolve_full_path(binary_path, resolved_path, sizeof(resolved_path));
11443                if (err) {
11444                        pr_warn("prog '%s': failed to resolve full path for '%s': %d\n",
11445                                prog->name, binary_path, err);
11446                        return libbpf_err_ptr(err);
11447                }
11448                binary_path = resolved_path;
11449        }
11450
11451        /* USDT manager is instantiated lazily on first USDT attach. It will
11452         * be destroyed together with BPF object in bpf_object__close().
11453         */
11454        if (IS_ERR(obj->usdt_man))
11455                return libbpf_ptr(obj->usdt_man);
11456        if (!obj->usdt_man) {
11457                obj->usdt_man = usdt_manager_new(obj);
11458                if (IS_ERR(obj->usdt_man))
11459                        return libbpf_ptr(obj->usdt_man);
11460        }
11461
11462        usdt_cookie = OPTS_GET(opts, usdt_cookie, 0);
11463        link = usdt_manager_attach_usdt(obj->usdt_man, prog, pid, binary_path,
11464                                        usdt_provider, usdt_name, usdt_cookie);
11465        err = libbpf_get_error(link);
11466        if (err)
11467                return libbpf_err_ptr(err);
11468        return link;
11469}
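
     /* Usage sketch; the provider/name pair is illustrative (libc:setjmp
      * is a USDT glibc happens to define, used here only as an example):
      *
      *         LIBBPF_OPTS(bpf_usdt_opts, uopts, .usdt_cookie = 0xcafe);
      *         struct bpf_link *link;
      *
      *         link = bpf_program__attach_usdt(prog, -1, "libc.so.6",
      *                                         "libc", "setjmp", &uopts);
      */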
11470
11471static int attach_usdt(const struct bpf_program *prog, long cookie, struct bpf_link **link)
11472{
11473        char *path = NULL, *provider = NULL, *name = NULL;
11474        const char *sec_name;
11475        int n, err;
11476
11477        sec_name = bpf_program__section_name(prog);
11478        if (strcmp(sec_name, "usdt") == 0) {
11479                /* no auto-attach for just SEC("usdt") */
11480                *link = NULL;
11481                return 0;
11482        }
11483
11484        n = sscanf(sec_name, "usdt/%m[^:]:%m[^:]:%m[^:]", &path, &provider, &name);
11485        if (n != 3) {
11486                pr_warn("invalid section '%s', expected SEC(\"usdt/<path>:<provider>:<name>\")\n",
11487                        sec_name);
11488                err = -EINVAL;
11489        } else {
11490                *link = bpf_program__attach_usdt(prog, -1 /* any process */, path,
11491                                                 provider, name, NULL);
11492                err = libbpf_get_error(*link);
11493        }
11494        free(path);
11495        free(provider);
11496        free(name);
11497        return err;
11498}
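
     /* For reference, a section definition matching the sscanf() format
      * above (path/provider/name are placeholders):
      *
      *         SEC("usdt/libc.so.6:libc:setjmp")
      */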
11499
11500static int determine_tracepoint_id(const char *tp_category,
11501                                   const char *tp_name)
11502{
11503        char file[PATH_MAX];
11504        int ret;
11505
11506        ret = snprintf(file, sizeof(file),
11507                       "/sys/kernel/debug/tracing/events/%s/%s/id",
11508                       tp_category, tp_name);
11509        if (ret < 0)
11510                return -errno;
11511        if (ret >= sizeof(file)) {
11512                pr_debug("tracepoint %s/%s path is too long\n",
11513                         tp_category, tp_name);
11514                return -E2BIG;
11515        }
11516        return parse_uint_from_file(file, "%d\n");
11517}
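
     /* E.g. for the sched:sched_switch tracepoint this reads the integer
      * event ID from (illustrative path):
      *
      *         /sys/kernel/debug/tracing/events/sched/sched_switch/id
      */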
11518
11519static int perf_event_open_tracepoint(const char *tp_category,
11520                                      const char *tp_name)
11521{
11522        struct perf_event_attr attr = {};
11523        char errmsg[STRERR_BUFSIZE];
11524        int tp_id, pfd, err;
11525
11526        tp_id = determine_tracepoint_id(tp_category, tp_name);
11527        if (tp_id < 0) {
11528                pr_warn("failed to determine tracepoint '%s/%s' perf event ID: %s\n",
11529                        tp_category, tp_name,
11530                        libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg)));
11531                return tp_id;
11532        }
11533
11534        attr.type = PERF_TYPE_TRACEPOINT;
11535        attr.size = sizeof(attr);
11536        attr.config = tp_id;
11537
11538        pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
11539                      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
11540        if (pfd < 0) {
11541                err = -errno;
11542                pr_warn("tracepoint '%s/%s' perf_event_open() failed: %s\n",
11543                        tp_category, tp_name,
11544                        libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
11545                return err;
11546        }
11547        return pfd;
11548}
11549
11550struct bpf_link *bpf_program__attach_tracepoint_opts(const struct bpf_program *prog,
11551                                                     const char *tp_category,
11552                                                     const char *tp_name,
11553                                                     const struct bpf_tracepoint_opts *opts)
11554{
11555        DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
11556        char errmsg[STRERR_BUFSIZE];
11557        struct bpf_link *link;
11558        int pfd, err;
11559
11560        if (!OPTS_VALID(opts, bpf_tracepoint_opts))
11561                return libbpf_err_ptr(-EINVAL);
11562
11563        pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
11564
11565        pfd = perf_event_open_tracepoint(tp_category, tp_name);
11566        if (pfd < 0) {
11567                pr_warn("prog '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
11568                        prog->name, tp_category, tp_name,
11569                        libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
11570                return libbpf_err_ptr(pfd);
11571        }
11572        link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
11573        err = libbpf_get_error(link);
11574        if (err) {
11575                close(pfd);
11576                pr_warn("prog '%s': failed to attach to tracepoint '%s/%s': %s\n",
11577                        prog->name, tp_category, tp_name,
11578                        libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
11579                return libbpf_err_ptr(err);
11580        }
11581        return link;
11582}
11583
11584struct bpf_link *bpf_program__attach_tracepoint(const struct bpf_program *prog,
11585                                                const char *tp_category,
11586                                                const char *tp_name)
11587{
11588        return bpf_program__attach_tracepoint_opts(prog, tp_category, tp_name, NULL);
11589}
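
     /* Usage sketch; the category/name pair is illustrative:
      *
      *         struct bpf_link *link;
      *
      *         link = bpf_program__attach_tracepoint(prog, "sched",
      *                                               "sched_switch");
      *         err = libbpf_get_error(link); // 0 on success
      */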
11590
11591static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link)
11592{
11593        char *sec_name, *tp_cat, *tp_name;
11594
11595        *link = NULL;
11596
11597        /* no auto-attach for SEC("tp") or SEC("tracepoint") */
11598        if (strcmp(prog->sec_name, "tp") == 0 || strcmp(prog->sec_name, "tracepoint") == 0)
11599                return 0;
11600
11601        sec_name = strdup(prog->sec_name);
11602        if (!sec_name)
11603                return -ENOMEM;
11604
11605        /* extract "tp/<category>/<name>" or "tracepoint/<category>/<name>" */
11606        if (str_has_pfx(prog->sec_name, "tp/"))
11607                tp_cat = sec_name + sizeof("tp/") - 1;
11608        else
11609                tp_cat = sec_name + sizeof("tracepoint/") - 1;
11610        tp_name = strchr(tp_cat, '/');
11611        if (!tp_name) {
11612                free(sec_name);
11613                return -EINVAL;
11614        }
11615        *tp_name = '\0';
11616        tp_name++;
11617
11618        *link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name);
11619        free(sec_name);
11620        return libbpf_get_error(*link);
11621}
11622
11623struct bpf_link *bpf_program__attach_raw_tracepoint(const struct bpf_program *prog,
11624                                                    const char *tp_name)
11625{
11626        char errmsg[STRERR_BUFSIZE];
11627        struct bpf_link *link;
11628        int prog_fd, pfd;
11629
11630        prog_fd = bpf_program__fd(prog);
11631        if (prog_fd < 0) {
11632                pr_warn("prog '%s': can't attach before loaded\n", prog->name);
11633                return libbpf_err_ptr(-EINVAL);
11634        }
11635
11636        link = calloc(1, sizeof(*link));
11637        if (!link)
11638                return libbpf_err_ptr(-ENOMEM);
11639        link->detach = &bpf_link__detach_fd;
11640
11641        pfd = bpf_raw_tracepoint_open(tp_name, prog_fd);
11642        if (pfd < 0) {
11643                pfd = -errno;
11644                free(link);
11645                pr_warn("prog '%s': failed to attach to raw tracepoint '%s': %s\n",
11646                        prog->name, tp_name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
11647                return libbpf_err_ptr(pfd);
11648        }
11649        link->fd = pfd;
11650        return link;
11651}
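
     /* Usage sketch; "sys_enter" is one of the raw tracepoints exposed by
      * the kernel and serves purely as an example:
      *
      *         struct bpf_link *link;
      *
      *         link = bpf_program__attach_raw_tracepoint(prog, "sys_enter");
      */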
11652
11653static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link)
11654{
11655        static const char *const prefixes[] = {
11656                "raw_tp",
11657                "raw_tracepoint",
11658                "raw_tp.w",
11659                "raw_tracepoint.w",
11660        };
11661        size_t i;
11662        const char *tp_name = NULL;
11663
11664        *link = NULL;
11665
11666        for (i = 0; i < ARRAY_SIZE(prefixes); i++) {
11667                size_t pfx_len;
11668
11669                if (!str_has_pfx(prog->sec_name, prefixes[i]))
11670                        continue;
11671
11672                pfx_len = strlen(prefixes[i]);
11673                /* no auto-attach case, e.g., plain SEC("raw_tp") */
11674                if (prog->sec_name[pfx_len] == '\0')
11675                        return 0;
11676
11677                if (prog->sec_name[pfx_len] != '/')
11678                        continue;
11679
11680                tp_name = prog->sec_name + pfx_len + 1;
11681                break;
11682        }
11683
11684        if (!tp_name) {
11685                pr_warn("prog '%s': invalid section name '%s'\n",
11686                        prog->name, prog->sec_name);
11687                return -EINVAL;
11688        }
11689
11690        *link = bpf_program__attach_raw_tracepoint(prog, tp_name);
11691        return libbpf_get_error(*link);
11692}
11693
11694/* Common logic for all BPF program types that attach to a btf_id */
11695static struct bpf_link *bpf_program__attach_btf_id(const struct bpf_program *prog,
11696                                                   const struct bpf_trace_opts *opts)
11697{
11698        LIBBPF_OPTS(bpf_link_create_opts, link_opts);
11699        char errmsg[STRERR_BUFSIZE];
11700        struct bpf_link *link;
11701        int prog_fd, pfd;
11702
11703        if (!OPTS_VALID(opts, bpf_trace_opts))
11704                return libbpf_err_ptr(-EINVAL);
11705
11706        prog_fd = bpf_program__fd(prog);
11707        if (prog_fd < 0) {
11708                pr_warn("prog '%s': can't attach before loaded\n", prog->name);
11709                return libbpf_err_ptr(-EINVAL);
11710        }
11711
11712        link = calloc(1, sizeof(*link));
11713        if (!link)
11714                return libbpf_err_ptr(-ENOMEM);
11715        link->detach = &bpf_link__detach_fd;
11716
11717        /* libbpf is smart enough to redirect to BPF_RAW_TRACEPOINT_OPEN on old kernels */
11718        link_opts.tracing.cookie = OPTS_GET(opts, cookie, 0);
11719        pfd = bpf_link_create(prog_fd, 0, bpf_program__expected_attach_type(prog), &link_opts);
11720        if (pfd < 0) {
11721                pfd = -errno;
11722                free(link);
11723                pr_warn("prog '%s': failed to attach: %s\n",
11724                        prog->name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
11725                return libbpf_err_ptr(pfd);
11726        }
11727        link->fd = pfd;
11728        return link;
11729}
11730
11731struct bpf_link *bpf_program__attach_trace(const struct bpf_program *prog)
11732{
11733        return bpf_program__attach_btf_id(prog, NULL);
11734}
11735
11736struct bpf_link *bpf_program__attach_trace_opts(const struct bpf_program *prog,
11737                                                const struct bpf_trace_opts *opts)
11738{
11739        return bpf_program__attach_btf_id(prog, opts);
11740}
11741
11742struct bpf_link *bpf_program__attach_lsm(const struct bpf_program *prog)
11743{
11744        return bpf_program__attach_btf_id(prog, NULL);
11745}
11746
11747static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link)
11748{
11749        *link = bpf_program__attach_trace(prog);
11750        return libbpf_get_error(*link);
11751}
11752
11753static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link)
11754{
11755        *link = bpf_program__attach_lsm(prog);
11756        return libbpf_get_error(*link);
11757}
11758
11759static struct bpf_link *
11760bpf_program__attach_fd(const struct bpf_program *prog, int target_fd, int btf_id,
11761                       const char *target_name)
11762{
11763        DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts,
11764                            .target_btf_id = btf_id);
11765        enum bpf_attach_type attach_type;
11766        char errmsg[STRERR_BUFSIZE];
11767        struct bpf_link *link;
11768        int prog_fd, link_fd;
11769
11770        prog_fd = bpf_program__fd(prog);
11771        if (prog_fd < 0) {
11772                pr_warn("prog '%s': can't attach before loaded\n", prog->name);
11773                return libbpf_err_ptr(-EINVAL);
11774        }
11775
11776        link = calloc(1, sizeof(*link));
11777        if (!link)
11778                return libbpf_err_ptr(-ENOMEM);
11779        link->detach = &bpf_link__detach_fd;
11780
11781        attach_type = bpf_program__expected_attach_type(prog);
11782        link_fd = bpf_link_create(prog_fd, target_fd, attach_type, &opts);
11783        if (link_fd < 0) {
11784                link_fd = -errno;
11785                free(link);
11786                pr_warn("prog '%s': failed to attach to %s: %s\n",
11787                        prog->name, target_name,
11788                        libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
11789                return libbpf_err_ptr(link_fd);
11790        }
11791        link->fd = link_fd;
11792        return link;
11793}
11794
11795struct bpf_link *
11796bpf_program__attach_cgroup(const struct bpf_program *prog, int cgroup_fd)
11797{
11798        return bpf_program__attach_fd(prog, cgroup_fd, 0, "cgroup");
11799}
11800
11801struct bpf_link *
11802bpf_program__attach_netns(const struct bpf_program *prog, int netns_fd)
11803{
11804        return bpf_program__attach_fd(prog, netns_fd, 0, "netns");
11805}
11806
11807struct bpf_link *bpf_program__attach_xdp(const struct bpf_program *prog, int ifindex)
11808{
11809        /* target_fd/target_ifindex use the same field in LINK_CREATE */
11810        return bpf_program__attach_fd(prog, ifindex, 0, "xdp");
11811}
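
     /* Usage sketch; the interface name is an assumption for illustration:
      *
      *         int ifindex = if_nametoindex("eth0"); // <net/if.h>
      *
      *         struct bpf_link *link = bpf_program__attach_xdp(prog, ifindex);
      */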
11812
11813struct bpf_link *bpf_program__attach_freplace(const struct bpf_program *prog,
11814                                              int target_fd,
11815                                              const char *attach_func_name)
11816{
11817        int btf_id;
11818
11819        if (!!target_fd != !!attach_func_name) {
11820                pr_warn("prog '%s': supply none or both of target_fd and attach_func_name\n",
11821                        prog->name);
11822                return libbpf_err_ptr(-EINVAL);
11823        }
11824
11825        if (prog->type != BPF_PROG_TYPE_EXT) {
11826                pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace\n",
11827                        prog->name);
11828                return libbpf_err_ptr(-EINVAL);
11829        }
11830
11831        if (target_fd) {
11832                btf_id = libbpf_find_prog_btf_id(attach_func_name, target_fd);
11833                if (btf_id < 0)
11834                        return libbpf_err_ptr(btf_id);
11835
11836                return bpf_program__attach_fd(prog, target_fd, btf_id, "freplace");
11837        } else {
11838                /* no target, so use raw_tracepoint_open for compatibility
11839                 * with old kernels
11840                 */
11841                return bpf_program__attach_trace(prog);
11842        }
11843}
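
     /* Sketch of the two freplace modes handled above; the target FD and
      * function name are illustrative:
      *
      *         // replace a function inside an already-loaded target program
      *         link = bpf_program__attach_freplace(prog, tgt_fd, "xdp_main");
      *
      *         // or reuse the target recorded at load time (old-kernel path)
      *         link = bpf_program__attach_freplace(prog, 0, NULL);
      */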
11844
11845struct bpf_link *
11846bpf_program__attach_iter(const struct bpf_program *prog,
11847                         const struct bpf_iter_attach_opts *opts)
11848{
11849        DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
11850        char errmsg[STRERR_BUFSIZE];
11851        struct bpf_link *link;
11852        int prog_fd, link_fd;
11853        __u32 target_fd = 0;
11854
11855        if (!OPTS_VALID(opts, bpf_iter_attach_opts))
11856                return libbpf_err_ptr(-EINVAL);
11857
11858        link_create_opts.iter_info = OPTS_GET(opts, link_info, (void *)0);
11859        link_create_opts.iter_info_len = OPTS_GET(opts, link_info_len, 0);
11860
11861        prog_fd = bpf_program__fd(prog);
11862        if (prog_fd < 0) {
11863                pr_warn("prog '%s': can't attach before loaded\n", prog->name);
11864                return libbpf_err_ptr(-EINVAL);
11865        }
11866
11867        link = calloc(1, sizeof(*link));
11868        if (!link)
11869                return libbpf_err_ptr(-ENOMEM);
11870        link->detach = &bpf_link__detach_fd;
11871
11872        link_fd = bpf_link_create(prog_fd, target_fd, BPF_TRACE_ITER,
11873                                  &link_create_opts);
11874        if (link_fd < 0) {
11875                link_fd = -errno;
11876                free(link);
11877                pr_warn("prog '%s': failed to attach to iterator: %s\n",
11878                        prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
11879                return libbpf_err_ptr(link_fd);
11880        }
11881        link->fd = link_fd;
11882        return link;
11883}
11884
11885static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link)
11886{
11887        *link = bpf_program__attach_iter(prog, NULL);
11888        return libbpf_get_error(*link);
11889}
11890
11891struct bpf_link *bpf_program__attach(const struct bpf_program *prog)
11892{
11893        struct bpf_link *link = NULL;
11894        int err;
11895
11896        if (!prog->sec_def || !prog->sec_def->prog_attach_fn)
11897                return libbpf_err_ptr(-EOPNOTSUPP);
11898
11899        err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, &link);
11900        if (err)
11901                return libbpf_err_ptr(err);
11902
11903        /* When calling bpf_program__attach() explicitly, auto-attach support
11904         * is expected to work, so NULL returned link is considered an error.
11905         * This is different for skeleton's attach, see comment in
11906         * bpf_object__attach_skeleton().
11907         */
11908        if (!link)
11909                return libbpf_err_ptr(-EOPNOTSUPP);
11910
11911        return link;
11912}
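
     /* Typical use after bpf_object__load(), relying on the program's
      * SEC() definition supporting auto-attach (sketch):
      *
      *         struct bpf_link *link = bpf_program__attach(prog);
      *
      *         if (libbpf_get_error(link))
      *                 ... // fall back to an explicit attach_*() API
      */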
11913
11914static int bpf_link__detach_struct_ops(struct bpf_link *link)
11915{
11916        __u32 zero = 0;
11917
11918        if (bpf_map_delete_elem(link->fd, &zero))
11919                return -errno;
11920
11921        return 0;
11922}
11923
11924struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map)
11925{
11926        struct bpf_struct_ops *st_ops;
11927        struct bpf_link *link;
11928        __u32 i, zero = 0;
11929        int err;
11930
11931        if (!bpf_map__is_struct_ops(map) || map->fd == -1)
11932                return libbpf_err_ptr(-EINVAL);
11933
11934        link = calloc(1, sizeof(*link));
11935        if (!link)
11936                return libbpf_err_ptr(-ENOMEM);
11937
11938        st_ops = map->st_ops;
11939        for (i = 0; i < btf_vlen(st_ops->type); i++) {
11940                struct bpf_program *prog = st_ops->progs[i];
11941                void *kern_data;
11942                int prog_fd;
11943
11944                if (!prog)
11945                        continue;
11946
11947                prog_fd = bpf_program__fd(prog);
11948                kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i];
11949                *(unsigned long *)kern_data = prog_fd;
11950        }
11951
11952        err = bpf_map_update_elem(map->fd, &zero, st_ops->kern_vdata, 0);
11953        if (err) {
11954                err = -errno;
11955                free(link);
11956                return libbpf_err_ptr(err);
11957        }
11958
11959        link->detach = bpf_link__detach_struct_ops;
11960        link->fd = map->fd;
11961
11962        return link;
11963}
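
     /* Usage sketch, assuming the loaded object defines a struct_ops map
      * (e.g. a tcp_congestion_ops instance):
      *
      *         struct bpf_link *link = bpf_map__attach_struct_ops(map);
      *
      *         // destroying the link deletes the single map element, see
      *         // bpf_link__detach_struct_ops() above
      */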
11964
11965static enum bpf_perf_event_ret
11966perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
11967                       void **copy_mem, size_t *copy_size,
11968                       bpf_perf_event_print_t fn, void *private_data)
11969{
11970        struct perf_event_mmap_page *header = mmap_mem;
11971        __u64 data_head = ring_buffer_read_head(header);
11972        __u64 data_tail = header->data_tail;
11973        void *base = ((__u8 *)header) + page_size;
11974        int ret = LIBBPF_PERF_EVENT_CONT;
11975        struct perf_event_header *ehdr;
11976        size_t ehdr_size;
11977
11978        while (data_head != data_tail) {
11979                ehdr = base + (data_tail & (mmap_size - 1));
11980                ehdr_size = ehdr->size;
11981
11982                if (((void *)ehdr) + ehdr_size > base + mmap_size) {
11983                        void *copy_start = ehdr;
11984                        size_t len_first = base + mmap_size - copy_start;
11985                        size_t len_second = ehdr_size - len_first;
11986
11987                        if (*copy_size < ehdr_size) {
11988                                free(*copy_mem);
11989                                *copy_mem = malloc(ehdr_size);
11990                                if (!*copy_mem) {
11991                                        *copy_size = 0;
11992                                        ret = LIBBPF_PERF_EVENT_ERROR;
11993                                        break;
11994                                }
11995                                *copy_size = ehdr_size;
11996                        }
11997
11998                        memcpy(*copy_mem, copy_start, len_first);
11999                        memcpy(*copy_mem + len_first, base, len_second);
12000                        ehdr = *copy_mem;
12001                }
12002
12003                ret = fn(ehdr, private_data);
12004                data_tail += ehdr_size;
12005                if (ret != LIBBPF_PERF_EVENT_CONT)
12006                        break;
12007        }
12008
12009        ring_buffer_write_tail(header, data_tail);
12010        return libbpf_err(ret);
12011}
12012
12013__attribute__((alias("perf_event_read_simple")))
12014enum bpf_perf_event_ret
12015bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
12016                           void **copy_mem, size_t *copy_size,
12017                           bpf_perf_event_print_t fn, void *private_data);
12018
12019struct perf_buffer;
12020
12021struct perf_buffer_params {
12022        struct perf_event_attr *attr;
12023        /* if event_cb is specified, it takes precedence */
12024        perf_buffer_event_fn event_cb;
12025        /* sample_cb and lost_cb are higher-level common-case callbacks */
12026        perf_buffer_sample_fn sample_cb;
12027        perf_buffer_lost_fn lost_cb;
12028        void *ctx;
12029        int cpu_cnt;
12030        int *cpus;
12031        int *map_keys;
12032};
12033
12034struct perf_cpu_buf {
12035        struct perf_buffer *pb;
12036        void *base; /* mmap()'ed memory */
12037        void *buf; /* for reconstructing segmented data */
12038        size_t buf_size;
12039        int fd;
12040        int cpu;
12041        int map_key;
12042};
12043
12044struct perf_buffer {
12045        perf_buffer_event_fn event_cb;
12046        perf_buffer_sample_fn sample_cb;
12047        perf_buffer_lost_fn lost_cb;
12048        void *ctx; /* passed into callbacks */
12049
12050        size_t page_size;
12051        size_t mmap_size;
12052        struct perf_cpu_buf **cpu_bufs;
12053        struct epoll_event *events;
12054        int cpu_cnt; /* number of allocated CPU buffers */
12055        int epoll_fd; /* epoll instance FD used by perf_buffer__poll() */
12056        int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */
12057};
12058
12059static void perf_buffer__free_cpu_buf(struct perf_buffer *pb,
12060                                      struct perf_cpu_buf *cpu_buf)
12061{
12062        if (!cpu_buf)
12063                return;
12064        if (cpu_buf->base &&
12065            munmap(cpu_buf->base, pb->mmap_size + pb->page_size))
12066                pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu);
12067        if (cpu_buf->fd >= 0) {
12068                ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0);
12069                close(cpu_buf->fd);
12070        }
12071        free(cpu_buf->buf);
12072        free(cpu_buf);
12073}
12074
12075void perf_buffer__free(struct perf_buffer *pb)
12076{
12077        int i;
12078
12079        if (IS_ERR_OR_NULL(pb))
12080                return;
12081        if (pb->cpu_bufs) {
12082                for (i = 0; i < pb->cpu_cnt; i++) {
12083                        struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
12084
12085                        if (!cpu_buf)
12086                                continue;
12087
12088                        bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key);
12089                        perf_buffer__free_cpu_buf(pb, cpu_buf);
12090                }
12091                free(pb->cpu_bufs);
12092        }
12093        if (pb->epoll_fd >= 0)
12094                close(pb->epoll_fd);
12095        free(pb->events);
12096        free(pb);
12097}
12098
12099static struct perf_cpu_buf *
12100perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
12101                          int cpu, int map_key)
12102{
12103        struct perf_cpu_buf *cpu_buf;
12104        char msg[STRERR_BUFSIZE];
12105        int err;
12106
12107        cpu_buf = calloc(1, sizeof(*cpu_buf));
12108        if (!cpu_buf)
12109                return ERR_PTR(-ENOMEM);
12110
12111        cpu_buf->pb = pb;
12112        cpu_buf->cpu = cpu;
12113        cpu_buf->map_key = map_key;
12114
12115        cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu,
12116                              -1, PERF_FLAG_FD_CLOEXEC);
12117        if (cpu_buf->fd < 0) {
12118                err = -errno;
12119                pr_warn("failed to open perf buffer event on cpu #%d: %s\n",
12120                        cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
12121                goto error;
12122        }
12123
12124        cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size,
12125                             PROT_READ | PROT_WRITE, MAP_SHARED,
12126                             cpu_buf->fd, 0);
12127        if (cpu_buf->base == MAP_FAILED) {
12128                cpu_buf->base = NULL;
12129                err = -errno;
12130                pr_warn("failed to mmap perf buffer on cpu #%d: %s\n",
12131                        cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
12132                goto error;
12133        }
12134
12135        if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
12136                err = -errno;
12137                pr_warn("failed to enable perf buffer event on cpu #%d: %s\n",
12138                        cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
12139                goto error;
12140        }
12141
12142        return cpu_buf;
12143
12144error:
12145        perf_buffer__free_cpu_buf(pb, cpu_buf);
12146        return (struct perf_cpu_buf *)ERR_PTR(err);
12147}
12148
12149static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
12150                                              struct perf_buffer_params *p);
12151
12152DEFAULT_VERSION(perf_buffer__new_v0_6_0, perf_buffer__new, LIBBPF_0.6.0)
12153struct perf_buffer *perf_buffer__new_v0_6_0(int map_fd, size_t page_cnt,
12154                                            perf_buffer_sample_fn sample_cb,
12155                                            perf_buffer_lost_fn lost_cb,
12156                                            void *ctx,
12157                                            const struct perf_buffer_opts *opts)
12158{
12159        struct perf_buffer_params p = {};
12160        struct perf_event_attr attr = {};
12161
12162        if (!OPTS_VALID(opts, perf_buffer_opts))
12163                return libbpf_err_ptr(-EINVAL);
12164
12165        attr.config = PERF_COUNT_SW_BPF_OUTPUT;
12166        attr.type = PERF_TYPE_SOFTWARE;
12167        attr.sample_type = PERF_SAMPLE_RAW;
12168        attr.sample_period = 1;
12169        attr.wakeup_events = 1;
12170
12171        p.attr = &attr;
12172        p.sample_cb = sample_cb;
12173        p.lost_cb = lost_cb;
12174        p.ctx = ctx;
12175
12176        return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p));
12177}
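
     /* Usage sketch, assuming a BPF_MAP_TYPE_PERF_EVENT_ARRAY map in the
      * loaded object; map and callback names are illustrative:
      *
      *         static void on_sample(void *ctx, int cpu, void *data, __u32 sz)
      *         {
      *                 // raw bytes submitted via bpf_perf_event_output()
      *         }
      *
      *         pb = perf_buffer__new(bpf_map__fd(map), 8, // pages per CPU ring
      *                               on_sample, NULL, NULL, NULL);
      */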
12178
12179COMPAT_VERSION(perf_buffer__new_deprecated, perf_buffer__new, LIBBPF_0.0.4)
12180struct perf_buffer *perf_buffer__new_deprecated(int map_fd, size_t page_cnt,
12181                                                const struct perf_buffer_opts *opts)
12182{
12183        return perf_buffer__new_v0_6_0(map_fd, page_cnt,
12184                                       opts ? opts->sample_cb : NULL,
12185                                       opts ? opts->lost_cb : NULL,
12186                                       opts ? opts->ctx : NULL,
12187                                       NULL);
12188}
12189
12190DEFAULT_VERSION(perf_buffer__new_raw_v0_6_0, perf_buffer__new_raw, LIBBPF_0.6.0)
12191struct perf_buffer *perf_buffer__new_raw_v0_6_0(int map_fd, size_t page_cnt,
12192                                                struct perf_event_attr *attr,
12193                                                perf_buffer_event_fn event_cb, void *ctx,
12194                                                const struct perf_buffer_raw_opts *opts)
12195{
12196        struct perf_buffer_params p = {};
12197
12198        if (!attr)
12199                return libbpf_err_ptr(-EINVAL);
12200
12201        if (!OPTS_VALID(opts, perf_buffer_raw_opts))
12202                return libbpf_err_ptr(-EINVAL);
12203
12204        p.attr = attr;
12205        p.event_cb = event_cb;
12206        p.ctx = ctx;
12207        p.cpu_cnt = OPTS_GET(opts, cpu_cnt, 0);
12208        p.cpus = OPTS_GET(opts, cpus, NULL);
12209        p.map_keys = OPTS_GET(opts, map_keys, NULL);
12210
12211        return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p));
12212}
12213
12214COMPAT_VERSION(perf_buffer__new_raw_deprecated, perf_buffer__new_raw, LIBBPF_0.0.4)
12215struct perf_buffer *perf_buffer__new_raw_deprecated(int map_fd, size_t page_cnt,
12216                                                    const struct perf_buffer_raw_opts *opts)
12217{
12218        LIBBPF_OPTS(perf_buffer_raw_opts, inner_opts,
12219                .cpu_cnt = opts->cpu_cnt,
12220                .cpus = opts->cpus,
12221                .map_keys = opts->map_keys,
12222        );
12223
12224        return perf_buffer__new_raw_v0_6_0(map_fd, page_cnt, opts->attr,
12225                                           opts->event_cb, opts->ctx, &inner_opts);
12226}
12227
12228static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
12229                                              struct perf_buffer_params *p)
12230{
12231        const char *online_cpus_file = "/sys/devices/system/cpu/online";
12232        struct bpf_map_info map;
12233        char msg[STRERR_BUFSIZE];
12234        struct perf_buffer *pb;
12235        bool *online = NULL;
12236        __u32 map_info_len;
12237        int err, i, j, n;
12238
12239        if (page_cnt == 0 || (page_cnt & (page_cnt - 1))) {
12240                pr_warn("page count should be a power of two, but is %zu\n",
12241                        page_cnt);
12242                return ERR_PTR(-EINVAL);
12243        }
12244
12245        /* best-effort sanity checks */
12246        memset(&map, 0, sizeof(map));
12247        map_info_len = sizeof(map);
12248        err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len);
12249        if (err) {
12250                err = -errno;
12251                /* if BPF_OBJ_GET_INFO_BY_FD is supported, it will return
12252                 * -EBADFD, -EFAULT, or -E2BIG on a real error
12253                 */
12254                if (err != -EINVAL) {
12255                        pr_warn("failed to get map info for map FD %d: %s\n",
12256                                map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
12257                        return ERR_PTR(err);
12258                }
12259                pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n",
12260                         map_fd);
12261        } else {
12262                if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
12263                        pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
12264                                map.name);
12265                        return ERR_PTR(-EINVAL);
12266                }
12267        }
12268
12269        pb = calloc(1, sizeof(*pb));
12270        if (!pb)
12271                return ERR_PTR(-ENOMEM);
12272
12273        pb->event_cb = p->event_cb;
12274        pb->sample_cb = p->sample_cb;
12275        pb->lost_cb = p->lost_cb;
12276        pb->ctx = p->ctx;
12277
12278        pb->page_size = getpagesize();
12279        pb->mmap_size = pb->page_size * page_cnt;
12280        pb->map_fd = map_fd;
12281
12282        pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
12283        if (pb->epoll_fd < 0) {
12284                err = -errno;
12285                pr_warn("failed to create epoll instance: %s\n",
12286                        libbpf_strerror_r(err, msg, sizeof(msg)));
12287                goto error;
12288        }
12289
12290        if (p->cpu_cnt > 0) {
12291                pb->cpu_cnt = p->cpu_cnt;
12292        } else {
12293                pb->cpu_cnt = libbpf_num_possible_cpus();
12294                if (pb->cpu_cnt < 0) {
12295                        err = pb->cpu_cnt;
12296                        goto error;
12297                }
12298                if (map.max_entries && map.max_entries < pb->cpu_cnt)
12299                        pb->cpu_cnt = map.max_entries;
12300        }
12301
12302        pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events));
12303        if (!pb->events) {
12304                err = -ENOMEM;
12305                pr_warn("failed to allocate events: out of memory\n");
12306                goto error;
12307        }
12308        pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs));
12309        if (!pb->cpu_bufs) {
12310                err = -ENOMEM;
12311                pr_warn("failed to allocate buffers: out of memory\n");
12312                goto error;
12313        }
12314
12315        err = parse_cpu_mask_file(online_cpus_file, &online, &n);
12316        if (err) {
12317                pr_warn("failed to get online CPU mask: %d\n", err);
12318                goto error;
12319        }
12320
12321        for (i = 0, j = 0; i < pb->cpu_cnt; i++) {
12322                struct perf_cpu_buf *cpu_buf;
12323                int cpu, map_key;
12324
12325                cpu = p->cpu_cnt > 0 ? p->cpus[i] : i;
12326                map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i;
12327
12328                /* in case the user didn't explicitly request particular CPUs to
12329                 * be attached to, skip offline/not-present CPUs
12330                 */
12331                if (p->cpu_cnt <= 0 && (cpu >= n || !online[cpu]))
12332                        continue;
12333
12334                cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key);
12335                if (IS_ERR(cpu_buf)) {
12336                        err = PTR_ERR(cpu_buf);
12337                        goto error;
12338                }
12339
12340                pb->cpu_bufs[j] = cpu_buf;
12341
12342                err = bpf_map_update_elem(pb->map_fd, &map_key,
12343                                          &cpu_buf->fd, 0);
12344                if (err) {
12345                        err = -errno;
12346                        pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n",
12347                                cpu, map_key, cpu_buf->fd,
12348                                libbpf_strerror_r(err, msg, sizeof(msg)));
12349                        goto error;
12350                }
12351
12352                pb->events[j].events = EPOLLIN;
12353                pb->events[j].data.ptr = cpu_buf;
12354                if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd,
12355                              &pb->events[j]) < 0) {
12356                        err = -errno;
12357                        pr_warn("failed to epoll_ctl cpu #%d perf FD %d: %s\n",
12358                                cpu, cpu_buf->fd,
12359                                libbpf_strerror_r(err, msg, sizeof(msg)));
12360                        goto error;
12361                }
12362                j++;
12363        }
12364        pb->cpu_cnt = j;
12365        free(online);
12366
12367        return pb;
12368
12369error:
12370        free(online);
12371        if (pb)
12372                perf_buffer__free(pb);
12373        return ERR_PTR(err);
12374}
12375
12376struct perf_sample_raw {
12377        struct perf_event_header header;
12378        uint32_t size;
12379        char data[];
12380};
12381
12382struct perf_sample_lost {
12383        struct perf_event_header header;
12384        uint64_t id;
12385        uint64_t lost;
12386        uint64_t sample_id;
12387};
12388
12389static enum bpf_perf_event_ret
12390perf_buffer__process_record(struct perf_event_header *e, void *ctx)
12391{
12392        struct perf_cpu_buf *cpu_buf = ctx;
12393        struct perf_buffer *pb = cpu_buf->pb;
12394        void *data = e;
12395
12396        /* user wants full control over parsing perf event */
12397        if (pb->event_cb)
12398                return pb->event_cb(pb->ctx, cpu_buf->cpu, e);
12399
12400        switch (e->type) {
12401        case PERF_RECORD_SAMPLE: {
12402                struct perf_sample_raw *s = data;
12403
12404                if (pb->sample_cb)
12405                        pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size);
12406                break;
12407        }
12408        case PERF_RECORD_LOST: {
12409                struct perf_sample_lost *s = data;
12410
12411                if (pb->lost_cb)
12412                        pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost);
12413                break;
12414        }
12415        default:
12416                pr_warn("unknown perf sample type %d\n", e->type);
12417                return LIBBPF_PERF_EVENT_ERROR;
12418        }
12419        return LIBBPF_PERF_EVENT_CONT;
12420}
12421
12422static int perf_buffer__process_records(struct perf_buffer *pb,
12423                                        struct perf_cpu_buf *cpu_buf)
12424{
12425        enum bpf_perf_event_ret ret;
12426
12427        ret = perf_event_read_simple(cpu_buf->base, pb->mmap_size,
12428                                     pb->page_size, &cpu_buf->buf,
12429                                     &cpu_buf->buf_size,
12430                                     perf_buffer__process_record, cpu_buf);
12431        if (ret != LIBBPF_PERF_EVENT_CONT)
12432                return ret;
12433        return 0;
12434}
12435
12436int perf_buffer__epoll_fd(const struct perf_buffer *pb)
12437{
12438        return pb->epoll_fd;
12439}
12440
12441int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
12442{
12443        int i, cnt, err;
12444
12445        cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
12446        if (cnt < 0)
12447                return -errno;
12448
12449        for (i = 0; i < cnt; i++) {
12450                struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;
12451
12452                err = perf_buffer__process_records(pb, cpu_buf);
12453                if (err) {
12454                        pr_warn("error while processing records: %d\n", err);
12455                        return libbpf_err(err);
12456                }
12457        }
12458        return cnt;
12459}
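
     /* Typical event loop (sketch): block for up to 100ms per iteration
      * and dispatch pending samples through the registered callbacks:
      *
      *         while (!exiting) {
      *                 err = perf_buffer__poll(pb, 100);
      *                 if (err < 0 && err != -EINTR)
      *                         break;
      *         }
      */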
12460
12461/* Return number of PERF_EVENT_ARRAY map slots set up by this perf_buffer
12462 * manager.
12463 */
12464size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb)
12465{
12466        return pb->cpu_cnt;
12467}
12468
12469/*
12470 * Return perf_event FD of a ring buffer in *buf_idx* slot of
12471 * PERF_EVENT_ARRAY BPF map. This FD can be polled for new data using
12472 * select()/poll()/epoll() Linux syscalls.
12473 */
12474int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx)
12475{
12476        struct perf_cpu_buf *cpu_buf;
12477
12478        if (buf_idx >= pb->cpu_cnt)
12479                return libbpf_err(-EINVAL);
12480
12481        cpu_buf = pb->cpu_bufs[buf_idx];
12482        if (!cpu_buf)
12483                return libbpf_err(-ENOENT);
12484
12485        return cpu_buf->fd;
12486}
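
     /* Sketch of driving a single ring manually, assuming buffer index 0
      * is populated:
      *
      *         struct pollfd pfd = {            // <poll.h>
      *                 .fd = perf_buffer__buffer_fd(pb, 0),
      *                 .events = POLLIN,
      *         };
      *
      *         if (poll(&pfd, 1, -1) > 0)
      *                 perf_buffer__consume_buffer(pb, 0);
      */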
12487
12488/*
12489 * Consume data from perf ring buffer corresponding to slot *buf_idx* in
12490 * PERF_EVENT_ARRAY BPF map without waiting/polling. If there is no data to
12491 * consume, do nothing and return success.
12492 * Returns:
12493 *   - 0 on success;
12494 *   - <0 on failure.
12495 */
12496int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx)
12497{
12498        struct perf_cpu_buf *cpu_buf;
12499
12500        if (buf_idx >= pb->cpu_cnt)
12501                return libbpf_err(-EINVAL);
12502
12503        cpu_buf = pb->cpu_bufs[buf_idx];
12504        if (!cpu_buf)
12505                return libbpf_err(-ENOENT);
12506
12507        return perf_buffer__process_records(pb, cpu_buf);
12508}
12509
12510int perf_buffer__consume(struct perf_buffer *pb)
12511{
12512        int i, err;
12513
12514        for (i = 0; i < pb->cpu_cnt; i++) {
12515                struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
12516
12517                if (!cpu_buf)
12518                        continue;
12519
12520                err = perf_buffer__process_records(pb, cpu_buf);
12521                if (err) {
12522                        pr_warn("perf_buffer: failed to process records in buffer #%d: %d\n", i, err);
12523                        return libbpf_err(err);
12524                }
12525        }
12526        return 0;
12527}
12528
12529struct bpf_prog_info_array_desc {
12530        int     array_offset;   /* e.g. offset of jited_prog_insns */
12531        int     count_offset;   /* e.g. offset of jited_prog_len */
12532        int     size_offset;    /* > 0: offset of rec size,
12533                                 * < 0: fixed size of -size_offset
12534                                 */
12535};
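
     /* E.g. BPF_PROG_INFO_MAP_IDS below encodes -(int)sizeof(__u32) as
      * size_offset: map IDs are fixed 4-byte records with no rec-size field
      * in bpf_prog_info, so the fixed size is stored negated.
      */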
12536
12537static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = {
12538        [BPF_PROG_INFO_JITED_INSNS] = {
12539                offsetof(struct bpf_prog_info, jited_prog_insns),
12540                offsetof(struct bpf_prog_info, jited_prog_len),
12541                -1,
12542        },
12543        [BPF_PROG_INFO_XLATED_INSNS] = {
12544                offsetof(struct bpf_prog_info, xlated_prog_insns),
12545                offsetof(struct bpf_prog_info, xlated_prog_len),
12546                -1,
12547        },
12548        [BPF_PROG_INFO_MAP_IDS] = {
12549                offsetof(struct bpf_prog_info, map_ids),
12550                offsetof(struct bpf_prog_info, nr_map_ids),
12551                -(int)sizeof(__u32),
12552        },
12553        [BPF_PROG_INFO_JITED_KSYMS] = {
12554                offsetof(struct bpf_prog_info, jited_ksyms),
12555                offsetof(struct bpf_prog_info, nr_jited_ksyms),
12556                -(int)sizeof(__u64),
12557        },
12558        [BPF_PROG_INFO_JITED_FUNC_LENS] = {
12559                offsetof(struct bpf_prog_info, jited_func_lens),
12560                offsetof(struct bpf_prog_info, nr_jited_func_lens),
12561                -(int)sizeof(__u32),
12562        },
12563        [BPF_PROG_INFO_FUNC_INFO] = {
12564                offsetof(struct bpf_prog_info, func_info),
12565                offsetof(struct bpf_prog_info, nr_func_info),
12566                offsetof(struct bpf_prog_info, func_info_rec_size),
12567        },
12568        [BPF_PROG_INFO_LINE_INFO] = {
12569                offsetof(struct bpf_prog_info, line_info),
12570                offsetof(struct bpf_prog_info, nr_line_info),
12571                offsetof(struct bpf_prog_info, line_info_rec_size),
12572        },
12573        [BPF_PROG_INFO_JITED_LINE_INFO] = {
12574                offsetof(struct bpf_prog_info, jited_line_info),
12575                offsetof(struct bpf_prog_info, nr_jited_line_info),
12576                offsetof(struct bpf_prog_info, jited_line_info_rec_size),
12577        },
12578        [BPF_PROG_INFO_PROG_TAGS] = {
12579                offsetof(struct bpf_prog_info, prog_tags),
12580                offsetof(struct bpf_prog_info, nr_prog_tags),
12581                -(int)sizeof(__u8) * BPF_TAG_SIZE,
12582        },
12583
12584};
12585
12586static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info,
12587                                           int offset)
12588{
12589        __u32 *array = (__u32 *)info;
12590
12591        if (offset >= 0)
12592                return array[offset / sizeof(__u32)];
12593        return -(int)offset;
12594}
12595
12596static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info,
12597                                           int offset)
12598{
12599        __u64 *array = (__u64 *)info;
12600
12601        if (offset >= 0)
12602                return array[offset / sizeof(__u64)];
12603        return -(int)offset;
12604}
12605
12606static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset,
12607                                         __u32 val)
12608{
12609        __u32 *array = (__u32 *)info;
12610
12611        if (offset >= 0)
12612                array[offset / sizeof(__u32)] = val;
12613}
12614
12615static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset,
12616                                         __u64 val)
12617{
12618        __u64 *array = (__u64 *)info;
12619
12620        if (offset >= 0)
12621                array[offset / sizeof(__u64)] = val;
12622}
12623
12624struct bpf_prog_info_linear *
12625bpf_program__get_prog_info_linear(int fd, __u64 arrays)
12626{
12627        struct bpf_prog_info_linear *info_linear;
12628        struct bpf_prog_info info = {};
12629        __u32 info_len = sizeof(info);
12630        __u32 data_len = 0;
12631        int i, err;
12632        void *ptr;
12633
12634        if (arrays >> BPF_PROG_INFO_LAST_ARRAY)
12635                return libbpf_err_ptr(-EINVAL);
12636
12637        /* step 1: get array dimensions */
12638        err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
12639        if (err) {
12640                pr_debug("can't get prog info: %s\n", strerror(errno));
12641                return libbpf_err_ptr(-EFAULT);
12642        }
12643
12644        /* step 2: calculate total size of all arrays */
12645        for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
12646                bool include_array = (arrays & (1UL << i)) > 0;
12647                struct bpf_prog_info_array_desc *desc;
12648                __u32 count, size;
12649
12650                desc = bpf_prog_info_array_desc + i;
12651
12652                /* kernel is too old to support this field */
12653                if (info_len < desc->array_offset + sizeof(__u32) ||
12654                    info_len < desc->count_offset + sizeof(__u32) ||
12655                    (desc->size_offset > 0 && info_len < desc->size_offset))
12656                        include_array = false;
12657
12658                if (!include_array) {
12659                        arrays &= ~(1UL << i);  /* clear the bit */
12660                        continue;
12661                }
12662
12663                count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
12664                size  = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
12665
12666                data_len += count * size;
12667        }
12668
12669        /* step 3: allocate contiguous memory */
12670        data_len = roundup(data_len, sizeof(__u64));
12671        info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len);
12672        if (!info_linear)
12673                return libbpf_err_ptr(-ENOMEM);
12674
12675        /* step 4: fill data to info_linear->info */
12676        info_linear->arrays = arrays;
12677        memset(&info_linear->info, 0, sizeof(info));
12678        ptr = info_linear->data;
12679
12680        for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
12681                struct bpf_prog_info_array_desc *desc;
12682                __u32 count, size;
12683
12684                if ((arrays & (1UL << i)) == 0)
12685                        continue;
12686
12687                desc  = bpf_prog_info_array_desc + i;
12688                count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
12689                size  = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
12690                bpf_prog_info_set_offset_u32(&info_linear->info,
12691                                             desc->count_offset, count);
12692                bpf_prog_info_set_offset_u32(&info_linear->info,
12693                                             desc->size_offset, size);
12694                bpf_prog_info_set_offset_u64(&info_linear->info,
12695                                             desc->array_offset,
12696                                             ptr_to_u64(ptr));
12697                ptr += count * size;
12698        }
12699
12700        /* step 5: call syscall again to get required arrays */
12701        err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len);
12702        if (err) {
12703                pr_debug("can't get prog info: %s\n", strerror(errno));
12704                free(info_linear);
12705                return libbpf_err_ptr(-EFAULT);
12706        }
12707
12708        /* step 6: verify the data */
12709        for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
12710                struct bpf_prog_info_array_desc *desc;
12711                __u32 v1, v2;
12712
12713                if ((arrays & (1UL << i)) == 0)
12714                        continue;
12715
12716                desc = bpf_prog_info_array_desc + i;
12717                v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
12718                v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
12719                                                   desc->count_offset);
12720                if (v1 != v2)
12721                        pr_warn("%s: mismatch in element count\n", __func__);
12722
12723                v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
12724                v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
12725                                                   desc->size_offset);
12726                if (v1 != v2)
12727                        pr_warn("%s: mismatch in rec size\n", __func__);
12728        }
12729
12730        /* step 7: update info_len and data_len */
12731        info_linear->info_len = sizeof(struct bpf_prog_info);
12732        info_linear->data_len = data_len;
12733
12734        return info_linear;
12735}
12736
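/* The two helpers below convert the kernel-filled array pointers inside
 * info_linear->info between absolute addresses and offsets relative to
 * info_linear->data. In offset form the whole blob is position
 * independent, so it can, e.g., be written out and read back elsewhere
 * (hypothetical sketch; out_fd is a made-up descriptor):
 *
 *	bpf_program__bpil_addr_to_offs(info);
 *	write(out_fd, info, sizeof(*info) + info->data_len);
 *	...
 *	bpf_program__bpil_offs_to_addr(info);	(after reading it back)
 */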
12737void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear)
12738{
12739        int i;
12740
12741        for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
12742                struct bpf_prog_info_array_desc *desc;
12743                __u64 addr, offs;
12744
12745                if ((info_linear->arrays & (1UL << i)) == 0)
12746                        continue;
12747
12748                desc = bpf_prog_info_array_desc + i;
12749                addr = bpf_prog_info_read_offset_u64(&info_linear->info,
12750                                                     desc->array_offset);
12751                offs = addr - ptr_to_u64(info_linear->data);
12752                bpf_prog_info_set_offset_u64(&info_linear->info,
12753                                             desc->array_offset, offs);
12754        }
12755}
12756
12757void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
12758{
12759        int i;
12760
12761        for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
12762                struct bpf_prog_info_array_desc *desc;
12763                __u64 addr, offs;
12764
12765                if ((info_linear->arrays & (1UL << i)) == 0)
12766                        continue;
12767
12768                desc = bpf_prog_info_array_desc + i;
12769                offs = bpf_prog_info_read_offset_u64(&info_linear->info,
12770                                                     desc->array_offset);
12771                addr = offs + ptr_to_u64(info_linear->data);
12772                bpf_prog_info_set_offset_u64(&info_linear->info,
12773                                             desc->array_offset, addr);
12774        }
12775}
12776
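/* Set the attach target for a BTF-aware program (fentry/fexit/freplace,
 * etc.) before its object is loaded. With a non-zero @attach_prog_fd and
 * a NULL @attach_func_name, BTF ID resolution is deferred to load time;
 * with @attach_prog_fd == 0, @attach_func_name is resolved against
 * kernel (vmlinux or module) BTF. Hypothetical sketch; the program and
 * kernel function names are made up:
 *
 *	prog = bpf_object__find_program_by_name(obj, "handle_connect");
 *	err = bpf_program__set_attach_target(prog, 0, "tcp_v4_connect");
 *	if (!err)
 *		err = bpf_object__load(obj);
 */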
12777int bpf_program__set_attach_target(struct bpf_program *prog,
12778                                   int attach_prog_fd,
12779                                   const char *attach_func_name)
12780{
12781        int btf_obj_fd = 0, btf_id = 0, err;
12782
12783        if (!prog || attach_prog_fd < 0)
12784                return libbpf_err(-EINVAL);
12785
12786        if (prog->obj->loaded)
12787                return libbpf_err(-EINVAL);
12788
12789        if (attach_prog_fd && !attach_func_name) {
12790                /* remember attach_prog_fd and let bpf_program__load() find
12791                 * BTF ID during the program load
12792                 */
12793                prog->attach_prog_fd = attach_prog_fd;
12794                return 0;
12795        }
12796
12797        if (attach_prog_fd) {
12798                btf_id = libbpf_find_prog_btf_id(attach_func_name,
12799                                                 attach_prog_fd);
12800                if (btf_id < 0)
12801                        return libbpf_err(btf_id);
12802        } else {
12803                if (!attach_func_name)
12804                        return libbpf_err(-EINVAL);
12805
12806                /* load btf_vmlinux, if not yet */
12807                err = bpf_object__load_vmlinux_btf(prog->obj, true);
12808                if (err)
12809                        return libbpf_err(err);
12810                err = find_kernel_btf_id(prog->obj, attach_func_name,
12811                                         prog->expected_attach_type,
12812                                         &btf_obj_fd, &btf_id);
12813                if (err)
12814                        return libbpf_err(err);
12815        }
12816
12817        prog->attach_btf_id = btf_id;
12818        prog->attach_btf_obj_fd = btf_obj_fd;
12819        prog->attach_prog_fd = attach_prog_fd;
12820        return 0;
12821}
12822
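/* Parse a kernel-style CPU list into a boolean mask. For example, the
 * string "0-2,4" yields *mask = {true, true, true, false, true} and
 * *mask_sz = 5. On success the caller must free(*mask).
 */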
12823int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz)
12824{
12825        int err = 0, n, len, start, end = -1;
12826        bool *tmp;
12827
12828        *mask = NULL;
12829        *mask_sz = 0;
12830
12831        /* Each substring separated by ',' has the format \d+-\d+ or \d+ */
12832        while (*s) {
12833                if (*s == ',' || *s == '\n') {
12834                        s++;
12835                        continue;
12836                }
12837                n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len);
12838                if (n <= 0 || n > 2) {
12839                        pr_warn("Failed to get CPU range %s: %d\n", s, n);
12840                        err = -EINVAL;
12841                        goto cleanup;
12842                } else if (n == 1) {
12843                        end = start;
12844                }
12845                if (start < 0 || start > end) {
12846                        pr_warn("Invalid CPU range [%d,%d] in %s\n",
12847                                start, end, s);
12848                        err = -EINVAL;
12849                        goto cleanup;
12850                }
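                /* Grow the mask to cover this range. Ranges are assumed
                 * to be ascending and non-overlapping, as in
                 * kernel-provided CPU lists such as
                 * /sys/devices/system/cpu/possible.
                 */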
12851                tmp = realloc(*mask, end + 1);
12852                if (!tmp) {
12853                        err = -ENOMEM;
12854                        goto cleanup;
12855                }
12856                *mask = tmp;
12857                memset(tmp + *mask_sz, 0, start - *mask_sz);
12858                memset(tmp + start, 1, end - start + 1);
12859                *mask_sz = end + 1;
12860                s += len;
12861        }
12862        if (!*mask_sz) {
12863                pr_warn("Empty CPU range\n");
12864                return -EINVAL;
12865        }
12866        return 0;
12867cleanup:
12868        free(*mask);
12869        *mask = NULL;
12870        return err;
12871}
12872
12873int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz)
12874{
12875        int fd, err = 0, len;
12876        char buf[128];
12877
12878        fd = open(fcpu, O_RDONLY | O_CLOEXEC);
12879        if (fd < 0) {
12880                err = -errno;
12881                pr_warn("Failed to open cpu mask file %s: %d\n", fcpu, err);
12882                return err;
12883        }
12884        len = read(fd, buf, sizeof(buf));
12885        close(fd);
12886        if (len <= 0) {
12887                err = len ? -errno : -EINVAL;
12888                pr_warn("Failed to read cpu mask from %s: %d\n", fcpu, err);
12889                return err;
12890        }
12891        if (len >= sizeof(buf)) {
12892                pr_warn("CPU mask is too big in file %s\n", fcpu);
12893                return -E2BIG;
12894        }
12895        buf[len] = '\0';
12896
12897        return parse_cpu_mask_str(buf, mask, mask_sz);
12898}
12899
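/* Number of possible CPUs, as reported by fcpu below; cached after the
 * first successful read. A typical use is sizing value buffers for
 * per-CPU maps, which return one value per possible CPU (sketch; map_fd
 * and key are assumed valid):
 *
 *	int n = libbpf_num_possible_cpus();
 *	__u64 *vals = calloc(n, sizeof(__u64));
 *
 *	if (vals && !bpf_map_lookup_elem(map_fd, &key, vals))
 *		... vals[0..n-1] hold one counter per CPU ...
 */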
12900int libbpf_num_possible_cpus(void)
12901{
12902        static const char *fcpu = "/sys/devices/system/cpu/possible";
12903        static int cpus;
12904        int err, n, i, tmp_cpus;
12905        bool *mask;
12906
12907        tmp_cpus = READ_ONCE(cpus);
12908        if (tmp_cpus > 0)
12909                return tmp_cpus;
12910
12911        err = parse_cpu_mask_file(fcpu, &mask, &n);
12912        if (err)
12913                return libbpf_err(err);
12914
12915        tmp_cpus = 0;
12916        for (i = 0; i < n; i++) {
12917                if (mask[i])
12918                        tmp_cpus++;
12919        }
12920        free(mask);
12921
12922        WRITE_ONCE(cpus, tmp_cpus);
12923        return tmp_cpus;
12924}
12925
12926static int populate_skeleton_maps(const struct bpf_object *obj,
12927                                  struct bpf_map_skeleton *maps,
12928                                  size_t map_cnt)
12929{
12930        int i;
12931
12932        for (i = 0; i < map_cnt; i++) {
12933                struct bpf_map **map = maps[i].map;
12934                const char *name = maps[i].name;
12935                void **mmaped = maps[i].mmaped;
12936
12937                *map = bpf_object__find_map_by_name(obj, name);
12938                if (!*map) {
12939                        pr_warn("failed to find skeleton map '%s'\n", name);
12940                        return -ESRCH;
12941                }
12942
12943                /* externs shouldn't be pre-initialized from user code */
12944                if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG)
12945                        *mmaped = (*map)->mmaped;
12946        }
12947        return 0;
12948}
12949
12950static int populate_skeleton_progs(const struct bpf_object *obj,
12951                                   struct bpf_prog_skeleton *progs,
12952                                   size_t prog_cnt)
12953{
12954        int i;
12955
12956        for (i = 0; i < prog_cnt; i++) {
12957                struct bpf_program **prog = progs[i].prog;
12958                const char *name = progs[i].name;
12959
12960                *prog = bpf_object__find_program_by_name(obj, name);
12961                if (!*prog) {
12962                        pr_warn("failed to find skeleton program '%s'\n", name);
12963                        return -ESRCH;
12964                }
12965        }
12966        return 0;
12967}
12968
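/* The skeleton entry points below back the code emitted by "bpftool gen
 * skeleton": a generated my_obj_bpf__open() wrapper (name hypothetical)
 * fills struct bpf_object_skeleton with the embedded object data plus
 * map/prog bookkeeping and then calls bpf_object__open_skeleton().
 */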
12969int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
12970                              const struct bpf_object_open_opts *opts)
12971{
12972        DECLARE_LIBBPF_OPTS(bpf_object_open_opts, skel_opts,
12973                .object_name = s->name,
12974        );
12975        struct bpf_object *obj;
12976        int err;
12977
12978        /* Attempt to preserve the skeleton's object name, unless explicitly
12979         * overridden by the user. Overwriting the object name for skeletons
12980         * is discouraged, as it breaks global data maps: they carry the
12981         * object name as their own map name prefix, and bpftool assumes at
12982         * skeleton generation time that this name will stay the same.
12983         */
12984        if (opts) {
12985                memcpy(&skel_opts, opts, sizeof(*opts));
12986                if (!opts->object_name)
12987                        skel_opts.object_name = s->name;
12988        }
12989
12990        obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts);
12991        err = libbpf_get_error(obj);
12992        if (err) {
12993                pr_warn("failed to initialize skeleton BPF object '%s': %d\n",
12994                        s->name, err);
12995                return libbpf_err(err);
12996        }
12997
12998        *s->obj = obj;
12999        err = populate_skeleton_maps(obj, s->maps, s->map_cnt);
13000        if (err) {
13001                pr_warn("failed to populate skeleton maps for '%s': %d\n", s->name, err);
13002                return libbpf_err(err);
13003        }
13004
13005        err = populate_skeleton_progs(obj, s->progs, s->prog_cnt);
13006        if (err) {
13007                pr_warn("failed to populate skeleton progs for '%s': %d\n", s->name, err);
13008                return libbpf_err(err);
13009        }
13010
13011        return 0;
13012}
13013
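/* Subskeletons ("bpftool gen subskeleton") let library-style BPF code
 * locate its global variables inside an already-open object: each
 * requested variable name is matched against the DATASEC BTF of the
 * object's global data maps and resolved to an mmap()-ed address.
 */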
13014int bpf_object__open_subskeleton(struct bpf_object_subskeleton *s)
13015{
13016        int err, len, var_idx, i;
13017        const char *var_name;
13018        const struct bpf_map *map;
13019        struct btf *btf;
13020        __u32 map_type_id;
13021        const struct btf_type *map_type, *var_type;
13022        const struct bpf_var_skeleton *var_skel;
13023        struct btf_var_secinfo *var;
13024
13025        if (!s->obj)
13026                return libbpf_err(-EINVAL);
13027
13028        btf = bpf_object__btf(s->obj);
13029        if (!btf) {
13030                pr_warn("subskeletons require BTF at runtime (object %s)\n",
13031                        bpf_object__name(s->obj));
13032                return libbpf_err(-EINVAL); /* errno isn't set reliably here */
13033        }
13034
13035        err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt);
13036        if (err) {
13037                pr_warn("failed to populate subskeleton maps: %d\n", err);
13038                return libbpf_err(err);
13039        }
13040
13041        err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt);
13042        if (err) {
13043                pr_warn("failed to populate subskeleton progs: %d\n", err);
13044                return libbpf_err(err);
13045        }
13046
13047        for (var_idx = 0; var_idx < s->var_cnt; var_idx++) {
13048                var_skel = &s->vars[var_idx];
13049                map = *var_skel->map;
13050                map_type_id = bpf_map__btf_value_type_id(map);
13051                map_type = btf__type_by_id(btf, map_type_id);
13052
13053                if (!btf_is_datasec(map_type)) {
13054                        pr_warn("type for map '%1$s' is not a datasec: %2$s\n",
13055                                bpf_map__name(map),
13056                                __btf_kind_str(btf_kind(map_type)));
13057                        return libbpf_err(-EINVAL);
13058                }
13059
13060                len = btf_vlen(map_type);
13061                var = btf_var_secinfos(map_type);
13062                for (i = 0; i < len; i++, var++) {
13063                        var_type = btf__type_by_id(btf, var->type);
13064                        var_name = btf__name_by_offset(btf, var_type->name_off);
13065                        if (strcmp(var_name, var_skel->name) == 0) {
13066                                *var_skel->addr = map->mmaped + var->offset;
13067                                break;
13068                        }
13069                }
13070        }
13071        return 0;
13072}
13073
13074void bpf_object__destroy_subskeleton(struct bpf_object_subskeleton *s)
13075{
13076        if (!s)
13077                return;
13078        free(s->maps);
13079        free(s->progs);
13080        free(s->vars);
13081        free(s);
13082}
13083
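/* Load the skeleton's object and expose mmap()-able map memory through
 * the skeleton's "mmaped" pointers. In generated code the usual call
 * order is open -> load -> attach, e.g. (wrapper names hypothetical):
 *
 *	skel = my_obj_bpf__open();
 *	err = my_obj_bpf__load(skel);
 *	err = err ?: my_obj_bpf__attach(skel);
 */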
13084int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
13085{
13086        int i, err;
13087
13088        err = bpf_object__load(*s->obj);
13089        if (err) {
13090                pr_warn("failed to load BPF skeleton '%s': %d\n", s->name, err);
13091                return libbpf_err(err);
13092        }
13093
13094        for (i = 0; i < s->map_cnt; i++) {
13095                struct bpf_map *map = *s->maps[i].map;
13096                size_t mmap_sz = bpf_map_mmap_sz(map);
13097                int prot, map_fd = bpf_map__fd(map);
13098                void **mmaped = s->maps[i].mmaped;
13099
13100                if (!mmaped)
13101                        continue;
13102
13103                if (!(map->def.map_flags & BPF_F_MMAPABLE)) {
13104                        *mmaped = NULL;
13105                        continue;
13106                }
13107
13108                if (map->def.map_flags & BPF_F_RDONLY_PROG)
13109                        prot = PROT_READ;
13110                else
13111                        prot = PROT_READ | PROT_WRITE;
13112
13113                /* Remap the anonymous mmap()-ed "map initialization image"
13114                 * as BPF map-backed mmap()-ed memory, preserving the same
13115                 * memory address. This causes the kernel to change the
13116                 * process' page table to point to a different piece of
13117                 * kernel memory, but from the userspace point of view the
13118                 * memory address (and its contents, identical at this
13119                 * point) stays the same. This mapping is released by
13120                 * bpf_object__close() as part of the normal clean up
13121                 * procedure, so the skeleton's clean up doesn't need to
13122                 * worry about it.
13123                 */
13123                *mmaped = mmap(map->mmaped, mmap_sz, prot,
13124                                MAP_SHARED | MAP_FIXED, map_fd, 0);
13125                if (*mmaped == MAP_FAILED) {
13126                        err = -errno;
13127                        *mmaped = NULL;
13128                        pr_warn("failed to re-mmap() map '%s': %d\n",
13129                                 bpf_map__name(map), err);
13130                        return libbpf_err(err);
13131                }
13132        }
13133
13134        return 0;
13135}
13136
13137int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
13138{
13139        int i, err;
13140
13141        for (i = 0; i < s->prog_cnt; i++) {
13142                struct bpf_program *prog = *s->progs[i].prog;
13143                struct bpf_link **link = s->progs[i].link;
13144
13145                if (!prog->autoload)
13146                        continue;
13147
13148                /* auto-attaching not supported for this program */
13149                if (!prog->sec_def || !prog->sec_def->prog_attach_fn)
13150                        continue;
13151
13152                /* if user already set the link manually, don't attempt auto-attach */
13153                if (*link)
13154                        continue;
13155
13156                err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, link);
13157                if (err) {
13158                        pr_warn("prog '%s': failed to auto-attach: %d\n",
13159                                bpf_program__name(prog), err);
13160                        return libbpf_err(err);
13161                }
13162
13163                /* For some SEC() definitions auto-attach is supported only
13164                 * in some cases (e.g., when the definition completely
13165                 * specifies the target), but not in others. SEC("uprobe")
13166                 * is one such case: if the user specified the target binary
13167                 * and function name, such a BPF program can be
13168                 * auto-attached; otherwise it shouldn't make the skeleton's
13169                 * attach fail and should just be skipped.
13170                 * attach_fn signals such a case by returning 0 (no error)
13171                 * and setting the link to NULL.
13172                 */
13173        }
13174
13175        return 0;
13176}
13177
13178void bpf_object__detach_skeleton(struct bpf_object_skeleton *s)
13179{
13180        int i;
13181
13182        for (i = 0; i < s->prog_cnt; i++) {
13183                struct bpf_link **link = s->progs[i].link;
13184
13185                bpf_link__destroy(*link);
13186                *link = NULL;
13187        }
13188}
13189
13190void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s)
13191{
13192        if (!s)
13193                return;
13194
13195        if (s->progs)
13196                bpf_object__detach_skeleton(s);
13197        if (s->obj)
13198                bpf_object__close(*s->obj);
13199        free(s->maps);
13200        free(s->progs);
13201        free(s);
13202}
13203