linux/tools/lib/bpf/bpf.c
   1// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
   2
   3/*
   4 * common eBPF ELF operations.
   5 *
   6 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
   7 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
   8 * Copyright (C) 2015 Huawei Inc.
   9 *
  10 * This program is free software; you can redistribute it and/or
  11 * modify it under the terms of the GNU Lesser General Public
  12 * License as published by the Free Software Foundation;
  13 * version 2.1 of the License (not later!)
  14 *
  15 * This program is distributed in the hope that it will be useful,
  16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  18 * GNU Lesser General Public License for more details.
  19 *
  20 * You should have received a copy of the GNU Lesser General Public
  21 * License along with this program; if not,  see <http://www.gnu.org/licenses>
  22 */
  23
  24#include <stdlib.h>
  25#include <string.h>
  26#include <memory.h>
  27#include <unistd.h>
  28#include <asm/unistd.h>
  29#include <errno.h>
  30#include <linux/bpf.h>
  31#include "bpf.h"
  32#include "libbpf.h"
  33#include "libbpf_internal.h"
  34
  35/*
  36 * When building perf, unistd.h is overridden. __NR_bpf is
  37 * required to be defined explicitly.
  38 */
  39#ifndef __NR_bpf
  40# if defined(__i386__)
  41#  define __NR_bpf 357
  42# elif defined(__x86_64__)
  43#  define __NR_bpf 321
  44# elif defined(__aarch64__)
  45#  define __NR_bpf 280
  46# elif defined(__sparc__)
  47#  define __NR_bpf 349
  48# elif defined(__s390__)
  49#  define __NR_bpf 351
  50# elif defined(__arc__)
  51#  define __NR_bpf 280
  52# else
  53#  error __NR_bpf not defined. libbpf does not support your arch.
  54# endif
  55#endif
  56
  57static inline __u64 ptr_to_u64(const void *ptr)
  58{
  59        return (__u64) (unsigned long) ptr;
  60}
  61
  62static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
  63                          unsigned int size)
  64{
  65        return syscall(__NR_bpf, cmd, attr, size);
  66}
  67
  68static inline int sys_bpf_fd(enum bpf_cmd cmd, union bpf_attr *attr,
  69                             unsigned int size)
  70{
  71        int fd;
  72
  73        fd = sys_bpf(cmd, attr, size);
  74        return ensure_good_fd(fd);
  75}
  76
  77static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size)
  78{
  79        int retries = 5;
  80        int fd;
  81
  82        do {
  83                fd = sys_bpf_fd(BPF_PROG_LOAD, attr, size);
  84        } while (fd < 0 && errno == EAGAIN && retries-- > 0);
  85
  86        return fd;
  87}
  88
  89int libbpf__bpf_create_map_xattr(const struct bpf_create_map_params *create_attr)
  90{
  91        union bpf_attr attr;
  92        int fd;
  93
  94        memset(&attr, '\0', sizeof(attr));
  95
  96        attr.map_type = create_attr->map_type;
  97        attr.key_size = create_attr->key_size;
  98        attr.value_size = create_attr->value_size;
  99        attr.max_entries = create_attr->max_entries;
 100        attr.map_flags = create_attr->map_flags;
 101        if (create_attr->name)
 102                memcpy(attr.map_name, create_attr->name,
 103                       min(strlen(create_attr->name), BPF_OBJ_NAME_LEN - 1));
 104        attr.numa_node = create_attr->numa_node;
 105        attr.btf_fd = create_attr->btf_fd;
 106        attr.btf_key_type_id = create_attr->btf_key_type_id;
 107        attr.btf_value_type_id = create_attr->btf_value_type_id;
 108        attr.map_ifindex = create_attr->map_ifindex;
 109        if (attr.map_type == BPF_MAP_TYPE_STRUCT_OPS)
 110                attr.btf_vmlinux_value_type_id =
 111                        create_attr->btf_vmlinux_value_type_id;
 112        else
 113                attr.inner_map_fd = create_attr->inner_map_fd;
 114        attr.map_extra = create_attr->map_extra;
 115
 116        fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, sizeof(attr));
 117        return libbpf_err_errno(fd);
 118}
 119
 120int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
 121{
 122        struct bpf_create_map_params p = {};
 123
 124        p.map_type = create_attr->map_type;
 125        p.key_size = create_attr->key_size;
 126        p.value_size = create_attr->value_size;
 127        p.max_entries = create_attr->max_entries;
 128        p.map_flags = create_attr->map_flags;
 129        p.name = create_attr->name;
 130        p.numa_node = create_attr->numa_node;
 131        p.btf_fd = create_attr->btf_fd;
 132        p.btf_key_type_id = create_attr->btf_key_type_id;
 133        p.btf_value_type_id = create_attr->btf_value_type_id;
 134        p.map_ifindex = create_attr->map_ifindex;
 135        if (p.map_type == BPF_MAP_TYPE_STRUCT_OPS)
 136                p.btf_vmlinux_value_type_id =
 137                        create_attr->btf_vmlinux_value_type_id;
 138        else
 139                p.inner_map_fd = create_attr->inner_map_fd;
 140
 141        return libbpf__bpf_create_map_xattr(&p);
 142}
 143
 144int bpf_create_map_node(enum bpf_map_type map_type, const char *name,
 145                        int key_size, int value_size, int max_entries,
 146                        __u32 map_flags, int node)
 147{
 148        struct bpf_create_map_attr map_attr = {};
 149
 150        map_attr.name = name;
 151        map_attr.map_type = map_type;
 152        map_attr.map_flags = map_flags;
 153        map_attr.key_size = key_size;
 154        map_attr.value_size = value_size;
 155        map_attr.max_entries = max_entries;
 156        if (node >= 0) {
 157                map_attr.numa_node = node;
 158                map_attr.map_flags |= BPF_F_NUMA_NODE;
 159        }
 160
 161        return bpf_create_map_xattr(&map_attr);
 162}
 163
 164int bpf_create_map(enum bpf_map_type map_type, int key_size,
 165                   int value_size, int max_entries, __u32 map_flags)
 166{
 167        struct bpf_create_map_attr map_attr = {};
 168
 169        map_attr.map_type = map_type;
 170        map_attr.map_flags = map_flags;
 171        map_attr.key_size = key_size;
 172        map_attr.value_size = value_size;
 173        map_attr.max_entries = max_entries;
 174
 175        return bpf_create_map_xattr(&map_attr);
 176}
 177
 178int bpf_create_map_name(enum bpf_map_type map_type, const char *name,
 179                        int key_size, int value_size, int max_entries,
 180                        __u32 map_flags)
 181{
 182        struct bpf_create_map_attr map_attr = {};
 183
 184        map_attr.name = name;
 185        map_attr.map_type = map_type;
 186        map_attr.map_flags = map_flags;
 187        map_attr.key_size = key_size;
 188        map_attr.value_size = value_size;
 189        map_attr.max_entries = max_entries;
 190
 191        return bpf_create_map_xattr(&map_attr);
 192}
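
/*
 * Usage sketch (illustrative only, not part of libbpf): create a small
 * ARRAY map through the wrapper above and store one value in it. Assumes
 * <linux/bpf.h> and "bpf.h" are included and that the caller closes the
 * returned fd; the wrappers return a negative error on failure.
 *
 *	int example_create_array_map(void)
 *	{
 *		__u32 key = 0;
 *		__u64 val = 1;
 *		int fd;
 *
 *		fd = bpf_create_map_name(BPF_MAP_TYPE_ARRAY, "example_map",
 *					 sizeof(key), sizeof(val), 16, 0);
 *		if (fd < 0)
 *			return fd;
 *		return bpf_map_update_elem(fd, &key, &val, BPF_ANY);
 *	}
 */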
 193
 194int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name,
 195                               int key_size, int inner_map_fd, int max_entries,
 196                               __u32 map_flags, int node)
 197{
 198        union bpf_attr attr;
 199        int fd;
 200
 201        memset(&attr, '\0', sizeof(attr));
 202
 203        attr.map_type = map_type;
 204        attr.key_size = key_size;
 205        attr.value_size = 4;
 206        attr.inner_map_fd = inner_map_fd;
 207        attr.max_entries = max_entries;
 208        attr.map_flags = map_flags;
 209        if (name)
 210                memcpy(attr.map_name, name,
 211                       min(strlen(name), BPF_OBJ_NAME_LEN - 1));
 212
 213        if (node >= 0) {
 214                attr.map_flags |= BPF_F_NUMA_NODE;
 215                attr.numa_node = node;
 216        }
 217
 218        fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, sizeof(attr));
 219        return libbpf_err_errno(fd);
 220}
 221
 222int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name,
 223                          int key_size, int inner_map_fd, int max_entries,
 224                          __u32 map_flags)
 225{
 226        return bpf_create_map_in_map_node(map_type, name, key_size,
 227                                          inner_map_fd, max_entries, map_flags,
 228                                          -1);
 229}
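
/*
 * Usage sketch (illustrative): for map-in-map creation the wrapper above
 * hard-codes value_size to 4 (the size of an inner-map id), and
 * inner_map_fd is only used as a template describing the inner maps; it
 * may be closed once the outer map exists. Minimal error handling:
 *
 *	int inner_fd, outer_fd;
 *
 *	inner_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(__u32),
 *				  sizeof(__u64), 4, 0);
 *	outer_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS,
 *					 "outer_map", sizeof(__u32),
 *					 inner_fd, 8, 0);
 */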
 230
 231static void *
 232alloc_zero_tailing_info(const void *orecord, __u32 cnt,
 233                        __u32 actual_rec_size, __u32 expected_rec_size)
 234{
 235        __u64 info_len = (__u64)actual_rec_size * cnt;
 236        void *info, *nrecord;
 237        int i;
 238
 239        info = malloc(info_len);
 240        if (!info)
 241                return NULL;
 242
 243        /* zero out bytes kernel does not understand */
 244        nrecord = info;
 245        for (i = 0; i < cnt; i++) {
 246                memcpy(nrecord, orecord, expected_rec_size);
 247                memset(nrecord + expected_rec_size, 0,
 248                       actual_rec_size - expected_rec_size);
 249                orecord += actual_rec_size;
 250                nrecord += actual_rec_size;
 251        }
 252
 253        return info;
 254}
 255
 256int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr)
 257{
 258        void *finfo = NULL, *linfo = NULL;
 259        union bpf_attr attr;
 260        int fd;
 261
 262        if (!load_attr->log_buf != !load_attr->log_buf_sz)
 263                return libbpf_err(-EINVAL);
 264
 265        if (load_attr->log_level > (4 | 2 | 1) || (load_attr->log_level && !load_attr->log_buf))
 266                return libbpf_err(-EINVAL);
 267
 268        memset(&attr, 0, sizeof(attr));
 269        attr.prog_type = load_attr->prog_type;
 270        attr.expected_attach_type = load_attr->expected_attach_type;
 271
 272        if (load_attr->attach_prog_fd)
 273                attr.attach_prog_fd = load_attr->attach_prog_fd;
 274        else
 275                attr.attach_btf_obj_fd = load_attr->attach_btf_obj_fd;
 276        attr.attach_btf_id = load_attr->attach_btf_id;
 277
 278        attr.prog_ifindex = load_attr->prog_ifindex;
 279        attr.kern_version = load_attr->kern_version;
 280
 281        attr.insn_cnt = (__u32)load_attr->insn_cnt;
 282        attr.insns = ptr_to_u64(load_attr->insns);
 283        attr.license = ptr_to_u64(load_attr->license);
 284
 285        attr.log_level = load_attr->log_level;
 286        if (attr.log_level) {
 287                attr.log_buf = ptr_to_u64(load_attr->log_buf);
 288                attr.log_size = load_attr->log_buf_sz;
 289        }
 290
 291        attr.prog_btf_fd = load_attr->prog_btf_fd;
 292        attr.prog_flags = load_attr->prog_flags;
 293
 294        attr.func_info_rec_size = load_attr->func_info_rec_size;
 295        attr.func_info_cnt = load_attr->func_info_cnt;
 296        attr.func_info = ptr_to_u64(load_attr->func_info);
 297
 298        attr.line_info_rec_size = load_attr->line_info_rec_size;
 299        attr.line_info_cnt = load_attr->line_info_cnt;
 300        attr.line_info = ptr_to_u64(load_attr->line_info);
 301        attr.fd_array = ptr_to_u64(load_attr->fd_array);
 302
 303        if (load_attr->name)
 304                memcpy(attr.prog_name, load_attr->name,
 305                       min(strlen(load_attr->name), (size_t)BPF_OBJ_NAME_LEN - 1));
 306
 307        fd = sys_bpf_prog_load(&attr, sizeof(attr));
 308        if (fd >= 0)
 309                return fd;
 310
  311        /* After bpf_prog_load, the kernel may modify certain attributes
  312         * to give user space a hint about how to deal with the loading
  313         * failure. Check whether we can make some changes and load again.
  314         */
 315        while (errno == E2BIG && (!finfo || !linfo)) {
 316                if (!finfo && attr.func_info_cnt &&
 317                    attr.func_info_rec_size < load_attr->func_info_rec_size) {
 318                        /* try with corrected func info records */
 319                        finfo = alloc_zero_tailing_info(load_attr->func_info,
 320                                                        load_attr->func_info_cnt,
 321                                                        load_attr->func_info_rec_size,
 322                                                        attr.func_info_rec_size);
 323                        if (!finfo) {
 324                                errno = E2BIG;
 325                                goto done;
 326                        }
 327
 328                        attr.func_info = ptr_to_u64(finfo);
 329                        attr.func_info_rec_size = load_attr->func_info_rec_size;
 330                } else if (!linfo && attr.line_info_cnt &&
 331                           attr.line_info_rec_size <
 332                           load_attr->line_info_rec_size) {
 333                        linfo = alloc_zero_tailing_info(load_attr->line_info,
 334                                                        load_attr->line_info_cnt,
 335                                                        load_attr->line_info_rec_size,
 336                                                        attr.line_info_rec_size);
 337                        if (!linfo) {
 338                                errno = E2BIG;
 339                                goto done;
 340                        }
 341
 342                        attr.line_info = ptr_to_u64(linfo);
 343                        attr.line_info_rec_size = load_attr->line_info_rec_size;
 344                } else {
 345                        break;
 346                }
 347
 348                fd = sys_bpf_prog_load(&attr, sizeof(attr));
 349                if (fd >= 0)
 350                        goto done;
 351        }
 352
 353        if (load_attr->log_level || !load_attr->log_buf)
 354                goto done;
 355
 356        /* Try again with log */
 357        attr.log_buf = ptr_to_u64(load_attr->log_buf);
 358        attr.log_size = load_attr->log_buf_sz;
 359        attr.log_level = 1;
 360        load_attr->log_buf[0] = 0;
 361
 362        fd = sys_bpf_prog_load(&attr, sizeof(attr));
 363done:
 364        /* free() doesn't affect errno, so we don't need to restore it */
 365        free(finfo);
 366        free(linfo);
 367        return libbpf_err_errno(fd);
 368}
 369
 370int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
 371                           char *log_buf, size_t log_buf_sz)
 372{
 373        struct bpf_prog_load_params p = {};
 374
 375        if (!load_attr || !log_buf != !log_buf_sz)
 376                return libbpf_err(-EINVAL);
 377
 378        p.prog_type = load_attr->prog_type;
 379        p.expected_attach_type = load_attr->expected_attach_type;
 380        switch (p.prog_type) {
 381        case BPF_PROG_TYPE_STRUCT_OPS:
 382        case BPF_PROG_TYPE_LSM:
 383                p.attach_btf_id = load_attr->attach_btf_id;
 384                break;
 385        case BPF_PROG_TYPE_TRACING:
 386        case BPF_PROG_TYPE_EXT:
 387                p.attach_btf_id = load_attr->attach_btf_id;
 388                p.attach_prog_fd = load_attr->attach_prog_fd;
 389                break;
 390        default:
 391                p.prog_ifindex = load_attr->prog_ifindex;
 392                p.kern_version = load_attr->kern_version;
 393        }
 394        p.insn_cnt = load_attr->insns_cnt;
 395        p.insns = load_attr->insns;
 396        p.license = load_attr->license;
 397        p.log_level = load_attr->log_level;
 398        p.log_buf = log_buf;
 399        p.log_buf_sz = log_buf_sz;
 400        p.prog_btf_fd = load_attr->prog_btf_fd;
 401        p.func_info_rec_size = load_attr->func_info_rec_size;
 402        p.func_info_cnt = load_attr->func_info_cnt;
 403        p.func_info = load_attr->func_info;
 404        p.line_info_rec_size = load_attr->line_info_rec_size;
 405        p.line_info_cnt = load_attr->line_info_cnt;
 406        p.line_info = load_attr->line_info;
 407        p.name = load_attr->name;
 408        p.prog_flags = load_attr->prog_flags;
 409
 410        return libbpf__bpf_prog_load(&p);
 411}
 412
 413int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
 414                     size_t insns_cnt, const char *license,
 415                     __u32 kern_version, char *log_buf,
 416                     size_t log_buf_sz)
 417{
 418        struct bpf_load_program_attr load_attr;
 419
 420        memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
 421        load_attr.prog_type = type;
 422        load_attr.expected_attach_type = 0;
 423        load_attr.name = NULL;
 424        load_attr.insns = insns;
 425        load_attr.insns_cnt = insns_cnt;
 426        load_attr.license = license;
 427        load_attr.kern_version = kern_version;
 428
 429        return bpf_load_program_xattr(&load_attr, log_buf, log_buf_sz);
 430}
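
/*
 * Usage sketch (illustrative): load a minimal "return 0" socket filter
 * through the wrapper above. The two raw instructions encode
 * "r0 = 0; exit"; the verifier log ends up in log_buf if loading fails.
 *
 *	struct bpf_insn insns[] = {
 *		{ .code = BPF_ALU64 | BPF_MOV | BPF_K,
 *		  .dst_reg = BPF_REG_0, .imm = 0 },
 *		{ .code = BPF_JMP | BPF_EXIT },
 *	};
 *	char log_buf[4096];
 *	int prog_fd;
 *
 *	prog_fd = bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, insns,
 *				   sizeof(insns) / sizeof(insns[0]), "GPL",
 *				   0, log_buf, sizeof(log_buf));
 */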
 431
 432int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
 433                       size_t insns_cnt, __u32 prog_flags, const char *license,
 434                       __u32 kern_version, char *log_buf, size_t log_buf_sz,
 435                       int log_level)
 436{
 437        union bpf_attr attr;
 438        int fd;
 439
 440        memset(&attr, 0, sizeof(attr));
 441        attr.prog_type = type;
 442        attr.insn_cnt = (__u32)insns_cnt;
 443        attr.insns = ptr_to_u64(insns);
 444        attr.license = ptr_to_u64(license);
 445        attr.log_buf = ptr_to_u64(log_buf);
 446        attr.log_size = log_buf_sz;
 447        attr.log_level = log_level;
 448        log_buf[0] = 0;
 449        attr.kern_version = kern_version;
 450        attr.prog_flags = prog_flags;
 451
 452        fd = sys_bpf_prog_load(&attr, sizeof(attr));
 453        return libbpf_err_errno(fd);
 454}
 455
 456int bpf_map_update_elem(int fd, const void *key, const void *value,
 457                        __u64 flags)
 458{
 459        union bpf_attr attr;
 460        int ret;
 461
 462        memset(&attr, 0, sizeof(attr));
 463        attr.map_fd = fd;
 464        attr.key = ptr_to_u64(key);
 465        attr.value = ptr_to_u64(value);
 466        attr.flags = flags;
 467
 468        ret = sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
 469        return libbpf_err_errno(ret);
 470}
 471
 472int bpf_map_lookup_elem(int fd, const void *key, void *value)
 473{
 474        union bpf_attr attr;
 475        int ret;
 476
 477        memset(&attr, 0, sizeof(attr));
 478        attr.map_fd = fd;
 479        attr.key = ptr_to_u64(key);
 480        attr.value = ptr_to_u64(value);
 481
 482        ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
 483        return libbpf_err_errno(ret);
 484}
 485
 486int bpf_map_lookup_elem_flags(int fd, const void *key, void *value, __u64 flags)
 487{
 488        union bpf_attr attr;
 489        int ret;
 490
 491        memset(&attr, 0, sizeof(attr));
 492        attr.map_fd = fd;
 493        attr.key = ptr_to_u64(key);
 494        attr.value = ptr_to_u64(value);
 495        attr.flags = flags;
 496
 497        ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
 498        return libbpf_err_errno(ret);
 499}
 500
 501int bpf_map_lookup_and_delete_elem(int fd, const void *key, void *value)
 502{
 503        union bpf_attr attr;
 504        int ret;
 505
 506        memset(&attr, 0, sizeof(attr));
 507        attr.map_fd = fd;
 508        attr.key = ptr_to_u64(key);
 509        attr.value = ptr_to_u64(value);
 510
 511        ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, sizeof(attr));
 512        return libbpf_err_errno(ret);
 513}
 514
 515int bpf_map_lookup_and_delete_elem_flags(int fd, const void *key, void *value, __u64 flags)
 516{
 517        union bpf_attr attr;
 518        int ret;
 519
 520        memset(&attr, 0, sizeof(attr));
 521        attr.map_fd = fd;
 522        attr.key = ptr_to_u64(key);
 523        attr.value = ptr_to_u64(value);
 524        attr.flags = flags;
 525
 526        ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, sizeof(attr));
 527        return libbpf_err_errno(ret);
 528}
 529
 530int bpf_map_delete_elem(int fd, const void *key)
 531{
 532        union bpf_attr attr;
 533        int ret;
 534
 535        memset(&attr, 0, sizeof(attr));
 536        attr.map_fd = fd;
 537        attr.key = ptr_to_u64(key);
 538
 539        ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
 540        return libbpf_err_errno(ret);
 541}
 542
 543int bpf_map_get_next_key(int fd, const void *key, void *next_key)
 544{
 545        union bpf_attr attr;
 546        int ret;
 547
 548        memset(&attr, 0, sizeof(attr));
 549        attr.map_fd = fd;
 550        attr.key = ptr_to_u64(key);
 551        attr.next_key = ptr_to_u64(next_key);
 552
 553        ret = sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
 554        return libbpf_err_errno(ret);
 555}
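
/*
 * Usage sketch (illustrative): walk every key of a map with the helper
 * above. Passing NULL as the current key yields the first key; a negative
 * return (errno ENOENT) marks the end. The key/value types and the use()
 * consumer are assumptions made for the example.
 *
 *	__u32 key, next_key;
 *	__u64 value;
 *	void *prev = NULL;
 *
 *	while (bpf_map_get_next_key(map_fd, prev, &next_key) == 0) {
 *		if (bpf_map_lookup_elem(map_fd, &next_key, &value) == 0)
 *			use(next_key, value);
 *		key = next_key;
 *		prev = &key;
 *	}
 */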
 556
 557int bpf_map_freeze(int fd)
 558{
 559        union bpf_attr attr;
 560        int ret;
 561
 562        memset(&attr, 0, sizeof(attr));
 563        attr.map_fd = fd;
 564
 565        ret = sys_bpf(BPF_MAP_FREEZE, &attr, sizeof(attr));
 566        return libbpf_err_errno(ret);
 567}
 568
 569static int bpf_map_batch_common(int cmd, int fd, void  *in_batch,
 570                                void *out_batch, void *keys, void *values,
 571                                __u32 *count,
 572                                const struct bpf_map_batch_opts *opts)
 573{
 574        union bpf_attr attr;
 575        int ret;
 576
 577        if (!OPTS_VALID(opts, bpf_map_batch_opts))
 578                return libbpf_err(-EINVAL);
 579
 580        memset(&attr, 0, sizeof(attr));
 581        attr.batch.map_fd = fd;
 582        attr.batch.in_batch = ptr_to_u64(in_batch);
 583        attr.batch.out_batch = ptr_to_u64(out_batch);
 584        attr.batch.keys = ptr_to_u64(keys);
 585        attr.batch.values = ptr_to_u64(values);
 586        attr.batch.count = *count;
 587        attr.batch.elem_flags  = OPTS_GET(opts, elem_flags, 0);
 588        attr.batch.flags = OPTS_GET(opts, flags, 0);
 589
 590        ret = sys_bpf(cmd, &attr, sizeof(attr));
 591        *count = attr.batch.count;
 592
 593        return libbpf_err_errno(ret);
 594}
 595
 596int bpf_map_delete_batch(int fd, void *keys, __u32 *count,
 597                         const struct bpf_map_batch_opts *opts)
 598{
 599        return bpf_map_batch_common(BPF_MAP_DELETE_BATCH, fd, NULL,
 600                                    NULL, keys, NULL, count, opts);
 601}
 602
 603int bpf_map_lookup_batch(int fd, void *in_batch, void *out_batch, void *keys,
 604                         void *values, __u32 *count,
 605                         const struct bpf_map_batch_opts *opts)
 606{
 607        return bpf_map_batch_common(BPF_MAP_LOOKUP_BATCH, fd, in_batch,
 608                                    out_batch, keys, values, count, opts);
 609}
 610
 611int bpf_map_lookup_and_delete_batch(int fd, void *in_batch, void *out_batch,
 612                                    void *keys, void *values, __u32 *count,
 613                                    const struct bpf_map_batch_opts *opts)
 614{
 615        return bpf_map_batch_common(BPF_MAP_LOOKUP_AND_DELETE_BATCH,
 616                                    fd, in_batch, out_batch, keys, values,
 617                                    count, opts);
 618}
 619
 620int bpf_map_update_batch(int fd, void *keys, void *values, __u32 *count,
 621                         const struct bpf_map_batch_opts *opts)
 622{
 623        return bpf_map_batch_common(BPF_MAP_UPDATE_BATCH, fd, NULL, NULL,
 624                                    keys, values, count, opts);
 625}
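
/*
 * Usage sketch (illustrative, semantics hedged): read a map in batches.
 * in_batch is NULL on the first call; the kernel returns an opaque resume
 * token through out_batch and updates *count to the number of elements
 * actually copied. An ENOENT error after the final batch is expected.
 *
 *	DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts);
 *	__u32 keys[64], count, in = 0, out = 0;
 *	__u64 vals[64];
 *	void *in_p = NULL;
 *	int err;
 *
 *	do {
 *		count = 64;
 *		err = bpf_map_lookup_batch(map_fd, in_p, &out,
 *					   keys, vals, &count, &opts);
 *		// process 'count' entries of keys[]/vals[] here
 *		in = out;
 *		in_p = &in;
 *	} while (!err);
 */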
 626
 627int bpf_obj_pin(int fd, const char *pathname)
 628{
 629        union bpf_attr attr;
 630        int ret;
 631
 632        memset(&attr, 0, sizeof(attr));
 633        attr.pathname = ptr_to_u64((void *)pathname);
 634        attr.bpf_fd = fd;
 635
 636        ret = sys_bpf(BPF_OBJ_PIN, &attr, sizeof(attr));
 637        return libbpf_err_errno(ret);
 638}
 639
 640int bpf_obj_get(const char *pathname)
 641{
 642        union bpf_attr attr;
 643        int fd;
 644
 645        memset(&attr, 0, sizeof(attr));
 646        attr.pathname = ptr_to_u64((void *)pathname);
 647
 648        fd = sys_bpf_fd(BPF_OBJ_GET, &attr, sizeof(attr));
 649        return libbpf_err_errno(fd);
 650}
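
/*
 * Usage sketch (illustrative): pin an fd under a bpffs mount so another
 * process can pick it up later. The path assumes bpffs is mounted at
 * /sys/fs/bpf.
 *
 *	bpf_obj_pin(map_fd, "/sys/fs/bpf/example_map");
 *	...
 *	int fd = bpf_obj_get("/sys/fs/bpf/example_map");
 */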
 651
 652int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type,
 653                    unsigned int flags)
 654{
 655        DECLARE_LIBBPF_OPTS(bpf_prog_attach_opts, opts,
 656                .flags = flags,
 657        );
 658
 659        return bpf_prog_attach_xattr(prog_fd, target_fd, type, &opts);
 660}
 661
 662int bpf_prog_attach_xattr(int prog_fd, int target_fd,
 663                          enum bpf_attach_type type,
 664                          const struct bpf_prog_attach_opts *opts)
 665{
 666        union bpf_attr attr;
 667        int ret;
 668
 669        if (!OPTS_VALID(opts, bpf_prog_attach_opts))
 670                return libbpf_err(-EINVAL);
 671
 672        memset(&attr, 0, sizeof(attr));
 673        attr.target_fd     = target_fd;
 674        attr.attach_bpf_fd = prog_fd;
 675        attr.attach_type   = type;
 676        attr.attach_flags  = OPTS_GET(opts, flags, 0);
 677        attr.replace_bpf_fd = OPTS_GET(opts, replace_prog_fd, 0);
 678
 679        ret = sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
 680        return libbpf_err_errno(ret);
 681}
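
/*
 * Usage sketch (illustrative): attach an already-loaded cgroup ingress
 * program to a cgroup directory fd; BPF_F_ALLOW_MULTI lets multiple
 * programs coexist on the same cgroup. The cgroup path is an assumption.
 *
 *	int cg_fd = open("/sys/fs/cgroup/unified/mygroup", O_RDONLY);
 *
 *	bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_INET_INGRESS,
 *			BPF_F_ALLOW_MULTI);
 */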
 682
 683int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
 684{
 685        union bpf_attr attr;
 686        int ret;
 687
 688        memset(&attr, 0, sizeof(attr));
 689        attr.target_fd   = target_fd;
 690        attr.attach_type = type;
 691
 692        ret = sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
 693        return libbpf_err_errno(ret);
 694}
 695
 696int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type)
 697{
 698        union bpf_attr attr;
 699        int ret;
 700
 701        memset(&attr, 0, sizeof(attr));
 702        attr.target_fd   = target_fd;
 703        attr.attach_bpf_fd = prog_fd;
 704        attr.attach_type = type;
 705
 706        ret = sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
 707        return libbpf_err_errno(ret);
 708}
 709
 710int bpf_link_create(int prog_fd, int target_fd,
 711                    enum bpf_attach_type attach_type,
 712                    const struct bpf_link_create_opts *opts)
 713{
 714        __u32 target_btf_id, iter_info_len;
 715        union bpf_attr attr;
 716        int fd;
 717
 718        if (!OPTS_VALID(opts, bpf_link_create_opts))
 719                return libbpf_err(-EINVAL);
 720
 721        iter_info_len = OPTS_GET(opts, iter_info_len, 0);
 722        target_btf_id = OPTS_GET(opts, target_btf_id, 0);
 723
 724        /* validate we don't have unexpected combinations of non-zero fields */
 725        if (iter_info_len || target_btf_id) {
 726                if (iter_info_len && target_btf_id)
 727                        return libbpf_err(-EINVAL);
 728                if (!OPTS_ZEROED(opts, target_btf_id))
 729                        return libbpf_err(-EINVAL);
 730        }
 731
 732        memset(&attr, 0, sizeof(attr));
 733        attr.link_create.prog_fd = prog_fd;
 734        attr.link_create.target_fd = target_fd;
 735        attr.link_create.attach_type = attach_type;
 736        attr.link_create.flags = OPTS_GET(opts, flags, 0);
 737
 738        if (target_btf_id) {
 739                attr.link_create.target_btf_id = target_btf_id;
 740                goto proceed;
 741        }
 742
 743        switch (attach_type) {
 744        case BPF_TRACE_ITER:
 745                attr.link_create.iter_info = ptr_to_u64(OPTS_GET(opts, iter_info, (void *)0));
 746                attr.link_create.iter_info_len = iter_info_len;
 747                break;
 748        case BPF_PERF_EVENT:
 749                attr.link_create.perf_event.bpf_cookie = OPTS_GET(opts, perf_event.bpf_cookie, 0);
 750                if (!OPTS_ZEROED(opts, perf_event))
 751                        return libbpf_err(-EINVAL);
 752                break;
 753        default:
 754                if (!OPTS_ZEROED(opts, flags))
 755                        return libbpf_err(-EINVAL);
 756                break;
 757        }
 758proceed:
 759        fd = sys_bpf_fd(BPF_LINK_CREATE, &attr, sizeof(attr));
 760        return libbpf_err_errno(fd);
 761}
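
/*
 * Usage sketch (illustrative): create a BPF link for a perf event fd and
 * pass a bpf_cookie through the opts consumed above. perf_fd is assumed
 * to come from perf_event_open() with a kprobe or tracepoint event.
 *
 *	DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts,
 *		.perf_event.bpf_cookie = 0x1234,
 *	);
 *	int link_fd;
 *
 *	link_fd = bpf_link_create(prog_fd, perf_fd, BPF_PERF_EVENT, &opts);
 */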
 762
 763int bpf_link_detach(int link_fd)
 764{
 765        union bpf_attr attr;
 766        int ret;
 767
 768        memset(&attr, 0, sizeof(attr));
 769        attr.link_detach.link_fd = link_fd;
 770
 771        ret = sys_bpf(BPF_LINK_DETACH, &attr, sizeof(attr));
 772        return libbpf_err_errno(ret);
 773}
 774
 775int bpf_link_update(int link_fd, int new_prog_fd,
 776                    const struct bpf_link_update_opts *opts)
 777{
 778        union bpf_attr attr;
 779        int ret;
 780
 781        if (!OPTS_VALID(opts, bpf_link_update_opts))
 782                return libbpf_err(-EINVAL);
 783
 784        memset(&attr, 0, sizeof(attr));
 785        attr.link_update.link_fd = link_fd;
 786        attr.link_update.new_prog_fd = new_prog_fd;
 787        attr.link_update.flags = OPTS_GET(opts, flags, 0);
 788        attr.link_update.old_prog_fd = OPTS_GET(opts, old_prog_fd, 0);
 789
 790        ret = sys_bpf(BPF_LINK_UPDATE, &attr, sizeof(attr));
 791        return libbpf_err_errno(ret);
 792}
 793
 794int bpf_iter_create(int link_fd)
 795{
 796        union bpf_attr attr;
 797        int fd;
 798
 799        memset(&attr, 0, sizeof(attr));
 800        attr.iter_create.link_fd = link_fd;
 801
 802        fd = sys_bpf_fd(BPF_ITER_CREATE, &attr, sizeof(attr));
 803        return libbpf_err_errno(fd);
 804}
 805
 806int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
 807                   __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt)
 808{
 809        union bpf_attr attr;
 810        int ret;
 811
 812        memset(&attr, 0, sizeof(attr));
 813        attr.query.target_fd    = target_fd;
 814        attr.query.attach_type  = type;
 815        attr.query.query_flags  = query_flags;
 816        attr.query.prog_cnt     = *prog_cnt;
 817        attr.query.prog_ids     = ptr_to_u64(prog_ids);
 818
 819        ret = sys_bpf(BPF_PROG_QUERY, &attr, sizeof(attr));
 820
 821        if (attach_flags)
 822                *attach_flags = attr.query.attach_flags;
 823        *prog_cnt = attr.query.prog_cnt;
 824
 825        return libbpf_err_errno(ret);
 826}
 827
 828int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size,
 829                      void *data_out, __u32 *size_out, __u32 *retval,
 830                      __u32 *duration)
 831{
 832        union bpf_attr attr;
 833        int ret;
 834
 835        memset(&attr, 0, sizeof(attr));
 836        attr.test.prog_fd = prog_fd;
 837        attr.test.data_in = ptr_to_u64(data);
 838        attr.test.data_out = ptr_to_u64(data_out);
 839        attr.test.data_size_in = size;
 840        attr.test.repeat = repeat;
 841
 842        ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));
 843
 844        if (size_out)
 845                *size_out = attr.test.data_size_out;
 846        if (retval)
 847                *retval = attr.test.retval;
 848        if (duration)
 849                *duration = attr.test.duration;
 850
 851        return libbpf_err_errno(ret);
 852}
 853
 854int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr)
 855{
 856        union bpf_attr attr;
 857        int ret;
 858
 859        if (!test_attr->data_out && test_attr->data_size_out > 0)
 860                return libbpf_err(-EINVAL);
 861
 862        memset(&attr, 0, sizeof(attr));
 863        attr.test.prog_fd = test_attr->prog_fd;
 864        attr.test.data_in = ptr_to_u64(test_attr->data_in);
 865        attr.test.data_out = ptr_to_u64(test_attr->data_out);
 866        attr.test.data_size_in = test_attr->data_size_in;
 867        attr.test.data_size_out = test_attr->data_size_out;
 868        attr.test.ctx_in = ptr_to_u64(test_attr->ctx_in);
 869        attr.test.ctx_out = ptr_to_u64(test_attr->ctx_out);
 870        attr.test.ctx_size_in = test_attr->ctx_size_in;
 871        attr.test.ctx_size_out = test_attr->ctx_size_out;
 872        attr.test.repeat = test_attr->repeat;
 873
 874        ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));
 875
 876        test_attr->data_size_out = attr.test.data_size_out;
 877        test_attr->ctx_size_out = attr.test.ctx_size_out;
 878        test_attr->retval = attr.test.retval;
 879        test_attr->duration = attr.test.duration;
 880
 881        return libbpf_err_errno(ret);
 882}
 883
 884int bpf_prog_test_run_opts(int prog_fd, struct bpf_test_run_opts *opts)
 885{
 886        union bpf_attr attr;
 887        int ret;
 888
 889        if (!OPTS_VALID(opts, bpf_test_run_opts))
 890                return libbpf_err(-EINVAL);
 891
 892        memset(&attr, 0, sizeof(attr));
 893        attr.test.prog_fd = prog_fd;
 894        attr.test.cpu = OPTS_GET(opts, cpu, 0);
 895        attr.test.flags = OPTS_GET(opts, flags, 0);
 896        attr.test.repeat = OPTS_GET(opts, repeat, 0);
 897        attr.test.duration = OPTS_GET(opts, duration, 0);
 898        attr.test.ctx_size_in = OPTS_GET(opts, ctx_size_in, 0);
 899        attr.test.ctx_size_out = OPTS_GET(opts, ctx_size_out, 0);
 900        attr.test.data_size_in = OPTS_GET(opts, data_size_in, 0);
 901        attr.test.data_size_out = OPTS_GET(opts, data_size_out, 0);
 902        attr.test.ctx_in = ptr_to_u64(OPTS_GET(opts, ctx_in, NULL));
 903        attr.test.ctx_out = ptr_to_u64(OPTS_GET(opts, ctx_out, NULL));
 904        attr.test.data_in = ptr_to_u64(OPTS_GET(opts, data_in, NULL));
 905        attr.test.data_out = ptr_to_u64(OPTS_GET(opts, data_out, NULL));
 906
 907        ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));
 908
 909        OPTS_SET(opts, data_size_out, attr.test.data_size_out);
 910        OPTS_SET(opts, ctx_size_out, attr.test.ctx_size_out);
 911        OPTS_SET(opts, duration, attr.test.duration);
 912        OPTS_SET(opts, retval, attr.test.retval);
 913
 914        return libbpf_err_errno(ret);
 915}
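
/*
 * Usage sketch (illustrative): run a loaded program once over a dummy
 * packet and read back the verdict. Field names mirror the opts consumed
 * above; the 64-byte buffer is an arbitrary choice.
 *
 *	char pkt[64] = {};
 *	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, topts,
 *		.data_in = pkt,
 *		.data_size_in = sizeof(pkt),
 *		.repeat = 1,
 *	);
 *
 *	if (!bpf_prog_test_run_opts(prog_fd, &topts))
 *		printf("retval %u, duration %u\n", topts.retval, topts.duration);
 */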
 916
 917static int bpf_obj_get_next_id(__u32 start_id, __u32 *next_id, int cmd)
 918{
 919        union bpf_attr attr;
 920        int err;
 921
 922        memset(&attr, 0, sizeof(attr));
 923        attr.start_id = start_id;
 924
 925        err = sys_bpf(cmd, &attr, sizeof(attr));
 926        if (!err)
 927                *next_id = attr.next_id;
 928
 929        return libbpf_err_errno(err);
 930}
 931
 932int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id)
 933{
 934        return bpf_obj_get_next_id(start_id, next_id, BPF_PROG_GET_NEXT_ID);
 935}
 936
 937int bpf_map_get_next_id(__u32 start_id, __u32 *next_id)
 938{
 939        return bpf_obj_get_next_id(start_id, next_id, BPF_MAP_GET_NEXT_ID);
 940}
 941
 942int bpf_btf_get_next_id(__u32 start_id, __u32 *next_id)
 943{
 944        return bpf_obj_get_next_id(start_id, next_id, BPF_BTF_GET_NEXT_ID);
 945}
 946
 947int bpf_link_get_next_id(__u32 start_id, __u32 *next_id)
 948{
 949        return bpf_obj_get_next_id(start_id, next_id, BPF_LINK_GET_NEXT_ID);
 950}
 951
 952int bpf_prog_get_fd_by_id(__u32 id)
 953{
 954        union bpf_attr attr;
 955        int fd;
 956
 957        memset(&attr, 0, sizeof(attr));
 958        attr.prog_id = id;
 959
 960        fd = sys_bpf_fd(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
 961        return libbpf_err_errno(fd);
 962}
 963
 964int bpf_map_get_fd_by_id(__u32 id)
 965{
 966        union bpf_attr attr;
 967        int fd;
 968
 969        memset(&attr, 0, sizeof(attr));
 970        attr.map_id = id;
 971
 972        fd = sys_bpf_fd(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
 973        return libbpf_err_errno(fd);
 974}
 975
 976int bpf_btf_get_fd_by_id(__u32 id)
 977{
 978        union bpf_attr attr;
 979        int fd;
 980
 981        memset(&attr, 0, sizeof(attr));
 982        attr.btf_id = id;
 983
 984        fd = sys_bpf_fd(BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr));
 985        return libbpf_err_errno(fd);
 986}
 987
 988int bpf_link_get_fd_by_id(__u32 id)
 989{
 990        union bpf_attr attr;
 991        int fd;
 992
 993        memset(&attr, 0, sizeof(attr));
 994        attr.link_id = id;
 995
 996        fd = sys_bpf_fd(BPF_LINK_GET_FD_BY_ID, &attr, sizeof(attr));
 997        return libbpf_err_errno(fd);
 998}
 999
1000int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len)
1001{
1002        union bpf_attr attr;
1003        int err;
1004
1005        memset(&attr, 0, sizeof(attr));
1006        attr.info.bpf_fd = bpf_fd;
1007        attr.info.info_len = *info_len;
1008        attr.info.info = ptr_to_u64(info);
1009
1010        err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
1011
1012        if (!err)
1013                *info_len = attr.info.info_len;
1014
1015        return libbpf_err_errno(err);
1016}
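
/*
 * Usage sketch (illustrative): enumerate every loaded program and fetch
 * its bpf_prog_info through the id/fd/info helpers above.
 *
 *	struct bpf_prog_info info;
 *	__u32 id = 0, len;
 *	int fd;
 *
 *	while (!bpf_prog_get_next_id(id, &id)) {
 *		fd = bpf_prog_get_fd_by_id(id);
 *		if (fd < 0)
 *			continue;
 *		memset(&info, 0, sizeof(info));
 *		len = sizeof(info);
 *		if (!bpf_obj_get_info_by_fd(fd, &info, &len))
 *			printf("prog %u: %s\n", info.id, info.name);
 *		close(fd);
 *	}
 */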
1017
1018int bpf_raw_tracepoint_open(const char *name, int prog_fd)
1019{
1020        union bpf_attr attr;
1021        int fd;
1022
1023        memset(&attr, 0, sizeof(attr));
1024        attr.raw_tracepoint.name = ptr_to_u64(name);
1025        attr.raw_tracepoint.prog_fd = prog_fd;
1026
1027        fd = sys_bpf_fd(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
1028        return libbpf_err_errno(fd);
1029}
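
/*
 * Usage sketch (illustrative): attach a BPF_PROG_TYPE_RAW_TRACEPOINT
 * program to a raw tracepoint by name; the returned fd keeps the
 * attachment alive.
 *
 *	int fd = bpf_raw_tracepoint_open("sched_switch", prog_fd);
 */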
1030
1031int bpf_load_btf(const void *btf, __u32 btf_size, char *log_buf, __u32 log_buf_size,
1032                 bool do_log)
1033{
1034        union bpf_attr attr = {};
1035        int fd;
1036
1037        attr.btf = ptr_to_u64(btf);
1038        attr.btf_size = btf_size;
1039
1040retry:
1041        if (do_log && log_buf && log_buf_size) {
1042                attr.btf_log_level = 1;
1043                attr.btf_log_size = log_buf_size;
1044                attr.btf_log_buf = ptr_to_u64(log_buf);
1045        }
1046
1047        fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, sizeof(attr));
1048
1049        if (fd < 0 && !do_log && log_buf && log_buf_size) {
1050                do_log = true;
1051                goto retry;
1052        }
1053
1054        return libbpf_err_errno(fd);
1055}
1056
1057int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len,
1058                      __u32 *prog_id, __u32 *fd_type, __u64 *probe_offset,
1059                      __u64 *probe_addr)
1060{
1061        union bpf_attr attr = {};
1062        int err;
1063
1064        attr.task_fd_query.pid = pid;
1065        attr.task_fd_query.fd = fd;
1066        attr.task_fd_query.flags = flags;
1067        attr.task_fd_query.buf = ptr_to_u64(buf);
1068        attr.task_fd_query.buf_len = *buf_len;
1069
1070        err = sys_bpf(BPF_TASK_FD_QUERY, &attr, sizeof(attr));
1071
1072        *buf_len = attr.task_fd_query.buf_len;
1073        *prog_id = attr.task_fd_query.prog_id;
1074        *fd_type = attr.task_fd_query.fd_type;
1075        *probe_offset = attr.task_fd_query.probe_offset;
1076        *probe_addr = attr.task_fd_query.probe_addr;
1077
1078        return libbpf_err_errno(err);
1079}
1080
1081int bpf_enable_stats(enum bpf_stats_type type)
1082{
1083        union bpf_attr attr;
1084        int fd;
1085
1086        memset(&attr, 0, sizeof(attr));
1087        attr.enable_stats.type = type;
1088
1089        fd = sys_bpf_fd(BPF_ENABLE_STATS, &attr, sizeof(attr));
1090        return libbpf_err_errno(fd);
1091}
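
/*
 * Usage sketch (illustrative): run-time statistics stay enabled for as
 * long as the returned fd is kept open; closing it disables them again.
 *
 *	int stats_fd = bpf_enable_stats(BPF_STATS_RUN_TIME);
 *	...
 *	close(stats_fd);
 */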
1092
1093int bpf_prog_bind_map(int prog_fd, int map_fd,
1094                      const struct bpf_prog_bind_opts *opts)
1095{
1096        union bpf_attr attr;
1097        int ret;
1098
1099        if (!OPTS_VALID(opts, bpf_prog_bind_opts))
1100                return libbpf_err(-EINVAL);
1101
1102        memset(&attr, 0, sizeof(attr));
1103        attr.prog_bind_map.prog_fd = prog_fd;
1104        attr.prog_bind_map.map_fd = map_fd;
1105        attr.prog_bind_map.flags = OPTS_GET(opts, flags, 0);
1106
1107        ret = sys_bpf(BPF_PROG_BIND_MAP, &attr, sizeof(attr));
1108        return libbpf_err_errno(ret);
1109}
1110