/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#ifndef _UAPI__LINUX_BPF_H__
#define _UAPI__LINUX_BPF_H__

#include <linux/types.h>
#include <linux/bpf_common.h>

/* Extended instruction set based on top of classic BPF */

/* instruction classes */
#define BPF_ALU64	0x07	/* alu mode in double word width */

/* ld/ldx fields */
#define BPF_DW		0x18	/* double word (64-bit) */
#define BPF_XADD	0xc0	/* exclusive add */

/* alu/jmp fields */
#define BPF_MOV		0xb0	/* mov reg to reg */
#define BPF_ARSH	0xc0	/* sign extending arithmetic shift right */

/* change endianness of a register */
#define BPF_END		0xd0	/* flags for endianness conversion: */
#define BPF_TO_LE	0x00	/* convert to little-endian */
#define BPF_TO_BE	0x08	/* convert to big-endian */
#define BPF_FROM_LE	BPF_TO_LE
#define BPF_FROM_BE	BPF_TO_BE

/* jmp encodings */
#define BPF_JNE		0x50	/* jump != */
#define BPF_JLT		0xa0	/* LT is unsigned, '<' */
#define BPF_JLE		0xb0	/* LE is unsigned, '<=' */
#define BPF_JSGT	0x60	/* SGT is signed '>', GT in x86 */
#define BPF_JSGE	0x70	/* SGE is signed '>=', GE in x86 */
#define BPF_JSLT	0xc0	/* SLT is signed, '<' */
#define BPF_JSLE	0xd0	/* SLE is signed, '<=' */
#define BPF_CALL	0x80	/* function call */
#define BPF_EXIT	0x90	/* function return */

/* Register numbers */
enum {
	BPF_REG_0 = 0,
	BPF_REG_1,
	BPF_REG_2,
	BPF_REG_3,
	BPF_REG_4,
	BPF_REG_5,
	BPF_REG_6,
	BPF_REG_7,
	BPF_REG_8,
	BPF_REG_9,
	BPF_REG_10,
	__MAX_BPF_REG,
};

/* BPF has 10 general purpose 64-bit registers and stack frame. */
#define MAX_BPF_REG	__MAX_BPF_REG

struct bpf_insn {
	__u8	code;		/* opcode */
	__u8	dst_reg:4;	/* dest register */
	__u8	src_reg:4;	/* source register */
	__s16	off;		/* signed offset */
	__s32	imm;		/* signed immediate constant */
};

/* Key of a BPF_MAP_TYPE_LPM_TRIE entry */
struct bpf_lpm_trie_key {
	__u32	prefixlen;	/* up to 32 for AF_INET, 128 for AF_INET6 */
	__u8	data[0];	/* Arbitrary size */
};
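
/* Illustrative only (not part of the UAPI): user space typically embeds the
 * flexible "data" member in its own key layout. A key for the IPv4 prefix
 * 10.0.0.0/24 could be built as:
 *
 *	struct {
 *		__u32	prefixlen;
 *		__u8	data[4];	// IPv4 address, network byte order
 *	} key = {
 *		.prefixlen	= 24,
 *		.data		= { 10, 0, 0, 0 },
 *	};
 */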

/* BPF syscall commands, see bpf(2) man-page for details. */
enum bpf_cmd {
	BPF_MAP_CREATE,
	BPF_MAP_LOOKUP_ELEM,
	BPF_MAP_UPDATE_ELEM,
	BPF_MAP_DELETE_ELEM,
	BPF_MAP_GET_NEXT_KEY,
	BPF_PROG_LOAD,
	BPF_OBJ_PIN,
	BPF_OBJ_GET,
	BPF_PROG_ATTACH,
	BPF_PROG_DETACH,
	BPF_PROG_TEST_RUN,
	BPF_PROG_GET_NEXT_ID,
	BPF_MAP_GET_NEXT_ID,
	BPF_PROG_GET_FD_BY_ID,
	BPF_MAP_GET_FD_BY_ID,
	BPF_OBJ_GET_INFO_BY_FD,
	BPF_PROG_QUERY,
	BPF_RAW_TRACEPOINT_OPEN,
	BPF_BTF_LOAD,
	BPF_BTF_GET_FD_BY_ID,
	BPF_TASK_FD_QUERY,
};

enum bpf_map_type {
	BPF_MAP_TYPE_UNSPEC,
	BPF_MAP_TYPE_HASH,
	BPF_MAP_TYPE_ARRAY,
	BPF_MAP_TYPE_PROG_ARRAY,
	BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	BPF_MAP_TYPE_PERCPU_HASH,
	BPF_MAP_TYPE_PERCPU_ARRAY,
	BPF_MAP_TYPE_STACK_TRACE,
	BPF_MAP_TYPE_CGROUP_ARRAY,
	BPF_MAP_TYPE_LRU_HASH,
	BPF_MAP_TYPE_LRU_PERCPU_HASH,
	BPF_MAP_TYPE_LPM_TRIE,
	BPF_MAP_TYPE_ARRAY_OF_MAPS,
	BPF_MAP_TYPE_HASH_OF_MAPS,
	BPF_MAP_TYPE_DEVMAP,
	BPF_MAP_TYPE_SOCKMAP,
	BPF_MAP_TYPE_CPUMAP,
	BPF_MAP_TYPE_XSKMAP,
	BPF_MAP_TYPE_SOCKHASH,
};

enum bpf_prog_type {
	BPF_PROG_TYPE_UNSPEC,
	BPF_PROG_TYPE_SOCKET_FILTER,
	BPF_PROG_TYPE_KPROBE,
	BPF_PROG_TYPE_SCHED_CLS,
	BPF_PROG_TYPE_SCHED_ACT,
	BPF_PROG_TYPE_TRACEPOINT,
	BPF_PROG_TYPE_XDP,
	BPF_PROG_TYPE_PERF_EVENT,
	BPF_PROG_TYPE_CGROUP_SKB,
	BPF_PROG_TYPE_CGROUP_SOCK,
	BPF_PROG_TYPE_LWT_IN,
	BPF_PROG_TYPE_LWT_OUT,
	BPF_PROG_TYPE_LWT_XMIT,
	BPF_PROG_TYPE_SOCK_OPS,
	BPF_PROG_TYPE_SK_SKB,
	BPF_PROG_TYPE_CGROUP_DEVICE,
	BPF_PROG_TYPE_SK_MSG,
	BPF_PROG_TYPE_RAW_TRACEPOINT,
	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
	BPF_PROG_TYPE_LWT_SEG6LOCAL,
	BPF_PROG_TYPE_LIRC_MODE2,
};

enum bpf_attach_type {
	BPF_CGROUP_INET_INGRESS,
	BPF_CGROUP_INET_EGRESS,
	BPF_CGROUP_INET_SOCK_CREATE,
	BPF_CGROUP_SOCK_OPS,
	BPF_SK_SKB_STREAM_PARSER,
	BPF_SK_SKB_STREAM_VERDICT,
	BPF_CGROUP_DEVICE,
	BPF_SK_MSG_VERDICT,
	BPF_CGROUP_INET4_BIND,
	BPF_CGROUP_INET6_BIND,
	BPF_CGROUP_INET4_CONNECT,
	BPF_CGROUP_INET6_CONNECT,
	BPF_CGROUP_INET4_POST_BIND,
	BPF_CGROUP_INET6_POST_BIND,
	BPF_CGROUP_UDP4_SENDMSG,
	BPF_CGROUP_UDP6_SENDMSG,
	BPF_LIRC_MODE2,
	__MAX_BPF_ATTACH_TYPE
};

#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE

/* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
 *
 * NONE(default): No further bpf programs allowed in the subtree.
 *
 * BPF_F_ALLOW_OVERRIDE: If a sub-cgroup installs some bpf program,
 * the program in this cgroup yields to sub-cgroup program.
 *
 * BPF_F_ALLOW_MULTI: If a sub-cgroup installs some bpf program,
 * that cgroup program gets run in addition to the program in this cgroup.
 *
 * Only one program is allowed to be attached to a cgroup with
 * NONE or BPF_F_ALLOW_OVERRIDE flag.
 * Attaching another program on top of NONE or BPF_F_ALLOW_OVERRIDE will
 * release old program and attach the new one. Attach flags have to match.
 *
 * Multiple programs are allowed to be attached to a cgroup with
 * BPF_F_ALLOW_MULTI flag. They are executed in FIFO order
 * (those that were attached first, run first).
 * The programs of sub-cgroup are executed first, then programs of
 * this cgroup and then programs of parent cgroup.
 * When a child program makes a decision (like picking TCP CA or sock bind),
 * the parent program has a chance to override it.
 *
 * A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups.
 * A cgroup with NONE doesn't allow any programs in sub-cgroups.
 * Ex1:
 * cgrp1 (MULTI progs A, B) ->
 *    cgrp2 (OVERRIDE prog C) ->
 *      cgrp3 (MULTI prog D) ->
 *        cgrp4 (OVERRIDE prog E) ->
 *          cgrp5 (NONE prog F)
 * the event in cgrp5 triggers execution of F,D,A,B in that order.
 * if prog F is detached, the execution is E,D,A,B
 * if prog F and D are detached, the execution is E,A,B
 * if prog F, E and D are detached, the execution is C,A,B
 *
 * All eligible programs are executed regardless of return code from
 * earlier programs.
 */
#define BPF_F_ALLOW_OVERRIDE	(1U << 0)
#define BPF_F_ALLOW_MULTI	(1U << 1)

/* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
 * verifier will perform strict alignment checking as if the kernel
 * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set,
 * and NET_IP_ALIGN defined to 2.
 */
#define BPF_F_STRICT_ALIGNMENT	(1U << 0)

/* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */
#define BPF_PSEUDO_MAP_FD	1

/* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
 * offset to another bpf function
 */
#define BPF_PSEUDO_CALL		1
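
/* Illustrative only: with BPF_PSEUDO_MAP_FD, a map reference is loaded into a
 * register as a two-instruction BPF_LD | BPF_DW | BPF_IMM pair (BPF_LD and
 * BPF_IMM come from the included <linux/bpf_common.h>), where the first
 * instruction carries the file descriptor:
 *
 *	insn[0].code    = BPF_LD | BPF_DW | BPF_IMM;
 *	insn[0].dst_reg = BPF_REG_1;
 *	insn[0].src_reg = BPF_PSEUDO_MAP_FD;
 *	insn[0].imm     = map_fd;	// lower 32 bits of the immediate
 *	insn[1].code    = 0;		// second half of the ldimm64 insn
 *	insn[1].imm     = 0;		// upper 32 bits, unused for a map fd
 */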

/* flags for BPF_MAP_UPDATE_ELEM command */
#define BPF_ANY		0 /* create new element or update existing */
#define BPF_NOEXIST	1 /* create new element if it didn't exist */
#define BPF_EXIST	2 /* update existing element */

/* flags for BPF_MAP_CREATE command */
#define BPF_F_NO_PREALLOC	(1U << 0)
/* Instead of having one common LRU list in the
 * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list
 * which can scale and perform better.
 * Note, the LRU nodes (including free nodes) cannot be moved
 * across different LRU lists.
 */
#define BPF_F_NO_COMMON_LRU	(1U << 1)
/* Specify numa node during map creation */
#define BPF_F_NUMA_NODE		(1U << 2)

/* flags for BPF_PROG_QUERY */
#define BPF_F_QUERY_EFFECTIVE	(1U << 0)

#define BPF_OBJ_NAME_LEN 16U

/* Flags for accessing BPF object */
#define BPF_F_RDONLY		(1U << 3)
#define BPF_F_WRONLY		(1U << 4)

/* Flag for stack_map, store build_id+offset instead of pointer */
#define BPF_F_STACK_BUILD_ID	(1U << 5)

enum bpf_stack_build_id_status {
	/* user space needs an empty entry to identify end of a trace */
	BPF_STACK_BUILD_ID_EMPTY = 0,
	/* with valid build_id and offset */
	BPF_STACK_BUILD_ID_VALID = 1,
	/* couldn't get build_id, fallback to ip */
	BPF_STACK_BUILD_ID_IP = 2,
};

#define BPF_BUILD_ID_SIZE 20
struct bpf_stack_build_id {
	__s32		status;
	unsigned char	build_id[BPF_BUILD_ID_SIZE];
	union {
		__u64	offset;
		__u64	ip;
	};
};

union bpf_attr {
	struct { /* anonymous struct used by BPF_MAP_CREATE command */
		__u32	map_type;	/* one of enum bpf_map_type */
		__u32	key_size;	/* size of key in bytes */
		__u32	value_size;	/* size of value in bytes */
		__u32	max_entries;	/* max number of entries in a map */
		__u32	map_flags;	/* BPF_MAP_CREATE related
					 * flags defined above.
					 */
		__u32	inner_map_fd;	/* fd pointing to the inner map */
		__u32	numa_node;	/* numa node (effective only if
					 * BPF_F_NUMA_NODE is set).
					 */
		char	map_name[BPF_OBJ_NAME_LEN];
		__u32	map_ifindex;	/* ifindex of netdev to create on */
		__u32	btf_fd;		/* fd pointing to a BTF type data */
		__u32	btf_key_type_id;	/* BTF type_id of the key */
		__u32	btf_value_type_id;	/* BTF type_id of the value */
	};

	struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
		__u32		map_fd;
		__aligned_u64	key;
		union {
			__aligned_u64 value;
			__aligned_u64 next_key;
		};
		__u64		flags;
	};

	struct { /* anonymous struct used by BPF_PROG_LOAD command */
		__u32		prog_type;	/* one of enum bpf_prog_type */
		__u32		insn_cnt;
		__aligned_u64	insns;
		__aligned_u64	license;
		__u32		log_level;	/* verbosity level of verifier */
		__u32		log_size;	/* size of user buffer */
		__aligned_u64	log_buf;	/* user supplied buffer */
		__u32		kern_version;	/* checked when prog_type=kprobe */
		__u32		prog_flags;
		char		prog_name[BPF_OBJ_NAME_LEN];
		__u32		prog_ifindex;	/* ifindex of netdev to prep for */
		/* For some prog types expected attach type must be known at
		 * load time to verify attach type specific parts of prog
		 * (context accesses, allowed helpers, etc).
		 */
		__u32		expected_attach_type;
	};

	struct { /* anonymous struct used by BPF_OBJ_* commands */
		__aligned_u64	pathname;
		__u32		bpf_fd;
		__u32		file_flags;
	};

	struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
		__u32		target_fd;	/* container object to attach to */
		__u32		attach_bpf_fd;	/* eBPF program to attach */
		__u32		attach_type;
		__u32		attach_flags;
	};

	struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
		__u32		prog_fd;
		__u32		retval;
		__u32		data_size_in;
		__u32		data_size_out;
		__aligned_u64	data_in;
		__aligned_u64	data_out;
		__u32		repeat;
		__u32		duration;
	} test;

	struct { /* anonymous struct used by BPF_*_GET_*_ID */
		union {
			__u32	start_id;
			__u32	prog_id;
			__u32	map_id;
			__u32	btf_id;
		};
		__u32		next_id;
		__u32		open_flags;
	};

	struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */
		__u32		bpf_fd;
		__u32		info_len;
		__aligned_u64	info;
	} info;

	struct { /* anonymous struct used by BPF_PROG_QUERY command */
		__u32		target_fd;	/* container object to query */
		__u32		attach_type;
		__u32		query_flags;
		__u32		attach_flags;
		__aligned_u64	prog_ids;
		__u32		prog_cnt;
	} query;

	struct {
		__u64		name;
		__u32		prog_fd;
	} raw_tracepoint;

	struct { /* anonymous struct for BPF_BTF_LOAD */
		__aligned_u64	btf;
		__aligned_u64	btf_log_buf;
		__u32		btf_size;
		__u32		btf_log_size;
		__u32		btf_log_level;
	};

	struct {
		__u32		pid;		/* input: pid */
		__u32		fd;		/* input: fd */
		__u32		flags;		/* input: flags */
		__u32		buf_len;	/* input/output: buf len */
		__aligned_u64	buf;		/* input/output:
						 *   tp_name for tracepoint
						 *   symbol for kprobe
						 *   filename for uprobe
						 */
		__u32		prog_id;	/* output: prog_id */
		__u32		fd_type;	/* output: BPF_FD_TYPE_* */
		__u64		probe_offset;	/* output: probe_offset */
		__u64		probe_addr;	/* output: probe_addr */
	} task_fd_query;
} __attribute__((aligned(8)));
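
/* A minimal user-space sketch (illustrative, not part of the UAPI): creating
 * a hash map via the raw syscall(2) wrapper boils down to filling the first
 * anonymous struct of union bpf_attr and issuing BPF_MAP_CREATE:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.map_type    = BPF_MAP_TYPE_HASH;
 *	attr.key_size    = sizeof(__u32);
 *	attr.value_size  = sizeof(__u64);
 *	attr.max_entries = 1024;
 *
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */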

/* The description below is an attempt at providing documentation to eBPF
 * developers about the multiple available eBPF helper functions. It can be
 * parsed and used to produce a manual page. The workflow is the following,
 * and requires the rst2man utility:
 *
 *     $ ./scripts/bpf_helpers_doc.py \
 *             --filename include/uapi/linux/bpf.h > /tmp/bpf-helpers.rst
 *     $ rst2man /tmp/bpf-helpers.rst > /tmp/bpf-helpers.7
 *     $ man /tmp/bpf-helpers.7
 *
 * Note that in order to produce this external documentation, some RST
 * formatting is used in the descriptions to get "bold" and "italics" in
 * manual pages. Also note that the few trailing white spaces are
 * intentional, removing them would break paragraphs for rst2man.
 *
 * Start of BPF helper function descriptions:
 *
 * void *bpf_map_lookup_elem(struct bpf_map *map, const void *key)
 * 	Description
 * 		Perform a lookup in *map* for an entry associated to *key*.
 * 	Return
 * 		Map value associated to *key*, or **NULL** if no entry was
 * 		found.
 *
 * int bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags)
 * 	Description
 * 		Add or update the value of the entry associated to *key* in
 * 		*map* with *value*. *flags* is one of:
 *
 * 		**BPF_NOEXIST**
 * 			The entry for *key* must not exist in the map.
 * 		**BPF_EXIST**
 * 			The entry for *key* must already exist in the map.
 * 		**BPF_ANY**
 * 			No condition on the existence of the entry for *key*.
 *
 * 		Flag value **BPF_NOEXIST** cannot be used for maps of types
 * 		**BPF_MAP_TYPE_ARRAY** or **BPF_MAP_TYPE_PERCPU_ARRAY** (all
 * 		elements always exist), the helper would return an error.
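 *
 * 		An illustrative per-key counter (the map name *my_map* and the
 * 		surrounding program are hypothetical):
 *
 * 		::
 *
 * 			__u64 *count, one = 1;
 *
 * 			count = bpf_map_lookup_elem(&my_map, &key);
 * 			if (count)
 * 				__sync_fetch_and_add(count, 1);
 * 			else
 * 				bpf_map_update_elem(&my_map, &key, &one,
 * 						    BPF_NOEXIST);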
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_map_delete_elem(struct bpf_map *map, const void *key)
 * 	Description
 * 		Delete entry with *key* from *map*.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_probe_read(void *dst, u32 size, const void *src)
 * 	Description
 * 		For tracing programs, safely attempt to read *size* bytes from
 * 		address *src* and store the data in *dst*.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * u64 bpf_ktime_get_ns(void)
 * 	Description
 * 		Return the time elapsed since system boot, in nanoseconds.
 * 	Return
 * 		Current *ktime*.
 *
 * int bpf_trace_printk(const char *fmt, u32 fmt_size, ...)
 * 	Description
 * 		This helper is a "printk()-like" facility for debugging. It
 * 		prints a message defined by format *fmt* (of size *fmt_size*)
 * 		to file *\/sys/kernel/debug/tracing/trace* from DebugFS, if
 * 		available. It can take up to three additional **u64**
 * 		arguments (as for all eBPF helpers, the total number of
 * 		arguments is limited to five).
 *
 * 		Each time the helper is called, it appends a line to the trace.
 * 		The format of the trace is customizable, and the exact output
 * 		one will get depends on the options set in
 * 		*\/sys/kernel/debug/tracing/trace_options* (see also the
 * 		*README* file under the same directory). However, it usually
 * 		defaults to something like:
 *
 * 		::
 *
 * 			telnet-470   [001] .N.. 419421.045894: 0x00000001: <formatted msg>
 *
 * 		In the above:
 *
 * 		* ``telnet`` is the name of the current task.
 * 		* ``470`` is the PID of the current task.
 * 		* ``001`` is the CPU number on which the task is
 * 		  running.
 * 		* In ``.N..``, each character refers to a set of
 * 		  options (whether irqs are enabled, scheduling
 * 		  options, whether hard/softirqs are running, level of
 * 		  preempt_disabled respectively). **N** means that
 * 		  **TIF_NEED_RESCHED** and **PREEMPT_NEED_RESCHED**
 * 		  are set.
 * 		* ``419421.045894`` is a timestamp.
 * 		* ``0x00000001`` is a fake value used by BPF for the
 * 		  instruction pointer register.
 * 		* ``<formatted msg>`` is the message formatted with
 * 		  *fmt*.
 *
 * 		The conversion specifiers supported by *fmt* are similar to,
 * 		but more limited than, those of printk(). They are **%d**,
 * 		**%i**, **%u**, **%x**, **%ld**, **%li**, **%lu**, **%lx**,
 * 		**%lld**, **%lli**, **%llu**, **%llx**, **%p**, **%s**. No
 * 		modifier (size of field, padding with zeroes, etc.) is
 * 		available, and the helper will return **-EINVAL** (but print
 * 		nothing) if it encounters an unknown specifier.
 *
 * 		Also, note that **bpf_trace_printk**\ () is slow, and should
 * 		only be used for debugging purposes. For this reason, a notice
 * 		block (spanning several lines) is printed to kernel logs and
 * 		states that the helper should not be used "for production use"
 * 		the first time this helper is used (or more precisely, when
 * 		**trace_printk**\ () buffers are allocated). For passing values
 * 		to user space, perf events should be preferred.
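 *
 * 		A minimal illustrative call (the format string must reside on
 * 		the program stack):
 *
 * 		::
 *
 * 			char fmt[] = "tgid %d\n";
 * 			__u32 tgid = bpf_get_current_pid_tgid() >> 32;
 *
 * 			bpf_trace_printk(fmt, sizeof(fmt), tgid);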
 * 	Return
 * 		The number of bytes written to the buffer, or a negative error
 * 		in case of failure.
 *
 * u32 bpf_get_prandom_u32(void)
 * 	Description
 * 		Get a pseudo-random number.
 *
 * 		From a security point of view, this helper uses its own
 * 		pseudo-random internal state, and cannot be used to infer the
 * 		seed of other random functions in the kernel. However, it is
 * 		essential to note that the generator used by the helper is not
 * 		cryptographically secure.
 * 	Return
 * 		A random 32-bit unsigned value.
 *
 * u32 bpf_get_smp_processor_id(void)
 * 	Description
 * 		Get the SMP (symmetric multiprocessing) processor id. Note that
 * 		all programs run with preemption disabled, which means that the
 * 		SMP processor id is stable during all the execution of the
 * 		program.
 * 	Return
 * 		The SMP id of the processor running the program.
 *
 * int bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags)
 * 	Description
 * 		Store *len* bytes from address *from* into the packet
 * 		associated to *skb*, at *offset*. *flags* are a combination of
 * 		**BPF_F_RECOMPUTE_CSUM** (automatically recompute the
 * 		checksum for the packet after storing the bytes) and
 * 		**BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\
 * 		**->swhash** and *skb*\ **->l4hash** to 0).
 *
 * 		A call to this helper is susceptible to change the underlying
 * 		packet buffer. Therefore, at load time, all checks on pointers
 * 		previously done by the verifier are invalidated and must be
 * 		performed again, if the helper is used in combination with
 * 		direct packet access.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size)
 * 	Description
 * 		Recompute the layer 3 (e.g. IP) checksum for the packet
 * 		associated to *skb*. Computation is incremental, so the helper
 * 		must know the former value of the header field that was
 * 		modified (*from*), the new value of this field (*to*), and the
 * 		number of bytes (2 or 4) for this field, stored in *size*.
 * 		Alternatively, it is possible to store the difference between
 * 		the previous and the new values of the header field in *to*, by
 * 		setting *from* and *size* to 0. For both methods, *offset*
 * 		indicates the location of the IP checksum within the packet.
 *
 * 		This helper works in combination with **bpf_csum_diff**\ (),
 * 		which does not update the checksum in-place, but offers more
 * 		flexibility and can handle sizes larger than 2 or 4 for the
 * 		checksum to update.
 *
 * 		A call to this helper is susceptible to change the underlying
 * 		packet buffer. Therefore, at load time, all checks on pointers
 * 		previously done by the verifier are invalidated and must be
 * 		performed again, if the helper is used in combination with
 * 		direct packet access.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags)
 * 	Description
 * 		Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the
 * 		packet associated to *skb*. Computation is incremental, so the
 * 		helper must know the former value of the header field that was
 * 		modified (*from*), the new value of this field (*to*), and the
 * 		number of bytes (2 or 4) for this field, stored on the lowest
 * 		four bits of *flags*. Alternatively, it is possible to store
 * 		the difference between the previous and the new values of the
 * 		header field in *to*, by setting *from* and the four lowest
 * 		bits of *flags* to 0. For both methods, *offset* indicates the
 * 		location of the IP checksum within the packet. In addition to
 * 		the size of the field, actual flags can be added to *flags*
 * 		(with a bitwise OR). With **BPF_F_MARK_MANGLED_0**, a null
 * 		checksum is left untouched (unless **BPF_F_MARK_ENFORCE** is
 * 		added as well), and for updates resulting in a null checksum
 * 		the value is set to **CSUM_MANGLED_0** instead. Flag
 * 		**BPF_F_PSEUDO_HDR** indicates the checksum is to be computed
 * 		against a pseudo-header.
 *
 * 		This helper works in combination with **bpf_csum_diff**\ (),
 * 		which does not update the checksum in-place, but offers more
 * 		flexibility and can handle sizes larger than 2 or 4 for the
 * 		checksum to update.
 *
 * 		A call to this helper is susceptible to change the underlying
 * 		packet buffer. Therefore, at load time, all checks on pointers
 * 		previously done by the verifier are invalidated and must be
 * 		performed again, if the helper is used in combination with
 * 		direct packet access.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index)
 * 	Description
 * 		This special helper is used to trigger a "tail call", or in
 * 		other words, to jump into another eBPF program. The same stack
 * 		frame is used (but values on stack and in registers for the
 * 		caller are not accessible to the callee). This mechanism allows
 * 		for program chaining, either for raising the maximum number of
 * 		available eBPF instructions, or to execute given programs in
 * 		conditional blocks. For security reasons, there is an upper
 * 		limit to the number of successive tail calls that can be
 * 		performed.
 *
 * 		Upon call of this helper, the program attempts to jump into a
 * 		program referenced at index *index* in *prog_array_map*, a
 * 		special map of type **BPF_MAP_TYPE_PROG_ARRAY**, and passes
 * 		*ctx*, a pointer to the context.
 *
 * 		If the call succeeds, the kernel immediately runs the first
 * 		instruction of the new program. This is not a function call,
 * 		and it never returns to the previous program. If the call
 * 		fails, then the helper has no effect, and the caller continues
 * 		to run its subsequent instructions. A call can fail if the
 * 		destination program for the jump does not exist (i.e. *index*
 * 		is greater than or equal to the number of entries in
 * 		*prog_array_map*), or if the maximum number of tail calls has
 * 		been reached for this chain of programs. This limit is defined
 * 		in the kernel by the macro **MAX_TAIL_CALL_CNT** (not
 * 		accessible to user space), which is currently set to 32.
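 *
 * 		An illustrative use (the *jmp_table* map is hypothetical):
 *
 * 		::
 *
 * 			bpf_tail_call(ctx, &jmp_table, 2);
 * 			// only reached if the tail call failed, e.g. if
 * 			// slot 2 of jmp_table holds no program
 * 			return 0;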
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags)
 * 	Description
 * 		Clone and redirect the packet associated to *skb* to another
 * 		net device of index *ifindex*. Both ingress and egress
 * 		interfaces can be used for redirection. The **BPF_F_INGRESS**
 * 		value in *flags* is used to make the distinction (ingress path
 * 		is selected if the flag is present, egress path otherwise).
 * 		This is the only flag supported for now.
 *
 * 		In comparison with **bpf_redirect**\ () helper,
 * 		**bpf_clone_redirect**\ () has the associated cost of
 * 		duplicating the packet buffer, but this can be executed out of
 * 		the eBPF program. Conversely, **bpf_redirect**\ () is more
 * 		efficient, but it is handled through an action code where the
 * 		redirection happens only after the eBPF program has returned.
 *
 * 		A call to this helper is susceptible to change the underlying
 * 		packet buffer. Therefore, at load time, all checks on pointers
 * 		previously done by the verifier are invalidated and must be
 * 		performed again, if the helper is used in combination with
 * 		direct packet access.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * u64 bpf_get_current_pid_tgid(void)
 * 	Return
 * 		A 64-bit integer containing the current tgid and pid, and
 * 		created as such:
 * 		*current_task*\ **->tgid << 32 \|**
 * 		*current_task*\ **->pid**.
 *
 * u64 bpf_get_current_uid_gid(void)
 * 	Return
 * 		A 64-bit integer containing the current GID and UID, and
 * 		created as such: *current_gid* **<< 32 \|** *current_uid*.
 *
 * int bpf_get_current_comm(char *buf, u32 size_of_buf)
 * 	Description
 * 		Copy the **comm** attribute of the current task into *buf* of
 * 		*size_of_buf*. The **comm** attribute contains the name of
 * 		the executable (excluding the path) for the current task. The
 * 		*size_of_buf* must be strictly positive. On success, the
 * 		helper makes sure that the *buf* is NUL-terminated. On failure,
 * 		it is filled with zeroes.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * u32 bpf_get_cgroup_classid(struct sk_buff *skb)
 * 	Description
 * 		Retrieve the classid for the current task, i.e. for the net_cls
 * 		cgroup to which *skb* belongs.
 *
 * 		This helper can be used on TC egress path, but not on ingress.
 *
 * 		The net_cls cgroup provides an interface to tag network packets
 * 		based on a user-provided identifier for all traffic coming from
 * 		the tasks belonging to the related cgroup. See also the related
 * 		kernel documentation, available from the Linux sources in file
 * 		*Documentation/cgroup-v1/net_cls.txt*.
 *
 * 		The Linux kernel has two versions for cgroups: there are
 * 		cgroups v1 and cgroups v2. Both are available to users, who can
 * 		use a mixture of them, but note that the net_cls cgroup is for
 * 		cgroup v1 only. This makes it incompatible with BPF programs
 * 		run on cgroups, which is a cgroup-v2-only feature (a socket can
 * 		only hold data for one version of cgroups at a time).
 *
 * 		This helper is only available if the kernel was compiled with
 * 		the **CONFIG_CGROUP_NET_CLASSID** configuration option set to
 * 		"**y**" or to "**m**".
 * 	Return
 * 		The classid, or 0 for the default unconfigured classid.
 *
 * int bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
 * 	Description
 * 		Push a *vlan_tci* (VLAN tag control information) of protocol
 * 		*vlan_proto* to the packet associated to *skb*, then update
 * 		the checksum. Note that if *vlan_proto* is different from
 * 		**ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to
 * 		be **ETH_P_8021Q**.
 *
 * 		A call to this helper is susceptible to change the underlying
 * 		packet buffer. Therefore, at load time, all checks on pointers
 * 		previously done by the verifier are invalidated and must be
 * 		performed again, if the helper is used in combination with
 * 		direct packet access.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_skb_vlan_pop(struct sk_buff *skb)
 * 	Description
 * 		Pop a VLAN header from the packet associated to *skb*.
 *
 * 		A call to this helper is susceptible to change the underlying
 * 		packet buffer. Therefore, at load time, all checks on pointers
 * 		previously done by the verifier are invalidated and must be
 * 		performed again, if the helper is used in combination with
 * 		direct packet access.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
 * 	Description
 * 		Get tunnel metadata. This helper takes a pointer *key* to an
 * 		empty **struct bpf_tunnel_key** of **size**, that will be
 * 		filled with tunnel metadata for the packet associated to *skb*.
 * 		The *flags* can be set to **BPF_F_TUNINFO_IPV6**, which
 * 		indicates that the tunnel is based on IPv6 protocol instead of
 * 		IPv4.
 *
 * 		The **struct bpf_tunnel_key** is an object that generalizes the
 * 		principal parameters used by various tunneling protocols into a
 * 		single struct. This way, it can be used to easily make a
 * 		decision based on the contents of the encapsulation header,
 * 		"summarized" in this struct. In particular, it holds the IP
 * 		address of the remote end (IPv4 or IPv6, depending on the case)
 * 		in *key*\ **->remote_ipv4** or *key*\ **->remote_ipv6**. Also,
 * 		this struct exposes the *key*\ **->tunnel_id**, which is
 * 		generally mapped to a VNI (Virtual Network Identifier), making
 * 		it programmable together with the **bpf_skb_set_tunnel_key**\
 * 		() helper.
 *
 * 		Let's imagine that the following code is part of a program
 * 		attached to the TC ingress interface, on one end of a GRE
 * 		tunnel, and is supposed to filter out all messages coming from
 * 		remote ends with IPv4 address other than 10.0.0.1:
 *
 * 		::
 *
 * 			int ret;
 * 			struct bpf_tunnel_key key = {};
 *
 * 			ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
 * 			if (ret < 0)
 * 				return TC_ACT_SHOT;	// drop packet
 *
 * 			if (key.remote_ipv4 != 0x0a000001)
 * 				return TC_ACT_SHOT;	// drop packet
 *
 * 			return TC_ACT_OK;		// accept packet
 *
 * 		This interface can also be used with all encapsulation devices
 * 		that can operate in "collect metadata" mode: instead of having
 * 		one network device per specific configuration, the "collect
 * 		metadata" mode only requires a single device where the
 * 		configuration can be extracted from this helper.
 *
 * 		This can be used together with various tunnels such as VXLan,
 * 		Geneve, GRE or IP in IP (IPIP).
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
 * 	Description
 * 		Populate tunnel metadata for packet associated to *skb*. The
 * 		tunnel metadata is set to the contents of *key*, of *size*. The
 * 		*flags* can be set to a combination of the following values:
 *
 * 		**BPF_F_TUNINFO_IPV6**
 * 			Indicate that the tunnel is based on IPv6 protocol
 * 			instead of IPv4.
 * 		**BPF_F_ZERO_CSUM_TX**
 * 			For IPv4 packets, add a flag to tunnel metadata
 * 			indicating that checksum computation should be skipped
 * 			and checksum set to zeroes.
 * 		**BPF_F_DONT_FRAGMENT**
 * 			Add a flag to tunnel metadata indicating that the
 * 			packet should not be fragmented.
 * 		**BPF_F_SEQ_NUMBER**
 * 			Add a flag to tunnel metadata indicating that a
 * 			sequence number should be added to tunnel header before
 * 			sending the packet. This flag was added for GRE
 * 			encapsulation, but might be used with other protocols
 * 			as well in the future.
 *
 * 		Here is a typical usage on the transmit path:
 *
 * 		::
 *
 * 			struct bpf_tunnel_key key;
 * 			     populate key ...
 * 			bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0);
 * 			bpf_clone_redirect(skb, vxlan_dev_ifindex, 0);
 *
 * 		See also the description of the **bpf_skb_get_tunnel_key**\ ()
 * 		helper for additional information.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * u64 bpf_perf_event_read(struct bpf_map *map, u64 flags)
 * 	Description
 * 		Read the value of a perf event counter. This helper relies on a
 * 		*map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of
 * 		the perf event counter is selected when *map* is updated with
 * 		perf event file descriptors. The *map* is an array whose size
 * 		is the number of available CPUs, and each cell contains a value
 * 		relative to one CPU. The value to retrieve is indicated by
 * 		*flags*, that contains the index of the CPU to look up, masked
 * 		with **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to
 * 		**BPF_F_CURRENT_CPU** to indicate that the value for the
 * 		current CPU should be retrieved.
 *
 * 		Note that before Linux 4.13, only hardware perf events can be
 * 		retrieved.
 *
 * 		Also, be aware that the newer helper
 * 		**bpf_perf_event_read_value**\ () is recommended over
 * 		**bpf_perf_event_read**\ () in general. The latter has some ABI
 * 		quirks where error and counter value are used as a return code
 * 		(which is wrong to do since ranges may overlap). This issue is
 * 		fixed with **bpf_perf_event_read_value**\ (), which at the same
 * 		time provides more features over the **bpf_perf_event_read**\
 * 		() interface. Please refer to the description of
 * 		**bpf_perf_event_read_value**\ () for details.
 * 	Return
 * 		The value of the perf event counter read from the map, or a
 * 		negative error code in case of failure.
 *
 * int bpf_redirect(u32 ifindex, u64 flags)
 * 	Description
 * 		Redirect the packet to another net device of index *ifindex*.
 * 		This helper is somewhat similar to **bpf_clone_redirect**\
 * 		(), except that the packet is not cloned, which provides
 * 		increased performance.
 *
 * 		Except for XDP, both ingress and egress interfaces can be used
 * 		for redirection. The **BPF_F_INGRESS** value in *flags* is used
 * 		to make the distinction (ingress path is selected if the flag
 * 		is present, egress path otherwise). Currently, XDP only
 * 		supports redirection to the egress interface, and accepts no
 * 		flag at all.
 *
 * 		The same effect can be attained with the more generic
 * 		**bpf_redirect_map**\ (), which requires specific maps to be
 * 		used but offers better performance.
 * 	Return
 * 		For XDP, the helper returns **XDP_REDIRECT** on success or
 * 		**XDP_ABORTED** on error. For other program types, the values
 * 		are **TC_ACT_REDIRECT** on success or **TC_ACT_SHOT** on
 * 		error.
 *
 * u32 bpf_get_route_realm(struct sk_buff *skb)
 * 	Description
 * 		Retrieve the realm of the route, that is to say the
 * 		**tclassid** field of the destination for the *skb*. The
 * 		identifier retrieved is a user-provided tag, similar to the
 * 		one used with the net_cls cgroup (see description for
 * 		**bpf_get_cgroup_classid**\ () helper), but here this tag is
 * 		held by a route (a destination entry), not by a task.
 *
 * 		Retrieving this identifier works with the clsact TC egress hook
 * 		(see also **tc-bpf(8)**), or alternatively on conventional
 * 		classful egress qdiscs, but not on TC ingress path. In case of
 * 		clsact TC egress hook, this has the advantage that, internally,
 * 		the destination entry has not been dropped yet in the transmit
 * 		path. Therefore, the destination entry does not need to be
 * 		artificially held via **netif_keep_dst**\ () for a classful
 * 		qdisc until the *skb* is freed.
 *
 * 		This helper is available only if the kernel was compiled with
 * 		the **CONFIG_IP_ROUTE_CLASSID** configuration option.
 * 	Return
 * 		The realm of the route for the packet associated to *skb*, or 0
 * 		if none was found.
 *
 * int bpf_perf_event_output(struct pt_regs *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
 * 	Description
 * 		Write raw *data* blob into a special BPF perf event held by
 * 		*map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
 * 		event must have the following attributes: **PERF_SAMPLE_RAW**
 * 		as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
 * 		**PERF_COUNT_SW_BPF_OUTPUT** as **config**.
 *
 * 		The *flags* are used to indicate the index in *map* for which
 * 		the value must be put, masked with **BPF_F_INDEX_MASK**.
 * 		Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
 * 		to indicate that the index of the current CPU core should be
 * 		used.
 *
 * 		The value to write, of *size*, is passed through eBPF stack and
 * 		pointed by *data*.
 *
 * 		The context of the program *ctx* also needs to be passed to the
 * 		helper.
 *
 * 		In user space, a program willing to read the values needs to
 * 		call **perf_event_open**\ () on the perf event (either for
 * 		one or for all CPUs) and to store the file descriptor into the
 * 		*map*. This must be done before the eBPF program can send data
 * 		into it. An example is available in file
 * 		*samples/bpf/trace_output_user.c* in the Linux kernel source
 * 		tree (the eBPF program counterpart is in
 * 		*samples/bpf/trace_output_kern.c*).
 *
 * 		**bpf_perf_event_output**\ () achieves better performance
 * 		than **bpf_trace_printk**\ () for sharing data with user
 * 		space, and is much better suited for streaming data from eBPF
 * 		programs.
 *
 * 		Note that this helper is not restricted to tracing use cases
 * 		and can be used with programs attached to TC or XDP as well,
 * 		where it allows for passing data to user space listeners. Data
 * 		can be:
 *
 * 		* Only custom structs,
 * 		* Only the packet payload, or
 * 		* A combination of both.
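 *
 * 		An illustrative call from a TC program (the *events* map and
 * 		the *event* structure are hypothetical):
 *
 * 		::
 *
 * 			struct event evt = { .len = skb->len };
 *
 * 			bpf_perf_event_output(skb, &events, BPF_F_CURRENT_CPU,
 * 					      &evt, sizeof(evt));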
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_skb_load_bytes(const struct sk_buff *skb, u32 offset, void *to, u32 len)
 * 	Description
 * 		This helper was provided as an easy way to load data from a
 * 		packet. It can be used to load *len* bytes from *offset* from
 * 		the packet associated to *skb*, into the buffer pointed by
 * 		*to*.
 *
 * 		Since Linux 4.7, usage of this helper has mostly been replaced
 * 		by "direct packet access", enabling packet data to be
 * 		manipulated with *skb*\ **->data** and *skb*\ **->data_end**
 * 		pointing respectively to the first byte of packet data and to
 * 		the byte after the last byte of packet data. However, it
 * 		remains useful if one wishes to read large quantities of data
 * 		at once from a packet into the eBPF stack.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_get_stackid(struct pt_regs *ctx, struct bpf_map *map, u64 flags)
 * 	Description
 * 		Walk a user or a kernel stack and return its id. To achieve
 * 		this, the helper needs *ctx*, which is a pointer to the context
 * 		on which the tracing program is executed, and a pointer to a
 * 		*map* of type **BPF_MAP_TYPE_STACK_TRACE**.
 *
 * 		The last argument, *flags*, holds the number of stack frames to
 * 		skip (from 0 to 255), masked with
 * 		**BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
 * 		a combination of the following flags:
 *
 * 		**BPF_F_USER_STACK**
 * 			Collect a user space stack instead of a kernel stack.
 * 		**BPF_F_FAST_STACK_CMP**
 * 			Compare stacks by hash only.
 * 		**BPF_F_REUSE_STACKID**
 * 			If two different stacks hash into the same *stackid*,
 * 			discard the old one.
 *
 * 		The stack id retrieved is a 32-bit integer handle which
 * 		can be further combined with other data (including other stack
 * 		ids) and used as a key into maps. This can be useful for
 * 		generating a variety of graphs (such as flame graphs or off-cpu
 * 		graphs).
 *
 * 		For walking a stack, this helper is an improvement over
 * 		**bpf_probe_read**\ (), which can be used with unrolled loops
 * 		but is not efficient and consumes a lot of eBPF instructions.
 * 		Instead, **bpf_get_stackid**\ () can collect up to
 * 		**PERF_MAX_STACK_DEPTH** kernel and user frames. Note that
 * 		this limit can be controlled with the **sysctl** program, and
 * 		that it should be manually increased in order to profile long
 * 		user stacks (such as stacks for Java programs). To do so, use:
 *
 * 		::
 *
 * 			# sysctl kernel.perf_event_max_stack=<new value>
 * 	Return
 * 		The positive or null stack id on success, or a negative error
 * 		in case of failure.
 *
 * s64 bpf_csum_diff(__be32 *from, u32 from_size, __be32 *to, u32 to_size, __wsum seed)
 * 	Description
 * 		Compute a checksum difference, from the raw buffer pointed by
 * 		*from*, of length *from_size* (that must be a multiple of 4),
 * 		towards the raw buffer pointed by *to*, of size *to_size*
 * 		(same remark). An optional *seed* can be added to the value
 * 		(this can be cascaded, the seed may come from a previous call
 * 		to the helper).
 *
 * 		This is flexible enough to be used in several ways:
 *
 * 		* With *from_size* == 0, *to_size* > 0 and *seed* set to
 * 		  checksum, it can be used when pushing new data.
 * 		* With *from_size* > 0, *to_size* == 0 and *seed* set to
 * 		  checksum, it can be used when removing data from a packet.
 * 		* With *from_size* > 0, *to_size* > 0 and *seed* set to 0, it
 * 		  can be used to compute a diff. Note that *from_size* and
 * 		  *to_size* do not need to be equal.
 *
 * 		This helper can be used in combination with
 * 		**bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ (), to
 * 		which one can feed in the difference computed with
 * 		**bpf_csum_diff**\ ().
 * 	Return
 * 		The checksum result, or a negative error code in case of
 * 		failure.
 *
 * int bpf_skb_get_tunnel_opt(struct sk_buff *skb, u8 *opt, u32 size)
 * 	Description
 * 		Retrieve tunnel options metadata for the packet associated to
 * 		*skb*, and store the raw tunnel option data to the buffer *opt*
 * 		of *size*.
 *
 * 		This helper can be used with encapsulation devices that can
 * 		operate in "collect metadata" mode (please refer to the related
 * 		note in the description of **bpf_skb_get_tunnel_key**\ () for
 * 		more details). A particular example where this can be used is
 * 		in combination with the Geneve encapsulation protocol, where it
 * 		allows for pushing (with **bpf_skb_set_tunnel_opt**\ ()) and
 * 		retrieving (with this helper) arbitrary TLVs (Type-Length-Value
 * 		headers) from the eBPF program. This allows for full
 * 		customization of these headers.
 * 	Return
 * 		The size of the option data retrieved.
 *
 * int bpf_skb_set_tunnel_opt(struct sk_buff *skb, u8 *opt, u32 size)
 * 	Description
 * 		Set tunnel options metadata for the packet associated to *skb*
 * 		to the option data contained in the raw buffer *opt* of *size*.
 *
 * 		See also the description of the **bpf_skb_get_tunnel_opt**\ ()
 * 		helper for additional information.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags)
 * 	Description
 * 		Change the protocol of the *skb* to *proto*.
 * 		Currently supported are transitions from IPv4 to IPv6, and
 * 		from IPv6 to IPv4. The helper takes care of the groundwork for
 * 		the transition, including resizing the socket buffer. The eBPF
 * 		program is expected to fill the new headers, if any, via
 * 		**bpf_skb_store_bytes**\ () and to recompute the checksums with
 * 		**bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\
 * 		(). The main case for this helper is to perform NAT64
 * 		operations out of an eBPF program.
 *
 * 		Internally, the GSO type is marked as dodgy so that headers are
 * 		checked and segments are recalculated by the GSO/GRO engine.
 * 		The size for GSO target is adapted as well.
 *
 * 		All values for *flags* are reserved for future usage, and must
 * 		be left at zero.
 *
 * 		A call to this helper is susceptible to change the underlying
 * 		packet buffer. Therefore, at load time, all checks on pointers
 * 		previously done by the verifier are invalidated and must be
 * 		performed again, if the helper is used in combination with
 * 		direct packet access.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_skb_change_type(struct sk_buff *skb, u32 type)
 * 	Description
 * 		Change the packet type for the packet associated to *skb*. This
 * 		comes down to setting *skb*\ **->pkt_type** to *type*, except
 * 		the eBPF program does not have write access to *skb*\
 * 		**->pkt_type** beside this helper. Using a helper here allows
 * 		for graceful handling of errors.
 *
 * 		The major use case is to change incoming *skb*s to
 * 		**PACKET_HOST** in a programmatic way instead of having to
 * 		recirculate via **redirect**\ (..., **BPF_F_INGRESS**), for
 * 		example.
 *
 * 		Note that *type* only allows certain values. At this time, they
 * 		are:
 *
 * 		**PACKET_HOST**
 * 			Packet is for us.
 * 		**PACKET_BROADCAST**
 * 			Send packet to all.
 * 		**PACKET_MULTICAST**
 * 			Send packet to group.
 * 		**PACKET_OTHERHOST**
 * 			Send packet to someone else.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index)
 * 	Description
 * 		Check whether *skb* is a descendant of the cgroup2 held by
 * 		*map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*.
 * 	Return
 * 		The return value depends on the result of the test, and can be:
 *
 * 		* 0, if the *skb* failed the cgroup2 descendant test.
 * 		* 1, if the *skb* succeeded the cgroup2 descendant test.
 * 		* A negative error code, if an error occurred.
 *
 * u32 bpf_get_hash_recalc(struct sk_buff *skb)
 * 	Description
 * 		Retrieve the hash of the packet, *skb*\ **->hash**. If it is
 * 		not set, in particular if the hash was cleared due to mangling,
 * 		recompute this hash. Later accesses to the hash can be done
 * 		directly with *skb*\ **->hash**.
 *
 * 		Calling **bpf_set_hash_invalid**\ (), changing a packet
 * 		protocol with **bpf_skb_change_proto**\ (), or calling
 * 		**bpf_skb_store_bytes**\ () with the
 * 		**BPF_F_INVALIDATE_HASH** flag are actions susceptible to clear
 * 		the hash and to trigger a new computation for the next call to
 * 		**bpf_get_hash_recalc**\ ().
 * 	Return
 * 		The 32-bit hash.
 *
 * u64 bpf_get_current_task(void)
 * 	Return
 * 		A pointer to the current task struct.
 *
 * int bpf_probe_write_user(void *dst, const void *src, u32 len)
 * 	Description
 * 		Attempt in a safe way to write *len* bytes from the buffer
 * 		*src* to *dst* in memory. It only works for threads that are in
 * 		user context, and *dst* must be a valid user space address.
 *
 * 		This helper should not be used to implement any kind of
 * 		security mechanism because of TOC-TOU attacks, but rather to
 * 		debug, divert, and manipulate execution of semi-cooperative
 * 		processes.
 *
 * 		Keep in mind that this feature is meant for experiments, and it
 * 		has a risk of crashing the system and running programs.
 * 		Therefore, when an eBPF program using this helper is attached,
 * 		a warning including PID and process name is printed to kernel
 * 		logs.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_current_task_under_cgroup(struct bpf_map *map, u32 index)
 * 	Description
 * 		Check whether the probe is being run in the context of a given
 * 		subset of the cgroup2 hierarchy. The cgroup2 to test is held by
 * 		*map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*.
 * 	Return
 * 		The return value depends on the result of the test, and can be:
 *
 * 		* 0, if current task belongs to the cgroup2.
 * 		* 1, if current task does not belong to the cgroup2.
 * 		* A negative error code, if an error occurred.
 *
 * int bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags)
 * 	Description
 * 		Resize (trim or grow) the packet associated to *skb* to the
 * 		new *len*. The *flags* are reserved for future usage, and must
 * 		be left at zero.
 *
 * 		The basic idea is that the helper performs the needed work to
 * 		change the size of the packet, then the eBPF program rewrites
 * 		the rest via helpers like **bpf_skb_store_bytes**\ (),
 * 		**bpf_l3_csum_replace**\ (), **bpf_l4_csum_replace**\ ()
 * 		and others. This helper is a slow path utility intended for
 * 		replies with control messages. And because it is targeted for
 * 		slow path, the helper itself can afford to be slow: it
 * 		implicitly linearizes, unclones and drops offloads from the
 * 		*skb*.
 *
 * 		A call to this helper is susceptible to change the underlying
 * 		packet buffer. Therefore, at load time, all checks on pointers
 * 		previously done by the verifier are invalidated and must be
 * 		performed again, if the helper is used in combination with
 * 		direct packet access.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_skb_pull_data(struct sk_buff *skb, u32 len)
 * 	Description
 * 		Pull in non-linear data in case the *skb* is non-linear and not
 * 		all of *len* are part of the linear section. Make *len* bytes
 * 		from *skb* readable and writable. If a zero value is passed for
 * 		*len*, then the whole length of the *skb* is pulled.
 *
 * 		This helper is only needed for reading and writing with direct
 * 		packet access.
 *
 * 		For direct packet access, testing that offsets to access
 * 		are within packet boundaries (test on *skb*\ **->data_end**) is
 * 		susceptible to fail if offsets are invalid, or if the requested
 * 		data is in non-linear parts of the *skb*. On failure the
 * 		program can just bail out, or in the case of a non-linear
 * 		buffer, use a helper to make the data available.
 * 		The **bpf_skb_load_bytes**\ () helper is a first solution to
 * 		access the data. Another one consists in using
 * 		**bpf_skb_pull_data**\ () to pull in the non-linear parts once,
 * 		then retesting and eventually accessing the data.
 *
 * 		At the same time, this also makes sure the *skb* is uncloned,
 * 		which is a necessary condition for direct write. As this needs
 * 		to be an invariant for the write part only, the verifier
 * 		detects writes and adds a prologue that calls
 * 		**bpf_skb_pull_data**\ () to effectively unclone the *skb* from
 * 		the very beginning in case it is indeed cloned.
 *
 * 		A call to this helper is susceptible to change the underlying
 * 		packet buffer. Therefore, at load time, all checks on pointers
 * 		previously done by the verifier are invalidated and must be
 * 		performed again, if the helper is used in combination with
 * 		direct packet access.
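 *
 * 		An illustrative sequence for a TC program, pulling in an
 * 		Ethernet header (the casts reflect that the program sees
 * 		*skb*\ **->data** and *skb*\ **->data_end** as 32-bit fields):
 *
 * 		::
 *
 * 			void *data, *data_end;
 *
 * 			data     = (void *)(long)skb->data;
 * 			data_end = (void *)(long)skb->data_end;
 * 			if (data + 14 > data_end) {
 * 				if (bpf_skb_pull_data(skb, 14))
 * 					return TC_ACT_SHOT;
 * 				// prior checks were invalidated: reload the
 * 				// pointers and re-test before reading
 * 				data     = (void *)(long)skb->data;
 * 				data_end = (void *)(long)skb->data_end;
 * 				if (data + 14 > data_end)
 * 					return TC_ACT_SHOT;
 * 			}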
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * s64 bpf_csum_update(struct sk_buff *skb, __wsum csum)
 * 	Description
 * 		Add the checksum *csum* into *skb*\ **->csum** in case the
 * 		driver has supplied a checksum for the entire packet into that
 * 		field. Return an error otherwise. This helper is intended to be
 * 		used in combination with **bpf_csum_diff**\ (), in particular
 * 		when the checksum needs to be updated after data has been
 * 		written into the packet through direct packet access.
 * 	Return
 * 		The checksum on success, or a negative error code in case of
 * 		failure.
 *
 * void bpf_set_hash_invalid(struct sk_buff *skb)
 * 	Description
 * 		Invalidate the current *skb*\ **->hash**. It can be used after
 * 		mangling on headers through direct packet access, in order to
 * 		indicate that the hash is outdated and to trigger a
 * 		recalculation the next time the kernel tries to access this
 * 		hash or when the **bpf_get_hash_recalc**\ () helper is called.
 *
 * int bpf_get_numa_node_id(void)
 * 	Description
 * 		Return the id of the current NUMA node. The primary use case
 * 		for this helper is the selection of sockets for the local NUMA
 * 		node, when the program is attached to sockets using the
 * 		**SO_ATTACH_REUSEPORT_EBPF** option (see also **socket(7)**),
 * 		but the helper is also available to other eBPF program types,
 * 		similarly to **bpf_get_smp_processor_id**\ ().
 * 	Return
 * 		The id of current NUMA node.
 *
 * int bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags)
 * 	Description
 * 		Grow the headroom of the packet associated to *skb* and adjust
 * 		the offset of the MAC header accordingly, adding *len* bytes of
 * 		space. It automatically extends and reallocates memory as
 * 		required.
 *
 * 		This helper can be used on a layer 3 *skb* to push a MAC header
 * 		for redirection into a layer 2 device.
 *
 * 		All values for *flags* are reserved for future usage, and must
 * 		be left at zero.
 *
 * 		A call to this helper is susceptible to change the underlying
 * 		packet buffer. Therefore, at load time, all checks on pointers
 * 		previously done by the verifier are invalidated and must be
 * 		performed again, if the helper is used in combination with
 * 		direct packet access.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta)
 * 	Description
 * 		Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that
 * 		it is possible to use a negative value for *delta*. This helper
 * 		can be used to prepare the packet for pushing or popping
 * 		headers.
 *
 * 		A call to this helper is susceptible to change the underlying
 * 		packet buffer. Therefore, at load time, all checks on pointers
 * 		previously done by the verifier are invalidated and must be
 * 		performed again, if the helper is used in combination with
 * 		direct packet access.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr)
 * 	Description
 * 		Copy a NUL terminated string from an unsafe address
 * 		*unsafe_ptr* to *dst*. The *size* should include the
 * 		terminating NUL byte. In case the string length is smaller than
 * 		*size*, the target is not padded with further NUL bytes. If the
 * 		string length is larger than *size*, just *size*-1 bytes are
 * 		copied and the last byte is set to NUL.
 *
 * 		On success, the length of the copied string is returned. This
 * 		makes this helper useful in tracing programs for reading
 * 		strings, and more importantly to get its length at runtime. See
 * 		the following snippet:
 *
 * 		::
 *
 * 			SEC("kprobe/sys_open")
 * 			void bpf_sys_open(struct pt_regs *ctx)
 * 			{
 * 				char buf[PATHLEN]; // PATHLEN is defined to 256
 * 				int res = bpf_probe_read_str(buf, sizeof(buf),
 * 					                     ctx->di);
 *
 * 				// Consume buf, for example push it to
 * 				// userspace via bpf_perf_event_output(); we
 * 				// can use res (the string length) as event
 * 				// size, after checking its boundaries.
 * 			}
 *
 * 		In comparison, using the **bpf_probe_read**\ () helper here
 * 		instead to read the string would require to estimate the length
 * 		at compile time, and would often result in copying more memory
 * 		than necessary.
 *
 * 		Another useful use case is when parsing individual process
 * 		arguments or individual environment variables navigating
 * 		*current*\ **->mm->arg_start** and *current*\
 * 		**->mm->env_start**: using this helper and the return value,
 * 		one can quickly iterate at the right offset of the memory area.
 * 	Return
 * 		On success, the strictly positive length of the string,
 * 		including the trailing NUL character. On error, a negative
 * 		value.
 *
 * u64 bpf_get_socket_cookie(struct sk_buff *skb)
 * 	Description
 * 		If the **struct sk_buff** pointed by *skb* has a known socket,
 * 		retrieve the cookie (generated by the kernel) of this socket.
 * 		If no cookie has been set yet, generate a new cookie. Once
 * 		generated, the socket cookie remains stable for the life of the
 * 		socket. This helper can be useful for monitoring per socket
 * 		networking traffic statistics as it provides a unique socket
 * 		identifier per namespace.
 * 	Return
 * 		An 8-byte long non-decreasing number on success, or 0 if the
 * 		socket field is missing inside *skb*.
 *
 * u32 bpf_get_socket_uid(struct sk_buff *skb)
 * 	Return
 * 		The owner UID of the socket associated to *skb*. If the socket
if it is a 1378 * time-wait or a request socket instead), **overflowuid** value 1379 * is returned (note that **overflowuid** might also be the actual 1380 * UID value for the socket). 1381 * 1382 * u32 bpf_set_hash(struct sk_buff *skb, u32 hash) 1383 * Description 1384 * Set the full hash for *skb* (set the field *skb*\ **->hash**) 1385 * to value *hash*. 1386 * Return 1387 * 0 1388 * 1389 * int bpf_setsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, char *optval, int optlen) 1390 * Description 1391 * Emulate a call to **setsockopt()** on the socket associated to 1392 * *bpf_socket*, which must be a full socket. The *level* at 1393 * which the option resides and the name *optname* of the option 1394 * must be specified, see **setsockopt(2)** for more information. 1395 * The option value of length *optlen* is pointed by *optval*. 1396 * 1397 * This helper actually implements a subset of **setsockopt()**. 1398 * It supports the following *level*\ s: 1399 * 1400 * * **SOL_SOCKET**, which supports the following *optname*\ s: 1401 * **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**, 1402 * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**. 1403 * * **IPPROTO_TCP**, which supports the following *optname*\ s: 1404 * **TCP_CONGESTION**, **TCP_BPF_IW**, 1405 * **TCP_BPF_SNDCWND_CLAMP**. 1406 * * **IPPROTO_IP**, which supports *optname* **IP_TOS**. 1407 * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**. 1408 * Return 1409 * 0 on success, or a negative error in case of failure. 1410 * 1411 * int bpf_skb_adjust_room(struct sk_buff *skb, u32 len_diff, u32 mode, u64 flags) 1412 * Description 1413 * Grow or shrink the room for data in the packet associated to 1414 * *skb* by *len_diff*, and according to the selected *mode*. 1415 * 1416 * There is a single supported mode at this time: 1417 * 1418 * * **BPF_ADJ_ROOM_NET**: Adjust room at the network layer 1419 * (room space is added or removed below the layer 3 header). 1420 * 1421 * All values for *flags* are reserved for future usage, and must 1422 * be left at zero. 1423 * 1424 * A call to this helper is susceptible to change the underlaying 1425 * packet buffer. Therefore, at load time, all checks on pointers 1426 * previously done by the verifier are invalidated and must be 1427 * performed again, if the helper is used in combination with 1428 * direct packet access. 1429 * Return 1430 * 0 on success, or a negative error in case of failure. 1431 * 1432 * int bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags) 1433 * Description 1434 * Redirect the packet to the endpoint referenced by *map* at 1435 * index *key*. Depending on its type, this *map* can contain 1436 * references to net devices (for forwarding packets through other 1437 * ports), or to CPUs (for redirecting XDP frames to another CPU; 1438 * but this is only implemented for native XDP (with driver 1439 * support) as of this writing). 1440 * 1441 * All values for *flags* are reserved for future usage, and must 1442 * be left at zero. 1443 * 1444 * When used to redirect packets to net devices, this helper 1445 * provides a high performance increase over **bpf_redirect**\ (). 1446 * This is due to various implementation details of the underlying 1447 * mechanisms, one of which is the fact that **bpf_redirect_map**\ 1448 * () tries to send packet as a "bulk" to the device. 1449 * Return 1450 * **XDP_REDIRECT** on success, or **XDP_ABORTED** on error. 
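 *
 * 		A minimal XDP sketch using a **BPF_MAP_TYPE_DEVMAP** with this
 * 		helper (assuming the **struct bpf_map_def** and **SEC**\ ()
 * 		conventions from the kernel samples' **bpf_helpers.h**):
 *
 * 		::
 *
 * 			struct bpf_map_def SEC("maps") tx_port = {
 * 				.type        = BPF_MAP_TYPE_DEVMAP,
 * 				.key_size    = sizeof(__u32),
 * 				.value_size  = sizeof(__u32),
 * 				.max_entries = 64,
 * 			};
 *
 * 			SEC("xdp")
 * 			int xdp_redirect_prog(struct xdp_md *ctx)
 * 			{
 * 				__u32 key = 0; // egress slot chosen by the program
 *
 * 				// Returns XDP_REDIRECT on success, XDP_ABORTED
 * 				// otherwise, so it can be returned directly.
 * 				return bpf_redirect_map(&tx_port, key, 0);
 * 			}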
1451 * 1452 * int bpf_sk_redirect_map(struct bpf_map *map, u32 key, u64 flags) 1453 * Description 1454 * Redirect the packet to the socket referenced by *map* (of type 1455 * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and 1456 * egress interfaces can be used for redirection. The 1457 * **BPF_F_INGRESS** value in *flags* is used to make the 1458 * distinction (ingress path is selected if the flag is present, 1459 * egress path otherwise). This is the only flag supported for now. 1460 * Return 1461 * **SK_PASS** on success, or **SK_DROP** on error. 1462 * 1463 * int bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) 1464 * Description 1465 * Add an entry to, or update a *map* referencing sockets. The 1466 * *skops* is used as a new value for the entry associated to 1467 * *key*. *flags* is one of: 1468 * 1469 * **BPF_NOEXIST** 1470 * The entry for *key* must not exist in the map. 1471 * **BPF_EXIST** 1472 * The entry for *key* must already exist in the map. 1473 * **BPF_ANY** 1474 * No condition on the existence of the entry for *key*. 1475 * 1476 * If the *map* has eBPF programs (parser and verdict), those will 1477 * be inherited by the socket being added. If the socket is 1478 * already attached to eBPF programs, this results in an error. 1479 * Return 1480 * 0 on success, or a negative error in case of failure. 1481 * 1482 * int bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta) 1483 * Description 1484 * Adjust the address pointed by *xdp_md*\ **->data_meta** by 1485 * *delta* (which can be positive or negative). Note that this 1486 * operation modifies the address stored in *xdp_md*\ **->data**, 1487 * so the latter must be loaded only after the helper has been 1488 * called. 1489 * 1490 * The use of *xdp_md*\ **->data_meta** is optional and programs 1491 * are not required to use it. The rationale is that when the 1492 * packet is processed with XDP (e.g. as DoS filter), it is 1493 * possible to push further meta data along with it before passing 1494 * to the stack, and to give the guarantee that an ingress eBPF 1495 * program attached as a TC classifier on the same device can pick 1496 * this up for further post-processing. Since TC works with socket 1497 * buffers, it remains possible to set from XDP the **mark** or 1498 * **priority** pointers, or other pointers for the socket buffer. 1499 * Having this scratch space generic and programmable allows for 1500 * more flexibility as the user is free to store whatever meta 1501 * data they need. 1502 * 1503 * A call to this helper is susceptible to change the underlaying 1504 * packet buffer. Therefore, at load time, all checks on pointers 1505 * previously done by the verifier are invalidated and must be 1506 * performed again, if the helper is used in combination with 1507 * direct packet access. 1508 * Return 1509 * 0 on success, or a negative error in case of failure. 1510 * 1511 * int bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size) 1512 * Description 1513 * Read the value of a perf event counter, and store it into *buf* 1514 * of size *buf_size*. This helper relies on a *map* of type 1515 * **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of the perf event 1516 * counter is selected when *map* is updated with perf event file 1517 * descriptors. The *map* is an array whose size is the number of 1518 * available CPUs, and each cell contains a value relative to one 1519 * CPU. 
The value to retrieve is indicated by *flags*, that
1520 * contains the index of the CPU to look up, masked with
1521 * **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to
1522 * **BPF_F_CURRENT_CPU** to indicate that the value for the
1523 * current CPU should be retrieved.
1524 *
1525 * This helper behaves in a way close to
1526 * **bpf_perf_event_read**\ () helper, save that instead of
1527 * just returning the value observed, it fills the *buf*
1528 * structure. This allows for additional data to be retrieved: in
1529 * particular, the enabled and running times (in *buf*\
1530 * **->enabled** and *buf*\ **->running**, respectively) are
1531 * copied. In general, **bpf_perf_event_read_value**\ () is
1532 * recommended over **bpf_perf_event_read**\ (), which has some
1533 * ABI issues and provides fewer functionalities.
1534 *
1535 * These values are interesting, because hardware PMU (Performance
1536 * Monitoring Unit) counters are limited resources. When there are
1537 * more PMU based perf events opened than available counters, the
1538 * kernel will multiplex these events so each event gets a certain
1539 * percentage (but not all) of the PMU time. In case that
1540 * multiplexing happens, the number of samples or counter value
1541 * will not reflect the case compared to when no multiplexing
1542 * occurs. This makes comparison between different runs difficult.
1543 * Typically, the counter value should be normalized before
1544 * comparing to other experiments. The usual normalization is done
1545 * as follows.
1546 *
1547 * ::
1548 *
1549 * normalized_counter = counter * t_enabled / t_running
1550 *
1551 * Where t_enabled is the time enabled for the event and t_running
1552 * is the time running for the event since the last normalization.
1553 * The enabled and running times are accumulated since the perf
1554 * event open. To achieve the scaling factor between two invocations
1555 * of an eBPF program, users can use the CPU id as the key (which is
1556 * typical for the perf array usage model) to remember the previous
1557 * value and do the calculation inside the eBPF program.
1558 * Return
1559 * 0 on success, or a negative error in case of failure.
1560 *
1561 * int bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size)
1562 * Description
1563 * For an eBPF program attached to a perf event, retrieve the
1564 * value of the event counter associated to *ctx* and store it in
1565 * the structure pointed by *buf* and of size *buf_size*. Enabled
1566 * and running times are also stored in the structure (see
1567 * description of helper **bpf_perf_event_read_value**\ () for
1568 * more details).
1569 * Return
1570 * 0 on success, or a negative error in case of failure.
1571 *
1572 * int bpf_getsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, char *optval, int optlen)
1573 * Description
1574 * Emulate a call to **getsockopt()** on the socket associated to
1575 * *bpf_socket*, which must be a full socket. The *level* at
1576 * which the option resides and the name *optname* of the option
1577 * must be specified, see **getsockopt(2)** for more information.
1578 * The retrieved value is stored in the structure pointed by
1579 * *optval* and of length *optlen*.
1580 *
1581 * This helper actually implements a subset of **getsockopt()**.
1582 * It supports the following *level*\ s:
1583 *
1584 * * **IPPROTO_TCP**, which supports *optname*
1585 * **TCP_CONGESTION**.
1586 * * **IPPROTO_IP**, which supports *optname* **IP_TOS**.
1587 * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**.
1588 * Return
1589 * 0 on success, or a negative error in case of failure.
1590 *
1591 * int bpf_override_return(struct pt_regs *regs, u64 rc)
1592 * Description
1593 * Used for error injection, this helper uses kprobes to override
1594 * the return value of the probed function, and to set it to *rc*.
1595 * The first argument is the context *regs* on which the kprobe
1596 * works.
1597 *
1598 * This helper works by setting the PC (program counter)
1599 * to an override function which is run in place of the original
1600 * probed function. This means the probed function is not run at
1601 * all. The replacement function just returns with the required
1602 * value.
1603 *
1604 * This helper has security implications, and thus is subject to
1605 * restrictions. It is only available if the kernel was compiled
1606 * with the **CONFIG_BPF_KPROBE_OVERRIDE** configuration
1607 * option, and in this case it only works on functions tagged with
1608 * **ALLOW_ERROR_INJECTION** in the kernel code.
1609 *
1610 * Also, the helper is only available for the architectures having
1611 * the CONFIG_FUNCTION_ERROR_INJECTION option. As of this writing,
1612 * x86 architecture is the only one to support this feature.
1613 * Return
1614 * 0
1615 *
1616 * int bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval)
1617 * Description
1618 * Attempt to set the value of the **bpf_sock_ops_cb_flags** field
1619 * for the full TCP socket associated to *bpf_sock* to
1620 * *argval*.
1621 *
1622 * The primary use of this field is to determine if there should
1623 * be calls to eBPF programs of type
1624 * **BPF_PROG_TYPE_SOCK_OPS** at various points in the TCP
1625 * code. A program of the same type can change its value, per
1626 * connection and as necessary, when the connection is
1627 * established. This field is directly accessible for reading, but
1628 * this helper must be used for updates in order to return an
1629 * error if an eBPF program tries to set a callback that is not
1630 * supported in the current kernel.
1631 *
1632 * The supported callback values that *argval* can combine are:
1633 *
1634 * * **BPF_SOCK_OPS_RTO_CB_FLAG** (retransmission timeout)
1635 * * **BPF_SOCK_OPS_RETRANS_CB_FLAG** (retransmission)
1636 * * **BPF_SOCK_OPS_STATE_CB_FLAG** (TCP state change)
1637 *
1638 * Here are some examples of where one could call such an eBPF
1639 * program:
1640 *
1641 * * When RTO fires.
1642 * * When a packet is retransmitted.
1643 * * When the connection terminates.
1644 * * When a packet is sent.
1645 * * When a packet is received.
1646 * Return
1647 * Code **-EINVAL** if the socket is not a full TCP socket;
1648 * otherwise, a positive number containing the bits that could not
1649 * be set is returned (which comes down to 0 if all bits were set
1650 * as required).
1651 *
1652 * int bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags)
1653 * Description
1654 * This helper is used in programs implementing policies at the
1655 * socket level. If the message *msg* is allowed to pass (i.e. if
1656 * the verdict eBPF program returns **SK_PASS**), redirect it to
1657 * the socket referenced by *map* (of type
1658 * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and
1659 * egress interfaces can be used for redirection.
The 1660 * **BPF_F_INGRESS** value in *flags* is used to make the 1661 * distinction (ingress path is selected if the flag is present, 1662 * egress path otherwise). This is the only flag supported for now. 1663 * Return 1664 * **SK_PASS** on success, or **SK_DROP** on error. 1665 * 1666 * int bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes) 1667 * Description 1668 * For socket policies, apply the verdict of the eBPF program to 1669 * the next *bytes* (number of bytes) of message *msg*. 1670 * 1671 * For example, this helper can be used in the following cases: 1672 * 1673 * * A single **sendmsg**\ () or **sendfile**\ () system call 1674 * contains multiple logical messages that the eBPF program is 1675 * supposed to read and for which it should apply a verdict. 1676 * * An eBPF program only cares to read the first *bytes* of a 1677 * *msg*. If the message has a large payload, then setting up 1678 * and calling the eBPF program repeatedly for all bytes, even 1679 * though the verdict is already known, would create unnecessary 1680 * overhead. 1681 * 1682 * When called from within an eBPF program, the helper sets a 1683 * counter internal to the BPF infrastructure, that is used to 1684 * apply the last verdict to the next *bytes*. If *bytes* is 1685 * smaller than the current data being processed from a 1686 * **sendmsg**\ () or **sendfile**\ () system call, the first 1687 * *bytes* will be sent and the eBPF program will be re-run with 1688 * the pointer for start of data pointing to byte number *bytes* 1689 * **+ 1**. If *bytes* is larger than the current data being 1690 * processed, then the eBPF verdict will be applied to multiple 1691 * **sendmsg**\ () or **sendfile**\ () calls until *bytes* are 1692 * consumed. 1693 * 1694 * Note that if a socket closes with the internal counter holding 1695 * a non-zero value, this is not a problem because data is not 1696 * being buffered for *bytes* and is sent as it is received. 1697 * Return 1698 * 0 1699 * 1700 * int bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes) 1701 * Description 1702 * For socket policies, prevent the execution of the verdict eBPF 1703 * program for message *msg* until *bytes* (byte number) have been 1704 * accumulated. 1705 * 1706 * This can be used when one needs a specific number of bytes 1707 * before a verdict can be assigned, even if the data spans 1708 * multiple **sendmsg**\ () or **sendfile**\ () calls. The extreme 1709 * case would be a user calling **sendmsg**\ () repeatedly with 1710 * 1-byte long message segments. Obviously, this is bad for 1711 * performance, but it is still valid. If the eBPF program needs 1712 * *bytes* bytes to validate a header, this helper can be used to 1713 * prevent the eBPF program to be called again until *bytes* have 1714 * been accumulated. 1715 * Return 1716 * 0 1717 * 1718 * int bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags) 1719 * Description 1720 * For socket policies, pull in non-linear data from user space 1721 * for *msg* and set pointers *msg*\ **->data** and *msg*\ 1722 * **->data_end** to *start* and *end* bytes offsets into *msg*, 1723 * respectively. 1724 * 1725 * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a 1726 * *msg* it can only parse data that the (**data**, **data_end**) 1727 * pointers have already consumed. For **sendmsg**\ () hooks this 1728 * is likely the first scatterlist element. But for calls relying 1729 * on the **sendpage** handler (e.g. 
**sendfile**\ ()) this will 1730 * be the range (**0**, **0**) because the data is shared with 1731 * user space and by default the objective is to avoid allowing 1732 * user space to modify data while (or after) eBPF verdict is 1733 * being decided. This helper can be used to pull in data and to 1734 * set the start and end pointer to given values. Data will be 1735 * copied if necessary (i.e. if data was not linear and if start 1736 * and end pointers do not point to the same chunk). 1737 * 1738 * A call to this helper is susceptible to change the underlaying 1739 * packet buffer. Therefore, at load time, all checks on pointers 1740 * previously done by the verifier are invalidated and must be 1741 * performed again, if the helper is used in combination with 1742 * direct packet access. 1743 * 1744 * All values for *flags* are reserved for future usage, and must 1745 * be left at zero. 1746 * Return 1747 * 0 on success, or a negative error in case of failure. 1748 * 1749 * int bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len) 1750 * Description 1751 * Bind the socket associated to *ctx* to the address pointed by 1752 * *addr*, of length *addr_len*. This allows for making outgoing 1753 * connection from the desired IP address, which can be useful for 1754 * example when all processes inside a cgroup should use one 1755 * single IP address on a host that has multiple IP configured. 1756 * 1757 * This helper works for IPv4 and IPv6, TCP and UDP sockets. The 1758 * domain (*addr*\ **->sa_family**) must be **AF_INET** (or 1759 * **AF_INET6**). Looking for a free port to bind to can be 1760 * expensive, therefore binding to port is not permitted by the 1761 * helper: *addr*\ **->sin_port** (or **sin6_port**, respectively) 1762 * must be set to zero. 1763 * Return 1764 * 0 on success, or a negative error in case of failure. 1765 * 1766 * int bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta) 1767 * Description 1768 * Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is 1769 * only possible to shrink the packet as of this writing, 1770 * therefore *delta* must be a negative integer. 1771 * 1772 * A call to this helper is susceptible to change the underlaying 1773 * packet buffer. Therefore, at load time, all checks on pointers 1774 * previously done by the verifier are invalidated and must be 1775 * performed again, if the helper is used in combination with 1776 * direct packet access. 1777 * Return 1778 * 0 on success, or a negative error in case of failure. 1779 * 1780 * int bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags) 1781 * Description 1782 * Retrieve the XFRM state (IP transform framework, see also 1783 * **ip-xfrm(8)**) at *index* in XFRM "security path" for *skb*. 1784 * 1785 * The retrieved value is stored in the **struct bpf_xfrm_state** 1786 * pointed by *xfrm_state* and of length *size*. 1787 * 1788 * All values for *flags* are reserved for future usage, and must 1789 * be left at zero. 1790 * 1791 * This helper is available only if the kernel was compiled with 1792 * **CONFIG_XFRM** configuration option. 1793 * Return 1794 * 0 on success, or a negative error in case of failure. 1795 * 1796 * int bpf_get_stack(struct pt_regs *regs, void *buf, u32 size, u64 flags) 1797 * Description 1798 * Return a user or a kernel stack in bpf program provided buffer. 1799 * To achieve this, the helper needs *ctx*, which is a pointer 1800 * to the context on which the tracing program is executed. 
1801 * To store the stacktrace, the bpf program provides *buf* with 1802 * a nonnegative *size*. 1803 * 1804 * The last argument, *flags*, holds the number of stack frames to 1805 * skip (from 0 to 255), masked with 1806 * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set 1807 * the following flags: 1808 * 1809 * **BPF_F_USER_STACK** 1810 * Collect a user space stack instead of a kernel stack. 1811 * **BPF_F_USER_BUILD_ID** 1812 * Collect buildid+offset instead of ips for user stack, 1813 * only valid if **BPF_F_USER_STACK** is also specified. 1814 * 1815 * **bpf_get_stack**\ () can collect up to 1816 * **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject 1817 * to sufficient large buffer size. Note that 1818 * this limit can be controlled with the **sysctl** program, and 1819 * that it should be manually increased in order to profile long 1820 * user stacks (such as stacks for Java programs). To do so, use: 1821 * 1822 * :: 1823 * 1824 * # sysctl kernel.perf_event_max_stack=<new value> 1825 * Return 1826 * A non-negative value equal to or less than *size* on success, 1827 * or a negative error in case of failure. 1828 * 1829 * int skb_load_bytes_relative(const struct sk_buff *skb, u32 offset, void *to, u32 len, u32 start_header) 1830 * Description 1831 * This helper is similar to **bpf_skb_load_bytes**\ () in that 1832 * it provides an easy way to load *len* bytes from *offset* 1833 * from the packet associated to *skb*, into the buffer pointed 1834 * by *to*. The difference to **bpf_skb_load_bytes**\ () is that 1835 * a fifth argument *start_header* exists in order to select a 1836 * base offset to start from. *start_header* can be one of: 1837 * 1838 * **BPF_HDR_START_MAC** 1839 * Base offset to load data from is *skb*'s mac header. 1840 * **BPF_HDR_START_NET** 1841 * Base offset to load data from is *skb*'s network header. 1842 * 1843 * In general, "direct packet access" is the preferred method to 1844 * access packet data, however, this helper is in particular useful 1845 * in socket filters where *skb*\ **->data** does not always point 1846 * to the start of the mac header and where "direct packet access" 1847 * is not available. 1848 * Return 1849 * 0 on success, or a negative error in case of failure. 1850 * 1851 * int bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags) 1852 * Description 1853 * Do FIB lookup in kernel tables using parameters in *params*. 1854 * If lookup is successful and result shows packet is to be 1855 * forwarded, the neighbor tables are searched for the nexthop. 1856 * If successful (ie., FIB lookup shows forwarding and nexthop 1857 * is resolved), the nexthop address is returned in ipv4_dst 1858 * or ipv6_dst based on family, smac is set to mac address of 1859 * egress device, dmac is set to nexthop mac address, rt_metric 1860 * is set to metric from route (IPv4/IPv6 only), and ifindex 1861 * is set to the device index of the nexthop from the FIB lookup. 1862 * 1863 * *plen* argument is the size of the passed in struct. 1864 * *flags* argument can be a combination of one or more of the 1865 * following values: 1866 * 1867 * **BPF_FIB_LOOKUP_DIRECT** 1868 * Do a direct table lookup vs full lookup using FIB 1869 * rules. 1870 * **BPF_FIB_LOOKUP_OUTPUT** 1871 * Perform lookup from an egress perspective (default is 1872 * ingress). 1873 * 1874 * *ctx* is either **struct xdp_md** for XDP programs or 1875 * **struct sk_buff** tc cls_act programs. 
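 *
 * 		A minimal sketch for the XDP case (assuming the packet was
 * 		already parsed and validated as IPv4, that *iph* points to the
 * 		validated **struct iphdr**, and that the helper and endianness
 * 		declarations from the kernel samples' **bpf_helpers.h** and
 * 		**bpf_endian.h** are available) could look like:
 *
 * 		::
 *
 * 			struct bpf_fib_lookup params = {};
 * 			int rc;
 *
 * 			params.family   = AF_INET;
 * 			params.ifindex  = ctx->ingress_ifindex;
 * 			params.tot_len  = bpf_ntohs(iph->tot_len);
 * 			params.ipv4_src = iph->saddr;
 * 			params.ipv4_dst = iph->daddr;
 * 			// Optionally fill l4_protocol/sport/dport; FIB rules
 * 			// are consulted unless BPF_FIB_LOOKUP_DIRECT is set
 * 			// in the flags argument.
 *
 * 			rc = bpf_fib_lookup(ctx, &params, sizeof(params), 0);
 * 			if (rc == 0) {
 * 				// Rewrite the MAC addresses from params.smac and
 * 				// params.dmac, then redirect to params.ifindex.
 * 			}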
1876 * Return
1877 * * < 0 if any input argument is invalid
1878 * * 0 on success (packet is forwarded, nexthop neighbor exists)
1879 * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
1880 * packet is not forwarded or needs assist from full stack
1881 *
1882 * int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags)
1883 * Description
1884 * Add an entry to, or update a sockhash *map* referencing sockets.
1885 * The *skops* is used as a new value for the entry associated to
1886 * *key*. *flags* is one of:
1887 *
1888 * **BPF_NOEXIST**
1889 * The entry for *key* must not exist in the map.
1890 * **BPF_EXIST**
1891 * The entry for *key* must already exist in the map.
1892 * **BPF_ANY**
1893 * No condition on the existence of the entry for *key*.
1894 *
1895 * If the *map* has eBPF programs (parser and verdict), those will
1896 * be inherited by the socket being added. If the socket is
1897 * already attached to eBPF programs, this results in an error.
1898 * Return
1899 * 0 on success, or a negative error in case of failure.
1900 *
1901 * int bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags)
1902 * Description
1903 * This helper is used in programs implementing policies at the
1904 * socket level. If the message *msg* is allowed to pass (i.e. if
1905 * the verdict eBPF program returns **SK_PASS**), redirect it to
1906 * the socket referenced by *map* (of type
1907 * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and
1908 * egress interfaces can be used for redirection. The
1909 * **BPF_F_INGRESS** value in *flags* is used to make the
1910 * distinction (ingress path is selected if the flag is present,
1911 * egress path otherwise). This is the only flag supported for now.
1912 * Return
1913 * **SK_PASS** on success, or **SK_DROP** on error.
1914 *
1915 * int bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags)
1916 * Description
1917 * This helper is used in programs implementing policies at the
1918 * skb socket level. If the sk_buff *skb* is allowed to pass (i.e.
1919 * if the verdict eBPF program returns **SK_PASS**), redirect it
1920 * to the socket referenced by *map* (of type
1921 * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and
1922 * egress interfaces can be used for redirection. The
1923 * **BPF_F_INGRESS** value in *flags* is used to make the
1924 * distinction (ingress path is selected if the flag is present,
1925 * egress otherwise). This is the only flag supported for now.
1926 * Return
1927 * **SK_PASS** on success, or **SK_DROP** on error.
1928 *
1929 * int bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len)
1930 * Description
1931 * Encapsulate the packet associated to *skb* within a Layer 3
1932 * protocol header. This header is provided in the buffer at
1933 * address *hdr*, with *len* its size in bytes. *type* indicates
1934 * the protocol of the header and can be one of:
1935 *
1936 * **BPF_LWT_ENCAP_SEG6**
1937 * IPv6 encapsulation with Segment Routing Header
1938 * (**struct ipv6_sr_hdr**). *hdr* only contains the SRH,
1939 * the IPv6 header is computed by the kernel.
1940 * **BPF_LWT_ENCAP_SEG6_INLINE**
1941 * Only works if *skb* contains an IPv6 packet. Insert a
1942 * Segment Routing Header (**struct ipv6_sr_hdr**) inside
1943 * the IPv6 header.
1944 *
1945 * A call to this helper is susceptible to change the underlying
1946 * packet buffer.
Therefore, at load time, all checks on pointers 1947 * previously done by the verifier are invalidated and must be 1948 * performed again, if the helper is used in combination with 1949 * direct packet access. 1950 * Return 1951 * 0 on success, or a negative error in case of failure. 1952 * 1953 * int bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len) 1954 * Description 1955 * Store *len* bytes from address *from* into the packet 1956 * associated to *skb*, at *offset*. Only the flags, tag and TLVs 1957 * inside the outermost IPv6 Segment Routing Header can be 1958 * modified through this helper. 1959 * 1960 * A call to this helper is susceptible to change the underlaying 1961 * packet buffer. Therefore, at load time, all checks on pointers 1962 * previously done by the verifier are invalidated and must be 1963 * performed again, if the helper is used in combination with 1964 * direct packet access. 1965 * Return 1966 * 0 on success, or a negative error in case of failure. 1967 * 1968 * int bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta) 1969 * Description 1970 * Adjust the size allocated to TLVs in the outermost IPv6 1971 * Segment Routing Header contained in the packet associated to 1972 * *skb*, at position *offset* by *delta* bytes. Only offsets 1973 * after the segments are accepted. *delta* can be as well 1974 * positive (growing) as negative (shrinking). 1975 * 1976 * A call to this helper is susceptible to change the underlaying 1977 * packet buffer. Therefore, at load time, all checks on pointers 1978 * previously done by the verifier are invalidated and must be 1979 * performed again, if the helper is used in combination with 1980 * direct packet access. 1981 * Return 1982 * 0 on success, or a negative error in case of failure. 1983 * 1984 * int bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len) 1985 * Description 1986 * Apply an IPv6 Segment Routing action of type *action* to the 1987 * packet associated to *skb*. Each action takes a parameter 1988 * contained at address *param*, and of length *param_len* bytes. 1989 * *action* can be one of: 1990 * 1991 * **SEG6_LOCAL_ACTION_END_X** 1992 * End.X action: Endpoint with Layer-3 cross-connect. 1993 * Type of *param*: **struct in6_addr**. 1994 * **SEG6_LOCAL_ACTION_END_T** 1995 * End.T action: Endpoint with specific IPv6 table lookup. 1996 * Type of *param*: **int**. 1997 * **SEG6_LOCAL_ACTION_END_B6** 1998 * End.B6 action: Endpoint bound to an SRv6 policy. 1999 * Type of param: **struct ipv6_sr_hdr**. 2000 * **SEG6_LOCAL_ACTION_END_B6_ENCAP**
2001 * End.B6.Encap action: Endpoint bound to an SRv6 2002 * encapsulation policy. 2003 * Type of param: **struct ipv6_sr_hdr**. 2004 * 2005 * A call to this helper is susceptible to change the underlaying 2006 * packet buffer. Therefore, at load time, all checks on pointers 2007 * previously done by the verifier are invalidated and must be 2008 * performed again, if the helper is used in combination with 2009 * direct packet access. 2010 * Return 2011 * 0 on success, or a negative error in case of failure. 2012 * 2013 * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle) 2014 * Description 2015 * This helper is used in programs implementing IR decoding, to 2016 * report a successfully decoded key press with *scancode*, 2017 * *toggle* value in the given *protocol*. The scancode will be 2018 * translated to a keycode using the rc keymap, and reported as 2019 * an input key down event. After a period a key up event is 2020 * generated. This period can be extended by calling either 2021 * **bpf_rc_keydown** () again with the same values, or calling 2022 * **bpf_rc_repeat** (). 2023 * 2024 * Some protocols include a toggle bit, in case the button was 2025 * released and pressed again between consecutive scancodes. 2026 * 2027 * The *ctx* should point to the lirc sample as passed into 2028 * the program. 2029 * 2030 * The *protocol* is the decoded protocol number (see 2031 * **enum rc_proto** for some predefined values). 2032 * 2033 * This helper is only available is the kernel was compiled with 2034 * the **CONFIG_BPF_LIRC_MODE2** configuration option set to 2035 * "**y**". 2036 * 2037 * Return 2038 * 0 2039 * 2040 * int bpf_rc_repeat(void *ctx) 2041 * Description 2042 * This helper is used in programs implementing IR decoding, to 2043 * report a successfully decoded repeat key message. This delays 2044 * the generation of a key up event for previously generated 2045 * key down event. 2046 * 2047 * Some IR protocols like NEC have a special IR message for 2048 * repeating last button, for when a button is held down. 2049 * 2050 * The *ctx* should point to the lirc sample as passed into 2051 * the program. 2052 * 2053 * This helper is only available is the kernel was compiled with 2054 * the **CONFIG_BPF_LIRC_MODE2** configuration option set to 2055 * "**y**". 2056 * 2057 * Return 2058 * 0 2059 * 2060 * uint64_t bpf_skb_cgroup_id(struct sk_buff *skb) 2061 * Description 2062 * Return the cgroup v2 id of the socket associated with the *skb*. 2063 * This is roughly similar to the **bpf_get_cgroup_classid**\ () 2064 * helper for cgroup v1 by providing a tag resp. identifier that 2065 * can be matched on or used for map lookups e.g. to implement 2066 * policy. The cgroup v2 id of a given path in the hierarchy is 2067 * exposed in user space through the f_handle API in order to get 2068 * to the same 64-bit id. 2069 * 2070 * This helper can be used on TC egress path, but not on ingress, 2071 * and is available only if the kernel was compiled with the 2072 * **CONFIG_SOCK_CGROUP_DATA** configuration option. 2073 * Return 2074 * The id is returned or 0 in case the id could not be retrieved. 2075 * 2076 * u64 bpf_get_current_cgroup_id(void) 2077 * Return 2078 * A 64-bit integer containing the current cgroup id based 2079 * on the cgroup within which the current task is running. 
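 *
 * 		As a closing illustration for the two cgroup helpers above, a
 * 		tracing program could act only for tasks in one cgroup (a
 * 		sketch; **TARGET_CGROUP_ID** is a hypothetical constant that
 * 		user space would typically provide, e.g. through a map):
 *
 * 		::
 *
 * 			SEC("tracepoint/syscalls/sys_enter_execve")
 * 			int on_execve(void *ctx)
 * 			{
 * 				__u64 cgid = bpf_get_current_cgroup_id();
 *
 * 				if (cgid != TARGET_CGROUP_ID) // hypothetical
 * 					return 0;
 * 				// ... record the event, e.g. with
 * 				// bpf_perf_event_output() ...
 * 				return 0;
 * 			}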
2080 */ 2081#define __BPF_FUNC_MAPPER(FN) \ 2082 FN(unspec), \ 2083 FN(map_lookup_elem), \ 2084 FN(map_update_elem), \ 2085 FN(map_delete_elem), \ 2086 FN(probe_read), \ 2087 FN(ktime_get_ns), \ 2088 FN(trace_printk), \ 2089 FN(get_prandom_u32), \ 2090 FN(get_smp_processor_id), \ 2091 FN(skb_store_bytes), \ 2092 FN(l3_csum_replace), \ 2093 FN(l4_csum_replace), \ 2094 FN(tail_call), \ 2095 FN(clone_redirect), \ 2096 FN(get_current_pid_tgid), \ 2097 FN(get_current_uid_gid), \ 2098 FN(get_current_comm), \ 2099 FN(get_cgroup_classid), \ 2100 FN(skb_vlan_push), \ 2101 FN(skb_vlan_pop), \ 2102 FN(skb_get_tunnel_key), \ 2103 FN(skb_set_tunnel_key), \ 2104 FN(perf_event_read), \ 2105 FN(redirect), \ 2106 FN(get_route_realm), \ 2107 FN(perf_event_output), \ 2108 FN(skb_load_bytes), \ 2109 FN(get_stackid), \ 2110 FN(csum_diff), \ 2111 FN(skb_get_tunnel_opt), \ 2112 FN(skb_set_tunnel_opt), \ 2113 FN(skb_change_proto), \ 2114 FN(skb_change_type), \ 2115 FN(skb_under_cgroup), \ 2116 FN(get_hash_recalc), \ 2117 FN(get_current_task), \ 2118 FN(probe_write_user), \ 2119 FN(current_task_under_cgroup), \ 2120 FN(skb_change_tail), \ 2121 FN(skb_pull_data), \ 2122 FN(csum_update), \ 2123 FN(set_hash_invalid), \ 2124 FN(get_numa_node_id), \ 2125 FN(skb_change_head), \ 2126 FN(xdp_adjust_head), \ 2127 FN(probe_read_str), \ 2128 FN(get_socket_cookie), \ 2129 FN(get_socket_uid), \ 2130 FN(set_hash), \ 2131 FN(setsockopt), \ 2132 FN(skb_adjust_room), \ 2133 FN(redirect_map), \ 2134 FN(sk_redirect_map), \ 2135 FN(sock_map_update), \ 2136 FN(xdp_adjust_meta), \ 2137 FN(perf_event_read_value), \ 2138 FN(perf_prog_read_value), \ 2139 FN(getsockopt), \ 2140 FN(override_return), \ 2141 FN(sock_ops_cb_flags_set), \ 2142 FN(msg_redirect_map), \ 2143 FN(msg_apply_bytes), \ 2144 FN(msg_cork_bytes), \ 2145 FN(msg_pull_data), \ 2146 FN(bind), \ 2147 FN(xdp_adjust_tail), \ 2148 FN(skb_get_xfrm_state), \ 2149 FN(get_stack), \ 2150 FN(skb_load_bytes_relative), \ 2151 FN(fib_lookup), \ 2152 FN(sock_hash_update), \ 2153 FN(msg_redirect_hash), \ 2154 FN(sk_redirect_hash), \ 2155 FN(lwt_push_encap), \ 2156 FN(lwt_seg6_store_bytes), \ 2157 FN(lwt_seg6_adjust_srh), \ 2158 FN(lwt_seg6_action), \ 2159 FN(rc_repeat), \ 2160 FN(rc_keydown), \ 2161 FN(skb_cgroup_id), \ 2162 FN(get_current_cgroup_id), 2163 2164/* integer value in 'imm' field of BPF_CALL instruction selects which helper 2165 * function eBPF program intends to call 2166 */ 2167#define __BPF_ENUM_FN(x) BPF_FUNC_ ## x 2168enum bpf_func_id { 2169 __BPF_FUNC_MAPPER(__BPF_ENUM_FN) 2170 __BPF_FUNC_MAX_ID, 2171}; 2172#undef __BPF_ENUM_FN 2173 2174/* All flags used by eBPF helper functions, placed here. */ 2175 2176/* BPF_FUNC_skb_store_bytes flags. */ 2177#define BPF_F_RECOMPUTE_CSUM (1ULL << 0) 2178#define BPF_F_INVALIDATE_HASH (1ULL << 1) 2179 2180/* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags. 2181 * First 4 bits are for passing the header field size. 2182 */ 2183#define BPF_F_HDR_FIELD_MASK 0xfULL 2184 2185/* BPF_FUNC_l4_csum_replace flags. */ 2186#define BPF_F_PSEUDO_HDR (1ULL << 4) 2187#define BPF_F_MARK_MANGLED_0 (1ULL << 5) 2188#define BPF_F_MARK_ENFORCE (1ULL << 6) 2189 2190/* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */ 2191#define BPF_F_INGRESS (1ULL << 0) 2192 2193/* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */ 2194#define BPF_F_TUNINFO_IPV6 (1ULL << 0) 2195 2196/* flags for both BPF_FUNC_get_stackid and BPF_FUNC_get_stack. 
*/ 2197#define BPF_F_SKIP_FIELD_MASK 0xffULL 2198#define BPF_F_USER_STACK (1ULL << 8) 2199/* flags used by BPF_FUNC_get_stackid only. */ 2200#define BPF_F_FAST_STACK_CMP (1ULL << 9) 2201#define BPF_F_REUSE_STACKID (1ULL << 10) 2202/* flags used by BPF_FUNC_get_stack only. */ 2203#define BPF_F_USER_BUILD_ID (1ULL << 11) 2204 2205/* BPF_FUNC_skb_set_tunnel_key flags. */ 2206#define BPF_F_ZERO_CSUM_TX (1ULL << 1) 2207#define BPF_F_DONT_FRAGMENT (1ULL << 2) 2208#define BPF_F_SEQ_NUMBER (1ULL << 3) 2209 2210/* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and 2211 * BPF_FUNC_perf_event_read_value flags. 2212 */ 2213#define BPF_F_INDEX_MASK 0xffffffffULL 2214#define BPF_F_CURRENT_CPU BPF_F_INDEX_MASK 2215/* BPF_FUNC_perf_event_output for sk_buff input context. */ 2216#define BPF_F_CTXLEN_MASK (0xfffffULL << 32) 2217 2218/* Mode for BPF_FUNC_skb_adjust_room helper. */ 2219enum bpf_adj_room_mode { 2220 BPF_ADJ_ROOM_NET, 2221}; 2222 2223/* Mode for BPF_FUNC_skb_load_bytes_relative helper. */ 2224enum bpf_hdr_start_off { 2225 BPF_HDR_START_MAC, 2226 BPF_HDR_START_NET, 2227}; 2228 2229/* Encapsulation type for BPF_FUNC_lwt_push_encap helper. */ 2230enum bpf_lwt_encap_mode { 2231 BPF_LWT_ENCAP_SEG6, 2232 BPF_LWT_ENCAP_SEG6_INLINE 2233}; 2234 2235/* user accessible mirror of in-kernel sk_buff. 2236 * new fields can only be added to the end of this structure 2237 */ 2238struct __sk_buff { 2239 __u32 len; 2240 __u32 pkt_type; 2241 __u32 mark; 2242 __u32 queue_mapping; 2243 __u32 protocol; 2244 __u32 vlan_present; 2245 __u32 vlan_tci; 2246 __u32 vlan_proto; 2247 __u32 priority; 2248 __u32 ingress_ifindex; 2249 __u32 ifindex; 2250 __u32 tc_index; 2251 __u32 cb[5]; 2252 __u32 hash; 2253 __u32 tc_classid; 2254 __u32 data; 2255 __u32 data_end; 2256 __u32 napi_id; 2257 2258 /* Accessed by BPF_PROG_TYPE_sk_skb types from here to ... */ 2259 __u32 family; 2260 __u32 remote_ip4; /* Stored in network byte order */ 2261 __u32 local_ip4; /* Stored in network byte order */ 2262 __u32 remote_ip6[4]; /* Stored in network byte order */ 2263 __u32 local_ip6[4]; /* Stored in network byte order */ 2264 __u32 remote_port; /* Stored in network byte order */ 2265 __u32 local_port; /* stored in host byte order */ 2266 /* ... here. */ 2267 2268 __u32 data_meta; 2269}; 2270 2271struct bpf_tunnel_key { 2272 __u32 tunnel_id; 2273 union { 2274 __u32 remote_ipv4; 2275 __u32 remote_ipv6[4]; 2276 }; 2277 __u8 tunnel_tos; 2278 __u8 tunnel_ttl; 2279 __u16 tunnel_ext; /* Padding, future use. */ 2280 __u32 tunnel_label; 2281}; 2282 2283/* user accessible mirror of in-kernel xfrm_state. 2284 * new fields can only be added to the end of this structure 2285 */ 2286struct bpf_xfrm_state { 2287 __u32 reqid; 2288 __u32 spi; /* Stored in network byte order */ 2289 __u16 family; 2290 __u16 ext; /* Padding, future use. */ 2291 union { 2292 __u32 remote_ipv4; /* Stored in network byte order */ 2293 __u32 remote_ipv6[4]; /* Stored in network byte order */ 2294 }; 2295}; 2296 2297/* Generic BPF return codes which all BPF program types may support. 2298 * The values are binary compatible with their TC_ACT_* counter-part to 2299 * provide backwards compatibility with existing SCHED_CLS and SCHED_ACT 2300 * programs. 2301 * 2302 * XDP is handled seprately, see XDP_*. 
2303 */ 2304enum bpf_ret_code { 2305 BPF_OK = 0, 2306 /* 1 reserved */ 2307 BPF_DROP = 2, 2308 /* 3-6 reserved */ 2309 BPF_REDIRECT = 7, 2310 /* >127 are reserved for prog type specific return codes */ 2311}; 2312 2313struct bpf_sock { 2314 __u32 bound_dev_if; 2315 __u32 family; 2316 __u32 type; 2317 __u32 protocol; 2318 __u32 mark; 2319 __u32 priority; 2320 __u32 src_ip4; /* Allows 1,2,4-byte read. 2321 * Stored in network byte order. 2322 */ 2323 __u32 src_ip6[4]; /* Allows 1,2,4-byte read. 2324 * Stored in network byte order. 2325 */ 2326 __u32 src_port; /* Allows 4-byte read. 2327 * Stored in host byte order 2328 */ 2329}; 2330 2331#define XDP_PACKET_HEADROOM 256 2332 2333/* User return codes for XDP prog type. 2334 * A valid XDP program must return one of these defined values. All other 2335 * return codes are reserved for future use. Unknown return codes will 2336 * result in packet drops and a warning via bpf_warn_invalid_xdp_action(). 2337 */ 2338enum xdp_action { 2339 XDP_ABORTED = 0, 2340 XDP_DROP, 2341 XDP_PASS, 2342 XDP_TX, 2343 XDP_REDIRECT, 2344}; 2345 2346/* user accessible metadata for XDP packet hook 2347 * new fields must be added to the end of this structure 2348 */ 2349struct xdp_md { 2350 __u32 data; 2351 __u32 data_end; 2352 __u32 data_meta; 2353 /* Below access go through struct xdp_rxq_info */ 2354 __u32 ingress_ifindex; /* rxq->dev->ifindex */ 2355 __u32 rx_queue_index; /* rxq->queue_index */ 2356}; 2357 2358enum sk_action { 2359 SK_DROP = 0, 2360 SK_PASS, 2361}; 2362 2363/* user accessible metadata for SK_MSG packet hook, new fields must 2364 * be added to the end of this structure 2365 */ 2366struct sk_msg_md { 2367 void *data; 2368 void *data_end; 2369 2370 __u32 family; 2371 __u32 remote_ip4; /* Stored in network byte order */ 2372 __u32 local_ip4; /* Stored in network byte order */ 2373 __u32 remote_ip6[4]; /* Stored in network byte order */ 2374 __u32 local_ip6[4]; /* Stored in network byte order */ 2375 __u32 remote_port; /* Stored in network byte order */ 2376 __u32 local_port; /* stored in host byte order */ 2377}; 2378 2379#define BPF_TAG_SIZE 8 2380 2381struct bpf_prog_info { 2382 __u32 type; 2383 __u32 id; 2384 __u8 tag[BPF_TAG_SIZE]; 2385 __u32 jited_prog_len; 2386 __u32 xlated_prog_len; 2387 __aligned_u64 jited_prog_insns; 2388 __aligned_u64 xlated_prog_insns; 2389 __u64 load_time; /* ns since boottime */ 2390 __u32 created_by_uid; 2391 __u32 nr_map_ids; 2392 __aligned_u64 map_ids; 2393 char name[BPF_OBJ_NAME_LEN]; 2394 __u32 ifindex; 2395 __u32 gpl_compatible:1; 2396 __u64 netns_dev; 2397 __u64 netns_ino; 2398 __u32 nr_jited_ksyms; 2399 __u32 nr_jited_func_lens; 2400 __aligned_u64 jited_ksyms; 2401 __aligned_u64 jited_func_lens; 2402} __attribute__((aligned(8))); 2403 2404struct bpf_map_info { 2405 __u32 type; 2406 __u32 id; 2407 __u32 key_size; 2408 __u32 value_size; 2409 __u32 max_entries; 2410 __u32 map_flags; 2411 char name[BPF_OBJ_NAME_LEN]; 2412 __u32 ifindex; 2413 __u32 :32; 2414 __u64 netns_dev; 2415 __u64 netns_ino; 2416 __u32 btf_id; 2417 __u32 btf_key_type_id; 2418 __u32 btf_value_type_id; 2419} __attribute__((aligned(8))); 2420 2421struct bpf_btf_info { 2422 __aligned_u64 btf; 2423 __u32 btf_size; 2424 __u32 id; 2425} __attribute__((aligned(8))); 2426 2427/* User bpf_sock_addr struct to access socket fields and sockaddr struct passed 2428 * by user and intended to be used by socket (e.g. to bind to, depends on 2429 * attach attach type). 2430 */ 2431struct bpf_sock_addr { 2432 __u32 user_family; /* Allows 4-byte read, but no write. 
 */
2433	__u32 user_ip4;		/* Allows 1,2,4-byte read and 4-byte write.
2434				 * Stored in network byte order.
2435				 */
2436	__u32 user_ip6[4];	/* Allows 1,2,4-byte read and 4-byte write.
2437				 * Stored in network byte order.
2438				 */
2439	__u32 user_port;	/* Allows 4-byte read and write.
2440				 * Stored in network byte order.
2441				 */
2442	__u32 family;		/* Allows 4-byte read, but no write */
2443	__u32 type;		/* Allows 4-byte read, but no write */
2444	__u32 protocol;		/* Allows 4-byte read, but no write */
2445	__u32 msg_src_ip4;	/* Allows 1,2,4-byte read and 4-byte write.
2446				 * Stored in network byte order.
2447				 */
2448	__u32 msg_src_ip6[4];	/* Allows 1,2,4-byte read and 4-byte write.
2449				 * Stored in network byte order.
2450				 */
2451};
2452
2453/* User bpf_sock_ops struct to access socket values and specify request ops
2454 * and their replies.
2455 * Some of these fields are in network (big-endian) byte order and may need
2456 * to be converted before use (bpf_ntohl() defined in samples/bpf/bpf_endian.h).
2457 * New fields can only be added at the end of this structure.
2458 */
2459struct bpf_sock_ops {
2460	__u32 op;
2461	union {
2462		__u32 args[4];		/* Optionally passed to bpf program */
2463		__u32 reply;		/* Returned by bpf program */
2464		__u32 replylong[4];	/* Optionally returned by bpf prog */
2465	};
2466	__u32 family;
2467	__u32 remote_ip4;	/* Stored in network byte order */
2468	__u32 local_ip4;	/* Stored in network byte order */
2469	__u32 remote_ip6[4];	/* Stored in network byte order */
2470	__u32 local_ip6[4];	/* Stored in network byte order */
2471	__u32 remote_port;	/* Stored in network byte order */
2472	__u32 local_port;	/* stored in host byte order */
2473	__u32 is_fullsock;	/* Some TCP fields are only valid if
2474				 * there is a full socket. If not, the
2475				 * fields read as zero.
2476				 */
2477	__u32 snd_cwnd;
2478	__u32 srtt_us;		/* Averaged RTT << 3 in usecs */
2479	__u32 bpf_sock_ops_cb_flags; /* flags defined in uapi/linux/tcp.h */
2480	__u32 state;
2481	__u32 rtt_min;
2482	__u32 snd_ssthresh;
2483	__u32 rcv_nxt;
2484	__u32 snd_nxt;
2485	__u32 snd_una;
2486	__u32 mss_cache;
2487	__u32 ecn_flags;
2488	__u32 rate_delivered;
2489	__u32 rate_interval_us;
2490	__u32 packets_out;
2491	__u32 retrans_out;
2492	__u32 total_retrans;
2493	__u32 segs_in;
2494	__u32 data_segs_in;
2495	__u32 segs_out;
2496	__u32 data_segs_out;
2497	__u32 lost_out;
2498	__u32 sacked_out;
2499	__u32 sk_txhash;
2500	__u64 bytes_received;
2501	__u64 bytes_acked;
2502};
2503
2504/* Definitions for bpf_sock_ops_cb_flags */
2505#define BPF_SOCK_OPS_RTO_CB_FLAG	(1<<0)
2506#define BPF_SOCK_OPS_RETRANS_CB_FLAG	(1<<1)
2507#define BPF_SOCK_OPS_STATE_CB_FLAG	(1<<2)
2508#define BPF_SOCK_OPS_ALL_CB_FLAGS	0x7		/* Mask of all currently
2509							 * supported cb flags
2510							 */
2511
2512/* List of known BPF sock_ops operators.
2513 * New entries can only be added at the end 2514 */ 2515enum { 2516 BPF_SOCK_OPS_VOID, 2517 BPF_SOCK_OPS_TIMEOUT_INIT, /* Should return SYN-RTO value to use or 2518 * -1 if default value should be used 2519 */ 2520 BPF_SOCK_OPS_RWND_INIT, /* Should return initial advertized 2521 * window (in packets) or -1 if default 2522 * value should be used 2523 */ 2524 BPF_SOCK_OPS_TCP_CONNECT_CB, /* Calls BPF program right before an 2525 * active connection is initialized 2526 */ 2527 BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB, /* Calls BPF program when an 2528 * active connection is 2529 * established 2530 */ 2531 BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, /* Calls BPF program when a 2532 * passive connection is 2533 * established 2534 */ 2535 BPF_SOCK_OPS_NEEDS_ECN, /* If connection's congestion control 2536 * needs ECN 2537 */ 2538 BPF_SOCK_OPS_BASE_RTT, /* Get base RTT. The correct value is 2539 * based on the path and may be 2540 * dependent on the congestion control 2541 * algorithm. In general it indicates 2542 * a congestion threshold. RTTs above 2543 * this indicate congestion 2544 */ 2545 BPF_SOCK_OPS_RTO_CB, /* Called when an RTO has triggered. 2546 * Arg1: value of icsk_retransmits 2547 * Arg2: value of icsk_rto 2548 * Arg3: whether RTO has expired 2549 */ 2550 BPF_SOCK_OPS_RETRANS_CB, /* Called when skb is retransmitted. 2551 * Arg1: sequence number of 1st byte 2552 * Arg2: # segments 2553 * Arg3: return value of 2554 * tcp_transmit_skb (0 => success) 2555 */ 2556 BPF_SOCK_OPS_STATE_CB, /* Called when TCP changes state. 2557 * Arg1: old_state 2558 * Arg2: new_state 2559 */ 2560}; 2561 2562/* List of TCP states. There is a build check in net/ipv4/tcp.c to detect 2563 * changes between the TCP and BPF versions. Ideally this should never happen. 2564 * If it does, we need to add code to convert them before calling 2565 * the BPF sock_ops function. 2566 */ 2567enum { 2568 BPF_TCP_ESTABLISHED = 1, 2569 BPF_TCP_SYN_SENT, 2570 BPF_TCP_SYN_RECV, 2571 BPF_TCP_FIN_WAIT1, 2572 BPF_TCP_FIN_WAIT2, 2573 BPF_TCP_TIME_WAIT, 2574 BPF_TCP_CLOSE, 2575 BPF_TCP_CLOSE_WAIT, 2576 BPF_TCP_LAST_ACK, 2577 BPF_TCP_LISTEN, 2578 BPF_TCP_CLOSING, /* Now a valid state */ 2579 BPF_TCP_NEW_SYN_RECV, 2580 2581 BPF_TCP_MAX_STATES /* Leave at the end! 
*/ 2582}; 2583 2584#define TCP_BPF_IW 1001 /* Set TCP initial congestion window */ 2585#define TCP_BPF_SNDCWND_CLAMP 1002 /* Set sndcwnd_clamp */ 2586 2587struct bpf_perf_event_value { 2588 __u64 counter; 2589 __u64 enabled; 2590 __u64 running; 2591}; 2592 2593#define BPF_DEVCG_ACC_MKNOD (1ULL << 0) 2594#define BPF_DEVCG_ACC_READ (1ULL << 1) 2595#define BPF_DEVCG_ACC_WRITE (1ULL << 2) 2596 2597#define BPF_DEVCG_DEV_BLOCK (1ULL << 0) 2598#define BPF_DEVCG_DEV_CHAR (1ULL << 1) 2599 2600struct bpf_cgroup_dev_ctx { 2601 /* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */ 2602 __u32 access_type; 2603 __u32 major; 2604 __u32 minor; 2605}; 2606 2607struct bpf_raw_tracepoint_args { 2608 __u64 args[0]; 2609}; 2610 2611/* DIRECT: Skip the FIB rules and go to FIB table associated with device 2612 * OUTPUT: Do lookup from egress perspective; default is ingress 2613 */ 2614#define BPF_FIB_LOOKUP_DIRECT BIT(0) 2615#define BPF_FIB_LOOKUP_OUTPUT BIT(1) 2616 2617enum { 2618 BPF_FIB_LKUP_RET_SUCCESS, /* lookup successful */ 2619 BPF_FIB_LKUP_RET_BLACKHOLE, /* dest is blackholed; can be dropped */ 2620 BPF_FIB_LKUP_RET_UNREACHABLE, /* dest is unreachable; can be dropped */ 2621 BPF_FIB_LKUP_RET_PROHIBIT, /* dest not allowed; can be dropped */ 2622 BPF_FIB_LKUP_RET_NOT_FWDED, /* packet is not forwarded */ 2623 BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */ 2624 BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */ 2625 BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */ 2626 BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */ 2627}; 2628 2629struct bpf_fib_lookup { 2630 /* input: network family for lookup (AF_INET, AF_INET6) 2631 * output: network family of egress nexthop 2632 */ 2633 __u8 family; 2634 2635 /* set if lookup is to consider L4 data - e.g., FIB rules */ 2636 __u8 l4_protocol; 2637 __be16 sport; 2638 __be16 dport; 2639 2640 /* total length of packet from network header - used for MTU check */ 2641 __u16 tot_len; 2642 2643 /* input: L3 device index for lookup 2644 * output: device index from FIB lookup 2645 */ 2646 __u32 ifindex; 2647 2648 union { 2649 /* inputs to lookup */ 2650 __u8 tos; /* AF_INET */ 2651 __be32 flowinfo; /* AF_INET6, flow_label + priority */ 2652 2653 /* output: metric of fib result (IPv4/IPv6 only) */ 2654 __u32 rt_metric; 2655 }; 2656 2657 union { 2658 __be32 ipv4_src; 2659 __u32 ipv6_src[4]; /* in6_addr; network order */ 2660 }; 2661 2662 /* input to bpf_fib_lookup, ipv{4,6}_dst is destination address in 2663 * network header. output: bpf_fib_lookup sets to gateway address 2664 * if FIB lookup returns gateway route 2665 */ 2666 union { 2667 __be32 ipv4_dst; 2668 __u32 ipv6_dst[4]; /* in6_addr; network order */ 2669 }; 2670 2671 /* output */ 2672 __be16 h_vlan_proto; 2673 __be16 h_vlan_TCI; 2674 __u8 smac[6]; /* ETH_ALEN */ 2675 __u8 dmac[6]; /* ETH_ALEN */ 2676}; 2677 2678enum bpf_task_fd_type { 2679 BPF_FD_TYPE_RAW_TRACEPOINT, /* tp name */ 2680 BPF_FD_TYPE_TRACEPOINT, /* tp name */ 2681 BPF_FD_TYPE_KPROBE, /* (symbol + offset) or addr */ 2682 BPF_FD_TYPE_KRETPROBE, /* (symbol + offset) or addr */ 2683 BPF_FD_TYPE_UPROBE, /* filename + offset */ 2684 BPF_FD_TYPE_URETPROBE, /* filename + offset */ 2685}; 2686 2687#endif /* _UAPI__LINUX_BPF_H__ */ 2688