/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#ifndef _UAPI__LINUX_BPF_H__
#define _UAPI__LINUX_BPF_H__

#include <linux/types.h>
#include <linux/bpf_common.h>

/* Extended instruction set based on top of classic BPF */

/* instruction classes */
#define BPF_JMP32	0x06	/* jmp mode in word width */
#define BPF_ALU64	0x07	/* alu mode in double word width */

/* ld/ldx fields */
#define BPF_DW		0x18	/* double word (64-bit) */
#define BPF_XADD	0xc0	/* exclusive add */

/* alu/jmp fields */
#define BPF_MOV		0xb0	/* mov reg to reg */
#define BPF_ARSH	0xc0	/* sign extending arithmetic shift right */

/* change endianness of a register */
#define BPF_END		0xd0	/* flags for endianness conversion: */
#define BPF_TO_LE	0x00	/* convert to little-endian */
#define BPF_TO_BE	0x08	/* convert to big-endian */
#define BPF_FROM_LE	BPF_TO_LE
#define BPF_FROM_BE	BPF_TO_BE

/* jmp encodings */
#define BPF_JNE		0x50	/* jump != */
#define BPF_JLT		0xa0	/* LT is unsigned, '<' */
#define BPF_JLE		0xb0	/* LE is unsigned, '<=' */
#define BPF_JSGT	0x60	/* SGT is signed '>', GT in x86 */
#define BPF_JSGE	0x70	/* SGE is signed '>=', GE in x86 */
#define BPF_JSLT	0xc0	/* SLT is signed, '<' */
#define BPF_JSLE	0xd0	/* SLE is signed, '<=' */
#define BPF_CALL	0x80	/* function call */
#define BPF_EXIT	0x90	/* function return */

/* Register numbers */
enum {
	BPF_REG_0 = 0,
	BPF_REG_1,
	BPF_REG_2,
	BPF_REG_3,
	BPF_REG_4,
	BPF_REG_5,
	BPF_REG_6,
	BPF_REG_7,
	BPF_REG_8,
	BPF_REG_9,
	BPF_REG_10,
	__MAX_BPF_REG,
};

/* BPF has 10 general-purpose 64-bit registers and a stack frame. */
#define MAX_BPF_REG	__MAX_BPF_REG

struct bpf_insn {
	__u8	code;		/* opcode */
	__u8	dst_reg:4;	/* dest register */
	__u8	src_reg:4;	/* source register */
	__s16	off;		/* signed offset */
	__s32	imm;		/* signed immediate constant */
};

/* Key of a BPF_MAP_TYPE_LPM_TRIE entry */
struct bpf_lpm_trie_key {
	__u32	prefixlen;	/* up to 32 for AF_INET, 128 for AF_INET6 */
	__u8	data[0];	/* Arbitrary size */
};

struct bpf_cgroup_storage_key {
	__u64	cgroup_inode_id;	/* cgroup inode id */
	__u32	attach_type;		/* program attach type */
};
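/* For illustration only (not part of the UAPI): a minimal two-instruction
 * program that sets R0 to 0 and returns could be assembled by hand as:
 *
 *	struct bpf_insn prog[] = {
 *		{ .code = BPF_ALU64 | BPF_MOV | BPF_K,
 *		  .dst_reg = BPF_REG_0, .imm = 0 },	// r0 = 0
 *		{ .code = BPF_JMP | BPF_EXIT },		// return r0
 *	};
 *
 * BPF_JMP and BPF_K come from <linux/bpf_common.h>; in practice such
 * sequences are usually generated by a compiler or a library like libbpf.
 */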
/* BPF syscall commands, see bpf(2) man-page for details. */
enum bpf_cmd {
	BPF_MAP_CREATE,
	BPF_MAP_LOOKUP_ELEM,
	BPF_MAP_UPDATE_ELEM,
	BPF_MAP_DELETE_ELEM,
	BPF_MAP_GET_NEXT_KEY,
	BPF_PROG_LOAD,
	BPF_OBJ_PIN,
	BPF_OBJ_GET,
	BPF_PROG_ATTACH,
	BPF_PROG_DETACH,
	BPF_PROG_TEST_RUN,
	BPF_PROG_GET_NEXT_ID,
	BPF_MAP_GET_NEXT_ID,
	BPF_PROG_GET_FD_BY_ID,
	BPF_MAP_GET_FD_BY_ID,
	BPF_OBJ_GET_INFO_BY_FD,
	BPF_PROG_QUERY,
	BPF_RAW_TRACEPOINT_OPEN,
	BPF_BTF_LOAD,
	BPF_BTF_GET_FD_BY_ID,
	BPF_TASK_FD_QUERY,
	BPF_MAP_LOOKUP_AND_DELETE_ELEM,
	BPF_MAP_FREEZE,
	BPF_BTF_GET_NEXT_ID,
	BPF_MAP_LOOKUP_BATCH,
	BPF_MAP_LOOKUP_AND_DELETE_BATCH,
	BPF_MAP_UPDATE_BATCH,
	BPF_MAP_DELETE_BATCH,
	BPF_LINK_CREATE,
	BPF_LINK_UPDATE,
};

enum bpf_map_type {
	BPF_MAP_TYPE_UNSPEC,
	BPF_MAP_TYPE_HASH,
	BPF_MAP_TYPE_ARRAY,
	BPF_MAP_TYPE_PROG_ARRAY,
	BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	BPF_MAP_TYPE_PERCPU_HASH,
	BPF_MAP_TYPE_PERCPU_ARRAY,
	BPF_MAP_TYPE_STACK_TRACE,
	BPF_MAP_TYPE_CGROUP_ARRAY,
	BPF_MAP_TYPE_LRU_HASH,
	BPF_MAP_TYPE_LRU_PERCPU_HASH,
	BPF_MAP_TYPE_LPM_TRIE,
	BPF_MAP_TYPE_ARRAY_OF_MAPS,
	BPF_MAP_TYPE_HASH_OF_MAPS,
	BPF_MAP_TYPE_DEVMAP,
	BPF_MAP_TYPE_SOCKMAP,
	BPF_MAP_TYPE_CPUMAP,
	BPF_MAP_TYPE_XSKMAP,
	BPF_MAP_TYPE_SOCKHASH,
#ifndef __GENKSYMS__
	BPF_MAP_TYPE_CGROUP_STORAGE,
	BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
	BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
	BPF_MAP_TYPE_QUEUE,
	BPF_MAP_TYPE_STACK,
	BPF_MAP_TYPE_SK_STORAGE,
	BPF_MAP_TYPE_DEVMAP_HASH,
	BPF_MAP_TYPE_STRUCT_OPS,
#endif /* __GENKSYMS__ */
};
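/* For illustration only: creating a small BPF_MAP_TYPE_ARRAY from user space
 * with the raw bpf(2) syscall (union bpf_attr is defined further below).
 * Most users would go through a library such as libbpf instead.
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_ARRAY,
 *		.key_size    = sizeof(__u32),
 *		.value_size  = sizeof(__u64),
 *		.max_entries = 64,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */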
/* Note that tracing related programs such as
 * BPF_PROG_TYPE_{KPROBE,TRACEPOINT,PERF_EVENT,RAW_TRACEPOINT}
 * are not subject to a stable API since kernel internal data
 * structures can change from release to release and may
 * therefore break existing tracing BPF programs. Tracing BPF
 * programs correspond to /a/ specific kernel which is to be
 * analyzed, and not /a/ specific kernel /and/ all future ones.
 */
enum bpf_prog_type {
	BPF_PROG_TYPE_UNSPEC,
	BPF_PROG_TYPE_SOCKET_FILTER,
	BPF_PROG_TYPE_KPROBE,
	BPF_PROG_TYPE_SCHED_CLS,
	BPF_PROG_TYPE_SCHED_ACT,
	BPF_PROG_TYPE_TRACEPOINT,
	BPF_PROG_TYPE_XDP,
	BPF_PROG_TYPE_PERF_EVENT,
	BPF_PROG_TYPE_CGROUP_SKB,
	BPF_PROG_TYPE_CGROUP_SOCK,
	BPF_PROG_TYPE_LWT_IN,
	BPF_PROG_TYPE_LWT_OUT,
	BPF_PROG_TYPE_LWT_XMIT,
	BPF_PROG_TYPE_SOCK_OPS,
	BPF_PROG_TYPE_SK_SKB,
	BPF_PROG_TYPE_CGROUP_DEVICE,
	BPF_PROG_TYPE_SK_MSG,
	BPF_PROG_TYPE_RAW_TRACEPOINT,
	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
	BPF_PROG_TYPE_LWT_SEG6LOCAL,
	BPF_PROG_TYPE_LIRC_MODE2,
#ifndef __GENKSYMS__
	BPF_PROG_TYPE_SK_REUSEPORT,
	BPF_PROG_TYPE_FLOW_DISSECTOR,
	BPF_PROG_TYPE_CGROUP_SYSCTL,
	BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
	BPF_PROG_TYPE_CGROUP_SOCKOPT,
	BPF_PROG_TYPE_TRACING,
	BPF_PROG_TYPE_STRUCT_OPS,
	BPF_PROG_TYPE_EXT,
	BPF_PROG_TYPE_LSM,
#endif /* __GENKSYMS__ */
};

enum bpf_attach_type {
	BPF_CGROUP_INET_INGRESS,
	BPF_CGROUP_INET_EGRESS,
	BPF_CGROUP_INET_SOCK_CREATE,
	BPF_CGROUP_SOCK_OPS,
	BPF_SK_SKB_STREAM_PARSER,
	BPF_SK_SKB_STREAM_VERDICT,
	BPF_CGROUP_DEVICE,
	BPF_SK_MSG_VERDICT,
	BPF_CGROUP_INET4_BIND,
	BPF_CGROUP_INET6_BIND,
	BPF_CGROUP_INET4_CONNECT,
	BPF_CGROUP_INET6_CONNECT,
	BPF_CGROUP_INET4_POST_BIND,
	BPF_CGROUP_INET6_POST_BIND,
	BPF_CGROUP_UDP4_SENDMSG,
	BPF_CGROUP_UDP6_SENDMSG,
	BPF_LIRC_MODE2,
#ifndef __GENKSYMS__
	BPF_FLOW_DISSECTOR,
	BPF_CGROUP_SYSCTL,
	BPF_CGROUP_UDP4_RECVMSG,
	BPF_CGROUP_UDP6_RECVMSG,
	BPF_CGROUP_GETSOCKOPT,
	BPF_CGROUP_SETSOCKOPT,
	BPF_TRACE_RAW_TP,
	BPF_TRACE_FENTRY,
	BPF_TRACE_FEXIT,
	BPF_MODIFY_RETURN,
	BPF_LSM_MAC,
#endif /* __GENKSYMS__ */
	__MAX_BPF_ATTACH_TYPE
};

#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
/* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
 *
 * NONE(default): No further bpf programs allowed in the subtree.
 *
 * BPF_F_ALLOW_OVERRIDE: If a sub-cgroup installs some bpf program,
 * the program in this cgroup yields to the sub-cgroup program.
 *
 * BPF_F_ALLOW_MULTI: If a sub-cgroup installs some bpf program,
 * that cgroup program gets run in addition to the program in this cgroup.
 *
 * Only one program is allowed to be attached to a cgroup with
 * NONE or BPF_F_ALLOW_OVERRIDE flag.
 * Attaching another program on top of NONE or BPF_F_ALLOW_OVERRIDE will
 * release the old program and attach the new one. Attach flags have to match.
 *
 * Multiple programs are allowed to be attached to a cgroup with
 * BPF_F_ALLOW_MULTI flag. They are executed in FIFO order
 * (those that were attached first, run first).
 * The programs of the sub-cgroup are executed first, then the programs of
 * this cgroup, and then the programs of the parent cgroup.
 * When a child program makes a decision (like picking TCP CA or sock bind),
 * the parent program has a chance to override it.
 *
 * With BPF_F_ALLOW_MULTI a new program is added to the end of the list of
 * programs for a cgroup. It is also possible to replace an old program at
 * any position by additionally specifying the BPF_F_REPLACE flag and the
 * program to be replaced in the replace_bpf_fd attribute. The old program
 * at this position will be released.
 *
 * A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups.
 * A cgroup with NONE doesn't allow any programs in sub-cgroups.
 * Ex1:
 * cgrp1 (MULTI progs A, B) ->
 *    cgrp2 (OVERRIDE prog C) ->
 *      cgrp3 (MULTI prog D) ->
 *        cgrp4 (OVERRIDE prog E) ->
 *          cgrp5 (NONE prog F)
 * the event in cgrp5 triggers execution of F,D,A,B in that order.
 * if prog F is detached, the execution is E,D,A,B
 * if prog F and D are detached, the execution is E,A,B
 * if prog F, E and D are detached, the execution is C,A,B
 *
 * All eligible programs are executed regardless of return code from
 * earlier programs.
 */
#define BPF_F_ALLOW_OVERRIDE	(1U << 0)
#define BPF_F_ALLOW_MULTI	(1U << 1)
#define BPF_F_REPLACE		(1U << 2)

/* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
 * verifier will perform strict alignment checking as if the kernel
 * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set,
 * and NET_IP_ALIGN defined to 2.
 */
#define BPF_F_STRICT_ALIGNMENT	(1U << 0)

/* If BPF_F_ANY_ALIGNMENT is used in BPF_PROG_LOAD command, the
 * verifier will allow any alignment whatsoever. On platforms
 * with strict alignment requirements for loads and stores (such
 * as sparc and mips) the verifier validates that all loads and
 * stores provably follow this requirement. This flag turns that
 * checking and enforcement off.
 *
 * It is mostly used for testing when we want to validate the
 * context and memory access aspects of the verifier, but because
 * of an unaligned access the alignment check would trigger before
 * the one we are interested in.
 */
#define BPF_F_ANY_ALIGNMENT	(1U << 1)

/* BPF_F_TEST_RND_HI32 is used in BPF_PROG_LOAD command for testing purposes.
 * The verifier does sub-register def/use analysis and identifies instructions
 * whose def only matters for the low 32 bits, while the high 32 bits are
 * never referenced later through an implicit zero extension. It notifies JIT
 * back-ends that it is safe to skip clearing the high 32 bits for these
 * instructions, which saves some back-ends a lot of code-gen. However, such
 * an optimization is not necessary on some arches, for example x86_64 and
 * arm64, whose JIT back-ends therefore do not use the verifier's analysis
 * result. But we still want a way to verify the correctness of the described
 * optimization on x86_64, on which testsuites are frequently exercised.
 *
 * So, this flag is introduced. Once it is set, the verifier will randomize
 * the high 32 bits for those instructions that have been identified as safe
 * to skip. Then, if the verifier's analysis is incorrect, such randomization
 * will regress tests and expose bugs.
 */
#define BPF_F_TEST_RND_HI32	(1U << 2)
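/* For illustration only: attaching an already-loaded program to a cgroup in
 * the multi-program mode described above. cgroup_fd and prog_fd are assumed
 * to come from open(2) on a cgroup v2 directory and from BPF_PROG_LOAD.
 *
 *	union bpf_attr attr = {
 *		.target_fd     = cgroup_fd,
 *		.attach_bpf_fd = prog_fd,
 *		.attach_type   = BPF_CGROUP_INET_INGRESS,
 *		.attach_flags  = BPF_F_ALLOW_MULTI,
 *	};
 *	int err = syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */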
/* The verifier's internal test flag. Behavior is undefined. */
#define BPF_F_TEST_STATE_FREQ	(1U << 3)

/* When BPF ldimm64's insn[0].src_reg != 0 then this can have
 * two extensions:
 *
 * insn[0].src_reg:  BPF_PSEUDO_MAP_FD   BPF_PSEUDO_MAP_VALUE
 * insn[0].imm:      map fd              map fd
 * insn[1].imm:      0                   offset into value
 * insn[0].off:      0                   0
 * insn[1].off:      0                   0
 * ldimm64 rewrite:  address of map      address of map[0]+offset
 * verifier type:    CONST_PTR_TO_MAP    PTR_TO_MAP_VALUE
 */
#define BPF_PSEUDO_MAP_FD	1
#define BPF_PSEUDO_MAP_VALUE	2

/* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
 * offset to another bpf function
 */
#define BPF_PSEUDO_CALL		1
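/* For illustration only: the BPF_PSEUDO_MAP_FD form of ldimm64 from the
 * table above, encoded by hand. The second struct bpf_insn is the mandatory
 * second half of the 16-byte ldimm64 instruction and carries the upper 32
 * bits of the immediate (zero here). BPF_LD and BPF_IMM come from
 * <linux/bpf_common.h>, and map_fd is assumed to be a valid map descriptor.
 *
 *	struct bpf_insn ld_map[2] = {
 *		{ .code = BPF_LD | BPF_DW | BPF_IMM, .dst_reg = BPF_REG_1,
 *		  .src_reg = BPF_PSEUDO_MAP_FD, .imm = map_fd },
 *		{ 0 },
 *	};
 *
 * At load time the verifier rewrites the pair into the map's actual address.
 */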
/* flags for BPF_MAP_UPDATE_ELEM command */
enum {
	BPF_ANY		= 0, /* create new element or update existing */
	BPF_NOEXIST	= 1, /* create new element if it didn't exist */
	BPF_EXIST	= 2, /* update existing element */
	BPF_F_LOCK	= 4, /* spin_lock-ed map_lookup/map_update */
};

/* flags for BPF_MAP_CREATE command */
enum {
	BPF_F_NO_PREALLOC	= (1U << 0),
/* Instead of having one common LRU list in the
 * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list
 * which can scale and perform better.
 * Note, the LRU nodes (including free nodes) cannot be moved
 * across different LRU lists.
 */
	BPF_F_NO_COMMON_LRU	= (1U << 1),
/* Specify numa node during map creation */
	BPF_F_NUMA_NODE		= (1U << 2),

/* Flags for accessing BPF object from syscall side. */
	BPF_F_RDONLY		= (1U << 3),
	BPF_F_WRONLY		= (1U << 4),

/* Flag for stack_map, store build_id+offset instead of pointer */
	BPF_F_STACK_BUILD_ID	= (1U << 5),

/* Zero-initialize hash function seed. This should only be used for testing. */
	BPF_F_ZERO_SEED		= (1U << 6),

/* Flags for accessing BPF object from program side. */
	BPF_F_RDONLY_PROG	= (1U << 7),
	BPF_F_WRONLY_PROG	= (1U << 8),

/* Clone map from listener for newly accepted socket */
	BPF_F_CLONE		= (1U << 9),

/* Enable memory-mapping BPF map */
	BPF_F_MMAPABLE		= (1U << 10),
};

/* Flags for BPF_PROG_QUERY. */

/* Query effective (directly attached + inherited from ancestor cgroups)
 * programs that will be executed for events within a cgroup.
 * attach_flags with this flag are returned only for directly attached programs.
 */
#define BPF_F_QUERY_EFFECTIVE	(1U << 0)

enum bpf_stack_build_id_status {
	/* user space needs an empty entry to identify the end of a trace */
	BPF_STACK_BUILD_ID_EMPTY = 0,
	/* with valid build_id and offset */
	BPF_STACK_BUILD_ID_VALID = 1,
	/* couldn't get build_id, fallback to ip */
	BPF_STACK_BUILD_ID_IP = 2,
};

#define BPF_BUILD_ID_SIZE 20
struct bpf_stack_build_id {
	__s32		status;
	unsigned char	build_id[BPF_BUILD_ID_SIZE];
	union {
		__u64	offset;
		__u64	ip;
	};
};

#define BPF_OBJ_NAME_LEN 16U

union bpf_attr {
	struct { /* anonymous struct used by BPF_MAP_CREATE command */
		__u32	map_type;	/* one of enum bpf_map_type */
		__u32	key_size;	/* size of key in bytes */
		__u32	value_size;	/* size of value in bytes */
		__u32	max_entries;	/* max number of entries in a map */
		__u32	map_flags;	/* BPF_MAP_CREATE related
					 * flags defined above.
					 */
		__u32	inner_map_fd;	/* fd pointing to the inner map */
		__u32	numa_node;	/* numa node (effective only if
					 * BPF_F_NUMA_NODE is set).
					 */
		char	map_name[BPF_OBJ_NAME_LEN];
		__u32	map_ifindex;	/* ifindex of netdev to create on */
		__u32	btf_fd;		/* fd pointing to a BTF type data */
		__u32	btf_key_type_id;	/* BTF type_id of the key */
		__u32	btf_value_type_id;	/* BTF type_id of the value */
#ifndef __GENKSYMS__
		__u32	btf_vmlinux_value_type_id;/* BTF type_id of a kernel-
						   * struct stored as the
						   * map value
						   */
#endif /* __GENKSYMS__ */
	};

	struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
		__u32		map_fd;
		__aligned_u64	key;
		union {
			__aligned_u64 value;
			__aligned_u64 next_key;
		};
		__u64		flags;
	};

#ifndef __GENKSYMS__
	struct { /* struct used by BPF_MAP_*_BATCH commands */
		__aligned_u64	in_batch;	/* start batch,
						 * NULL to start from beginning
						 */
		__aligned_u64	out_batch;	/* output: next start batch */
		__aligned_u64	keys;
		__aligned_u64	values;
		__u32		count;		/* input/output:
						 * input: # of key/value
						 * elements
						 * output: # of filled elements
						 */
		__u32		map_fd;
		__u64		elem_flags;
		__u64		flags;
	} batch;
#endif /* __GENKSYMS__ */

	struct { /* anonymous struct used by BPF_PROG_LOAD command */
		__u32		prog_type;	/* one of enum bpf_prog_type */
		__u32		insn_cnt;
		__aligned_u64	insns;
		__aligned_u64	license;
		__u32		log_level;	/* verbosity level of verifier */
		__u32		log_size;	/* size of user buffer */
		__aligned_u64	log_buf;	/* user supplied buffer */
		__u32		kern_version;	/* not used */
		__u32		prog_flags;
		char		prog_name[BPF_OBJ_NAME_LEN];
		__u32		prog_ifindex;	/* ifindex of netdev to prep for */
		/* For some prog types expected attach type must be known at
		 * load time to verify attach type specific parts of prog
		 * (context accesses, allowed helpers, etc).
		 */
		__u32		expected_attach_type;
#ifndef __GENKSYMS__
		__u32		prog_btf_fd;	/* fd pointing to BTF type data */
		__u32		func_info_rec_size;	/* userspace bpf_func_info size */
		__aligned_u64	func_info;	/* func info */
		__u32		func_info_cnt;	/* number of bpf_func_info records */
		__u32		line_info_rec_size;	/* userspace bpf_line_info size */
		__aligned_u64	line_info;	/* line info */
		__u32		line_info_cnt;	/* number of bpf_line_info records */
		__u32		attach_btf_id;	/* in-kernel BTF type id to attach to */
		__u32		attach_prog_fd;	/* 0 to attach to vmlinux */
#endif /* __GENKSYMS__ */
	};

	struct { /* anonymous struct used by BPF_OBJ_* commands */
		__aligned_u64	pathname;
		__u32		bpf_fd;
		__u32		file_flags;
	};

	struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
		__u32		target_fd;	/* container object to attach to */
		__u32		attach_bpf_fd;	/* eBPF program to attach */
		__u32		attach_type;
		__u32		attach_flags;
#ifndef __GENKSYMS__
		__u32		replace_bpf_fd;	/* previously attached eBPF
						 * program to replace if
						 * BPF_F_REPLACE is used
						 */
#endif /* __GENKSYMS__ */
	};

	struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
		__u32		prog_fd;
		__u32		retval;
		__u32		data_size_in;	/* input: len of data_in */
		__u32		data_size_out;	/* input/output: len of data_out
						 * returns ENOSPC if data_out
						 * is too small.
						 */
		__aligned_u64	data_in;
		__aligned_u64	data_out;
		__u32		repeat;
		__u32		duration;
#ifndef __GENKSYMS__
		__u32		ctx_size_in;	/* input: len of ctx_in */
		__u32		ctx_size_out;	/* input/output: len of ctx_out
						 * returns ENOSPC if ctx_out
						 * is too small.
						 */
		__aligned_u64	ctx_in;
		__aligned_u64	ctx_out;
#endif /* __GENKSYMS__ */
	} test;

	struct { /* anonymous struct used by BPF_*_GET_*_ID */
		union {
			__u32	start_id;
			__u32	prog_id;
			__u32	map_id;
			__u32	btf_id;
		};
		__u32		next_id;
		__u32		open_flags;
	};

	struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */
		__u32		bpf_fd;
		__u32		info_len;
		__aligned_u64	info;
	} info;

	struct { /* anonymous struct used by BPF_PROG_QUERY command */
		__u32		target_fd;	/* container object to query */
		__u32		attach_type;
		__u32		query_flags;
		__u32		attach_flags;
		__aligned_u64	prog_ids;
		__u32		prog_cnt;
	} query;

	struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */
		__u64		name;
		__u32		prog_fd;
	} raw_tracepoint;

	struct { /* anonymous struct for BPF_BTF_LOAD */
		__aligned_u64	btf;
		__aligned_u64	btf_log_buf;
		__u32		btf_size;
		__u32		btf_log_size;
		__u32		btf_log_level;
	};

	struct {
		__u32		pid;		/* input: pid */
		__u32		fd;		/* input: fd */
		__u32		flags;		/* input: flags */
		__u32		buf_len;	/* input/output: buf len */
		__aligned_u64	buf;		/* input/output:
						 * tp_name for tracepoint
						 * symbol for kprobe
						 * filename for uprobe
						 */
		__u32		prog_id;	/* output: prog_id */
		__u32		fd_type;	/* output: BPF_FD_TYPE_* */
		__u64		probe_offset;	/* output: probe_offset */
		__u64		probe_addr;	/* output: probe_addr */
	} task_fd_query;
#ifndef __GENKSYMS__

	struct { /* struct used by BPF_LINK_CREATE command */
		__u32		prog_fd;	/* eBPF program to attach */
		__u32		target_fd;	/* object to attach to */
		__u32		attach_type;	/* attach type */
		__u32		flags;		/* extra flags */
	} link_create;

	struct { /* struct used by BPF_LINK_UPDATE command */
		__u32		link_fd;	/* link fd */
		/* new program fd to update link with */
		__u32		new_prog_fd;
		__u32		flags;		/* extra flags */
		/* expected link's program fd; it is specified only if
		 * BPF_F_REPLACE flag is set in flags
		 */
		__u32		old_prog_fd;
	} link_update;

#endif /* __GENKSYMS__ */
} __attribute__((aligned(8)));
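/* For illustration only: loading a minimal "return 0" socket filter via the
 * raw syscall, reusing the two-instruction prog[] array sketched next to
 * struct bpf_insn above.
 *
 *	union bpf_attr attr = {
 *		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
 *		.insn_cnt  = 2,
 *		.insns     = (__u64)(unsigned long)prog,
 *		.license   = (__u64)(unsigned long)"GPL",
 *	};
 *	int prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 */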
/* The description below is an attempt at providing documentation to eBPF
 * developers about the multiple available eBPF helper functions. It can be
 * parsed and used to produce a manual page. The workflow is the following,
 * and requires the rst2man utility:
 *
 *     $ ./scripts/bpf_helpers_doc.py \
 *             --filename include/uapi/linux/bpf.h > /tmp/bpf-helpers.rst
 *     $ rst2man /tmp/bpf-helpers.rst > /tmp/bpf-helpers.7
 *     $ man /tmp/bpf-helpers.7
 *
 * Note that in order to produce this external documentation, some RST
 * formatting is used in the descriptions to get "bold" and "italics" in
 * manual pages. Also note that the few trailing white spaces are
 * intentional, removing them would break paragraphs for rst2man.
 *
 * Start of BPF helper function descriptions:
 *
 * void *bpf_map_lookup_elem(struct bpf_map *map, const void *key)
 * 	Description
 * 		Perform a lookup in *map* for an entry associated to *key*.
 * 	Return
 * 		Map value associated to *key*, or **NULL** if no entry was
 * 		found.
 *
 * int bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags)
 * 	Description
 * 		Add or update the value of the entry associated to *key* in
 * 		*map* with *value*. *flags* is one of:
 *
 * 		**BPF_NOEXIST**
 * 			The entry for *key* must not exist in the map.
 * 		**BPF_EXIST**
 * 			The entry for *key* must already exist in the map.
 * 		**BPF_ANY**
 * 			No condition on the existence of the entry for *key*.
 *
 * 		Flag value **BPF_NOEXIST** cannot be used for maps of types
 * 		**BPF_MAP_TYPE_ARRAY** or **BPF_MAP_TYPE_PERCPU_ARRAY** (all
 * 		elements always exist); the helper would return an error.
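 *
 * 		A typical counting pattern (illustrative sketch; assumes a
 * 		*counters* map and the usual helper declarations from
 * 		**bpf_helpers.h**):
 *
 * 		::
 *
 * 			__u32 key = 0;
 * 			__u64 one = 1, *val;
 *
 * 			val = bpf_map_lookup_elem(&counters, &key);
 * 			if (val)
 * 				__sync_fetch_and_add(val, 1);
 * 			else
 * 				bpf_map_update_elem(&counters, &key, &one,
 * 						    BPF_NOEXIST);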
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_map_delete_elem(struct bpf_map *map, const void *key)
 * 	Description
 * 		Delete entry with *key* from *map*.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr)
 * 	Description
 * 		For tracing programs, safely attempt to read *size* bytes from
 * 		kernel space address *unsafe_ptr* and store the data in *dst*.
 *
 * 		Generally, use bpf_probe_read_user() or bpf_probe_read_kernel()
 * 		instead.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * u64 bpf_ktime_get_ns(void)
 * 	Description
 * 		Return the time elapsed since system boot, in nanoseconds.
 * 	Return
 * 		Current *ktime*.
 *
 * int bpf_trace_printk(const char *fmt, u32 fmt_size, ...)
 * 	Description
 * 		This helper is a "printk()-like" facility for debugging. It
 * 		prints a message defined by format *fmt* (of size *fmt_size*)
 * 		to file *\/sys/kernel/debug/tracing/trace* from DebugFS, if
 * 		available. It can take up to three additional **u64**
 * 		arguments (as for eBPF helpers, the total number of arguments
 * 		is limited to five).
 *
 * 		Each time the helper is called, it appends a line to the trace.
 * 		Lines are discarded while *\/sys/kernel/debug/tracing/trace* is
 * 		open; use *\/sys/kernel/debug/tracing/trace_pipe* to avoid this.
 * 		The format of the trace is customizable, and the exact output
 * 		one will get depends on the options set in
 * 		*\/sys/kernel/debug/tracing/trace_options* (see also the
 * 		*README* file under the same directory). However, it usually
 * 		defaults to something like:
 *
 * 		::
 *
 * 			telnet-470   [001] .N.. 419421.045894: 0x00000001: <formatted msg>
 *
 * 		In the above:
 *
 * 			* ``telnet`` is the name of the current task.
 * 			* ``470`` is the PID of the current task.
 * 			* ``001`` is the CPU number on which the task is
 * 			  running.
 * 			* In ``.N..``, each character refers to a set of
 * 			  options (whether irqs are enabled, scheduling
 * 			  options, whether hard/softirqs are running, level of
 * 			  preempt_disabled respectively). **N** means that
 * 			  **TIF_NEED_RESCHED** and **PREEMPT_NEED_RESCHED**
 * 			  are set.
 * 			* ``419421.045894`` is a timestamp.
 * 			* ``0x00000001`` is a fake value used by BPF for the
 * 			  instruction pointer register.
 * 			* ``<formatted msg>`` is the message formatted with
 * 			  *fmt*.
 *
 * 		The conversion specifiers supported by *fmt* are similar to,
 * 		but more limited than, those of printk(). They are **%d**,
 * 		**%i**, **%u**, **%x**, **%ld**, **%li**, **%lu**, **%lx**,
 * 		**%lld**, **%lli**, **%llu**, **%llx**, **%p**, **%s**. No
 * 		modifier (size of field, padding with zeroes, etc.) is
 * 		available, and the helper will return **-EINVAL** (but print
 * 		nothing) if it encounters an unknown specifier.
 *
 * 		Also, note that **bpf_trace_printk**\ () is slow, and should
 * 		only be used for debugging purposes. For this reason, the
 * 		first time the helper is used (or more precisely, when
 * 		**trace_printk**\ () buffers are allocated), a notice block
 * 		(spanning several lines) is printed to kernel logs, stating
 * 		that the helper should not be used "for production use". For
 * 		passing values to user space, perf events should be preferred.
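 *
 * 		A quick debugging sketch (the format string must live on the
 * 		BPF stack):
 *
 * 		::
 *
 * 			char fmt[] = "skb len %d\n";
 *
 * 			bpf_trace_printk(fmt, sizeof(fmt), skb->len);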
 * 	Return
 * 		The number of bytes written to the buffer, or a negative error
 * 		in case of failure.
 *
 * u32 bpf_get_prandom_u32(void)
 * 	Description
 * 		Get a pseudo-random number.
 *
 * 		From a security point of view, this helper uses its own
 * 		pseudo-random internal state, and cannot be used to infer the
 * 		seed of other random functions in the kernel. However, it is
 * 		essential to note that the generator used by the helper is not
 * 		cryptographically secure.
 * 	Return
 * 		A random 32-bit unsigned value.
 *
 * u32 bpf_get_smp_processor_id(void)
 * 	Description
 * 		Get the SMP (symmetric multiprocessing) processor id. Note that
 * 		all programs run with preemption disabled, which means that the
 * 		SMP processor id is stable throughout the execution of the
 * 		program.
 * 	Return
 * 		The SMP id of the processor running the program.
 *
 * int bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags)
 * 	Description
 * 		Store *len* bytes from address *from* into the packet
 * 		associated to *skb*, at *offset*. *flags* are a combination of
 * 		**BPF_F_RECOMPUTE_CSUM** (automatically recompute the
 * 		checksum for the packet after storing the bytes) and
 * 		**BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\
 * 		**->swhash** and *skb*\ **->l4hash** to 0).
 *
 * 		A call to this helper may change the underlying packet buffer.
 * 		Therefore, at load time, all checks on pointers previously
 * 		done by the verifier are invalidated and must be performed
 * 		again, if the helper is used in combination with direct packet
 * 		access.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size)
 * 	Description
 * 		Recompute the layer 3 (e.g. IP) checksum for the packet
 * 		associated to *skb*. Computation is incremental, so the helper
 * 		must know the former value of the header field that was
 * 		modified (*from*), the new value of this field (*to*), and the
 * 		number of bytes (2 or 4) for this field, stored in *size*.
 * 		Alternatively, it is possible to store the difference between
 * 		the previous and the new values of the header field in *to*, by
 * 		setting *from* and *size* to 0. For both methods, *offset*
 * 		indicates the location of the IP checksum within the packet.
 *
 * 		This helper works in combination with **bpf_csum_diff**\ (),
 * 		which does not update the checksum in-place, but offers more
 * 		flexibility and can handle sizes larger than 2 or 4 for the
 * 		checksum to update.
 *
 * 		A call to this helper may change the underlying packet buffer.
 * 		Therefore, at load time, all checks on pointers previously
 * 		done by the verifier are invalidated and must be performed
 * 		again, if the helper is used in combination with direct packet
 * 		access.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags)
 * 	Description
 * 		Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the
 * 		packet associated to *skb*. Computation is incremental, so the
 * 		helper must know the former value of the header field that was
 * 		modified (*from*), the new value of this field (*to*), and the
 * 		number of bytes (2 or 4) for this field, stored on the lowest
 * 		four bits of *flags*. Alternatively, it is possible to store
 * 		the difference between the previous and the new values of the
 * 		header field in *to*, by setting *from* and the four lowest
 * 		bits of *flags* to 0. For both methods, *offset* indicates the
 * 		location of the IP checksum within the packet. In addition to
 * 		the size of the field, *flags* can be added (bitwise OR) actual
 * 		flags. With **BPF_F_MARK_MANGLED_0**, a null checksum is left
 * 		untouched (unless **BPF_F_MARK_ENFORCE** is added as well), and
 * 		for updates resulting in a null checksum the value is set to
 * 		**CSUM_MANGLED_0** instead. Flag **BPF_F_PSEUDO_HDR** indicates
 * 		the checksum is to be computed against a pseudo-header.
 *
 * 		This helper works in combination with **bpf_csum_diff**\ (),
 * 		which does not update the checksum in-place, but offers more
 * 		flexibility and can handle sizes larger than 2 or 4 for the
 * 		checksum to update.
 *
 * 		A call to this helper may change the underlying packet buffer.
 * 		Therefore, at load time, all checks on pointers previously
 * 		done by the verifier are invalidated and must be performed
 * 		again, if the helper is used in combination with direct packet
 * 		access.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index)
 * 	Description
 * 		This special helper is used to trigger a "tail call", or in
 * 		other words, to jump into another eBPF program. The same stack
 * 		frame is used (but values on stack and in registers for the
 * 		caller are not accessible to the callee). This mechanism allows
 * 		for program chaining, either for raising the maximum number of
 * 		available eBPF instructions, or to execute given programs in
 * 		conditional blocks. For security reasons, there is an upper
 * 		limit to the number of successive tail calls that can be
 * 		performed.
 *
 * 		Upon call of this helper, the program attempts to jump into a
 * 		program referenced at index *index* in *prog_array_map*, a
 * 		special map of type **BPF_MAP_TYPE_PROG_ARRAY**, and passes
 * 		*ctx*, a pointer to the context.
 *
 * 		If the call succeeds, the kernel immediately runs the first
 * 		instruction of the new program. This is not a function call,
 * 		and it never returns to the previous program. If the call
 * 		fails, then the helper has no effect, and the caller continues
 * 		to run its subsequent instructions. A call can fail if the
 * 		destination program for the jump does not exist (i.e. *index*
 * 		is greater than or equal to the number of entries in
 * 		*prog_array_map*), or if the maximum number of tail calls has
 * 		been reached for this chain of programs. This limit is defined
 * 		in the kernel by the macro **MAX_TAIL_CALL_CNT** (not
 * 		accessible to user space), which is currently set to 32.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags)
 * 	Description
 * 		Clone and redirect the packet associated to *skb* to another
 * 		net device of index *ifindex*. Both ingress and egress
 * 		interfaces can be used for redirection. The **BPF_F_INGRESS**
 * 		value in *flags* is used to make the distinction (ingress path
 * 		is selected if the flag is present, egress path otherwise).
 * 		This is the only flag supported for now.
 *
 * 		In comparison with **bpf_redirect**\ () helper,
 * 		**bpf_clone_redirect**\ () has the associated cost of
 * 		duplicating the packet buffer, but this can be executed out of
 * 		the eBPF program. Conversely, **bpf_redirect**\ () is more
 * 		efficient, but it is handled through an action code where the
 * 		redirection happens only after the eBPF program has returned.
 *
 * 		A call to this helper may change the underlying packet buffer.
 * 		Therefore, at load time, all checks on pointers previously
 * 		done by the verifier are invalidated and must be performed
 * 		again, if the helper is used in combination with direct packet
 * 		access.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * u64 bpf_get_current_pid_tgid(void)
 * 	Return
 * 		A 64-bit integer containing the current tgid and pid, and
 * 		created as such:
 * 		*current_task*\ **->tgid << 32 \|**
 * 		*current_task*\ **->pid**.
 *
 * u64 bpf_get_current_uid_gid(void)
 * 	Return
 * 		A 64-bit integer containing the current GID and UID, and
 * 		created as such: *current_gid* **<< 32 \|** *current_uid*.
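 *
 * 		For both of the two helpers above, the halves are extracted
 * 		with plain shifts and casts (illustrative sketch):
 *
 * 		::
 *
 * 			__u64 id = bpf_get_current_pid_tgid();
 * 			__u32 tgid = id >> 32;
 * 			__u32 pid = (__u32)id;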
 *
 * int bpf_get_current_comm(void *buf, u32 size_of_buf)
 * 	Description
 * 		Copy the **comm** attribute of the current task into *buf* of
 * 		*size_of_buf*. The **comm** attribute contains the name of
 * 		the executable (excluding the path) for the current task. The
 * 		*size_of_buf* must be strictly positive. On success, the
 * 		helper makes sure that the *buf* is NUL-terminated. On failure,
 * 		it is filled with zeroes.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * u32 bpf_get_cgroup_classid(struct sk_buff *skb)
 * 	Description
 * 		Retrieve the classid for the current task, i.e. for the net_cls
 * 		cgroup to which *skb* belongs.
 *
 * 		This helper can be used on TC egress path, but not on ingress.
 *
 * 		The net_cls cgroup provides an interface to tag network packets
 * 		based on a user-provided identifier for all traffic coming from
 * 		the tasks belonging to the related cgroup. See also the related
 * 		kernel documentation, available from the Linux sources in file
 * 		*Documentation/cgroup-v1/net_cls.txt*.
 *
 * 		The Linux kernel has two versions for cgroups: there are
 * 		cgroups v1 and cgroups v2. Both are available to users, who can
 * 		use a mixture of them, but note that the net_cls cgroup is for
 * 		cgroup v1 only. This makes it incompatible with BPF programs
 * 		run on cgroups, which is a cgroup-v2-only feature (a socket can
 * 		only hold data for one version of cgroups at a time).
 *
 * 		This helper is only available if the kernel was compiled with
 * 		the **CONFIG_CGROUP_NET_CLASSID** configuration option set to
 * 		"**y**" or to "**m**".
 * 	Return
 * 		The classid, or 0 for the default unconfigured classid.
 *
 * int bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
 * 	Description
 * 		Push a *vlan_tci* (VLAN tag control information) of protocol
 * 		*vlan_proto* to the packet associated to *skb*, then update
 * 		the checksum. Note that if *vlan_proto* is different from
 * 		**ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to
 * 		be **ETH_P_8021Q**.
 *
 * 		A call to this helper may change the underlying packet buffer.
 * 		Therefore, at load time, all checks on pointers previously
 * 		done by the verifier are invalidated and must be performed
 * 		again, if the helper is used in combination with direct packet
 * 		access.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_skb_vlan_pop(struct sk_buff *skb)
 * 	Description
 * 		Pop a VLAN header from the packet associated to *skb*.
 *
 * 		A call to this helper may change the underlying packet buffer.
 * 		Therefore, at load time, all checks on pointers previously
 * 		done by the verifier are invalidated and must be performed
 * 		again, if the helper is used in combination with direct packet
 * 		access.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
 * 	Description
 * 		Get tunnel metadata. This helper takes a pointer *key* to an
 * 		empty **struct bpf_tunnel_key** of *size*, that will be
 * 		filled with tunnel metadata for the packet associated to *skb*.
 * 		The *flags* can be set to **BPF_F_TUNINFO_IPV6**, which
 * 		indicates that the tunnel is based on IPv6 protocol instead of
 * 		IPv4.
 *
 * 		The **struct bpf_tunnel_key** is an object that generalizes the
 * 		principal parameters used by various tunneling protocols into a
 * 		single struct. This way, it can be used to easily make a
 * 		decision based on the contents of the encapsulation header,
 * 		"summarized" in this struct. In particular, it holds the IP
 * 		address of the remote end (IPv4 or IPv6, depending on the case)
 * 		in *key*\ **->remote_ipv4** or *key*\ **->remote_ipv6**. Also,
 * 		this struct exposes the *key*\ **->tunnel_id**, which is
 * 		generally mapped to a VNI (Virtual Network Identifier), making
 * 		it programmable together with the **bpf_skb_set_tunnel_key**\
 * 		() helper.
 *
 * 		Let's imagine that the following code is part of a program
 * 		attached to the TC ingress interface, on one end of a GRE
 * 		tunnel, and is supposed to filter out all messages coming from
 * 		remote ends with IPv4 address other than 10.0.0.1:
 *
 * 		::
 *
 * 			int ret;
 * 			struct bpf_tunnel_key key = {};
 *
 * 			ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
 * 			if (ret < 0)
 * 				return TC_ACT_SHOT;	// drop packet
 *
 * 			if (key.remote_ipv4 != 0x0a000001)
 * 				return TC_ACT_SHOT;	// drop packet
 *
 * 			return TC_ACT_OK;		// accept packet
 *
 * 		This interface can also be used with all encapsulation devices
 * 		that can operate in "collect metadata" mode: instead of having
 * 		one network device per specific configuration, the "collect
 * 		metadata" mode only requires a single device where the
 * 		configuration can be extracted from this helper.
 *
 * 		This can be used together with various tunnels such as VXLan,
 * 		Geneve, GRE or IP in IP (IPIP).
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
 * 	Description
 * 		Populate tunnel metadata for packet associated to *skb*. The
 * 		tunnel metadata is set to the contents of *key*, of *size*. The
 * 		*flags* can be set to a combination of the following values:
 *
 * 		**BPF_F_TUNINFO_IPV6**
 * 			Indicate that the tunnel is based on IPv6 protocol
 * 			instead of IPv4.
 * 		**BPF_F_ZERO_CSUM_TX**
 * 			For IPv4 packets, add a flag to tunnel metadata
 * 			indicating that checksum computation should be skipped
 * 			and checksum set to zeroes.
 * 		**BPF_F_DONT_FRAGMENT**
 * 			Add a flag to tunnel metadata indicating that the
 * 			packet should not be fragmented.
 * 		**BPF_F_SEQ_NUMBER**
 * 			Add a flag to tunnel metadata indicating that a
 * 			sequence number should be added to tunnel header before
 * 			sending the packet. This flag was added for GRE
 * 			encapsulation, but might be used with other protocols
 * 			as well in the future.
 *
 * 		Here is a typical usage on the transmit path:
 *
 * 		::
 *
 * 			struct bpf_tunnel_key key;
 * 			     populate key ...
 * 			bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0);
 * 			bpf_clone_redirect(skb, vxlan_dev_ifindex, 0);
 *
 * 		See also the description of the **bpf_skb_get_tunnel_key**\ ()
 * 		helper for additional information.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * u64 bpf_perf_event_read(struct bpf_map *map, u64 flags)
 * 	Description
 * 		Read the value of a perf event counter. This helper relies on a
 * 		*map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of
 * 		the perf event counter is selected when *map* is updated with
 * 		perf event file descriptors. The *map* is an array whose size
 * 		is the number of available CPUs, and each cell contains a value
 * 		relative to one CPU. The value to retrieve is indicated by
 * 		*flags*, that contains the index of the CPU to look up, masked
 * 		with **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to
 * 		**BPF_F_CURRENT_CPU** to indicate that the value for the
 * 		current CPU should be retrieved.
 *
 * 		Note that before Linux 4.13, only hardware perf events can be
 * 		retrieved.
 *
 * 		Also, be aware that the newer helper
 * 		**bpf_perf_event_read_value**\ () is recommended over
 * 		**bpf_perf_event_read**\ () in general. The latter has some ABI
 * 		quirks where error and counter value are used as a return code
 * 		(which is wrong to do since ranges may overlap). This issue is
 * 		fixed with **bpf_perf_event_read_value**\ (), which at the same
 * 		time provides more features over the **bpf_perf_event_read**\
 * 		() interface. Please refer to the description of
 * 		**bpf_perf_event_read_value**\ () for details.
 * 	Return
 * 		The value of the perf event counter read from the map, or a
 * 		negative error code in case of failure.
 *
 * int bpf_redirect(u32 ifindex, u64 flags)
 * 	Description
 * 		Redirect the packet to another net device of index *ifindex*.
 * 		This helper is somewhat similar to **bpf_clone_redirect**\
 * 		(), except that the packet is not cloned, which provides
 * 		increased performance.
 *
 * 		Except for XDP, both ingress and egress interfaces can be used
 * 		for redirection. The **BPF_F_INGRESS** value in *flags* is used
 * 		to make the distinction (ingress path is selected if the flag
 * 		is present, egress path otherwise). Currently, XDP only
 * 		supports redirection to the egress interface, and accepts no
 * 		flag at all.
 *
 * 		The same effect can also be attained with the more generic
 * 		**bpf_redirect_map**\ (), which uses a BPF map to store the
 * 		redirect target instead of providing it directly to the helper.
 * 	Return
 * 		For XDP, the helper returns **XDP_REDIRECT** on success or
 * 		**XDP_ABORTED** on error. For other program types, the values
 * 		are **TC_ACT_REDIRECT** on success or **TC_ACT_SHOT** on
 * 		error.
 *
 * u32 bpf_get_route_realm(struct sk_buff *skb)
 * 	Description
 * 		Retrieve the realm of the route, that is to say the
 * 		**tclassid** field of the destination for the *skb*. The
 * 		identifier retrieved is a user-provided tag, similar to the
 * 		one used with the net_cls cgroup (see description for
 * 		**bpf_get_cgroup_classid**\ () helper), but here this tag is
 * 		held by a route (a destination entry), not by a task.
 *
 * 		Retrieving this identifier works with the clsact TC egress hook
 * 		(see also **tc-bpf(8)**), or alternatively on conventional
 * 		classful egress qdiscs, but not on TC ingress path. In case of
 * 		clsact TC egress hook, this has the advantage that, internally,
 * 		the destination entry has not been dropped yet in the transmit
 * 		path. Therefore, the destination entry does not need to be
 * 		artificially held via **netif_keep_dst**\ () for a classful
 * 		qdisc until the *skb* is freed.
 *
 * 		This helper is available only if the kernel was compiled with
 * 		**CONFIG_IP_ROUTE_CLASSID** configuration option.
 * 	Return
 * 		The realm of the route for the packet associated to *skb*, or 0
 * 		if none was found.
 *
 * int bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
 * 	Description
 * 		Write raw *data* blob into a special BPF perf event held by
 * 		*map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
 * 		event must have the following attributes: **PERF_SAMPLE_RAW**
 * 		as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
 * 		**PERF_COUNT_SW_BPF_OUTPUT** as **config**.
 *
 * 		The *flags* are used to indicate the index in *map* for which
 * 		the value must be put, masked with **BPF_F_INDEX_MASK**.
 * 		Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
 * 		to indicate that the index of the current CPU core should be
 * 		used.
 *
 * 		The value to write, of *size*, is passed through the eBPF
 * 		stack and pointed to by *data*.
 *
 * 		The context of the program *ctx* also needs to be passed to
 * 		the helper.
 *
 * 		In user space, a program willing to read the values needs to
 * 		call **perf_event_open**\ () on the perf event (either for
 * 		one or for all CPUs) and to store the file descriptor into the
 * 		*map*. This must be done before the eBPF program can send data
 * 		into it. An example is available in file
 * 		*samples/bpf/trace_output_user.c* in the Linux kernel source
 * 		tree (the eBPF program counterpart is in
 * 		*samples/bpf/trace_output_kern.c*).
 *
 * 		**bpf_perf_event_output**\ () achieves better performance
 * 		than **bpf_trace_printk**\ () for sharing data with user
 * 		space, and is much more suitable for streaming data from eBPF
 * 		programs.
 *
 * 		Note that this helper is not restricted to tracing use cases
 * 		and can be used with programs attached to TC or XDP as well,
 * 		where it allows for passing data to user space listeners. Data
 * 		can be:
 *
 * 		* Only custom structs,
 * 		* Only the packet payload, or
 * 		* A combination of both.
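 *
 * 		A typical tracing sketch (assumes an *events*
 * 		**BPF_MAP_TYPE_PERF_EVENT_ARRAY** map and a local *event_t*
 * 		struct, both defined by the program):
 *
 * 		::
 *
 * 			struct event_t ev = {
 * 				.pid = bpf_get_current_pid_tgid() >> 32,
 * 			};
 *
 * 			bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 * 					      &ev, sizeof(ev));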
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len)
 * 	Description
 * 		This helper was provided as an easy way to load data from a
 * 		packet. It can be used to load *len* bytes from *offset* from
 * 		the packet associated to *skb*, into the buffer pointed by
 * 		*to*.
 *
 * 		Since Linux 4.7, usage of this helper has mostly been replaced
 * 		by "direct packet access", enabling packet data to be
 * 		manipulated with *skb*\ **->data** and *skb*\ **->data_end**
 * 		pointing respectively to the first byte of packet data and to
 * 		the byte after the last byte of packet data. However, it
 * 		remains useful if one wishes to read large quantities of data
 * 		at once from a packet into the eBPF stack.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags)
 * 	Description
 * 		Walk a user or a kernel stack and return its id. To achieve
 * 		this, the helper needs *ctx*, which is a pointer to the context
 * 		on which the tracing program is executed, and a pointer to a
 * 		*map* of type **BPF_MAP_TYPE_STACK_TRACE**.
 *
 * 		The last argument, *flags*, holds the number of stack frames to
 * 		skip (from 0 to 255), masked with
 * 		**BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
 * 		a combination of the following flags:
 *
 * 		**BPF_F_USER_STACK**
 * 			Collect a user space stack instead of a kernel stack.
 * 		**BPF_F_FAST_STACK_CMP**
 * 			Compare stacks by hash only.
 * 		**BPF_F_REUSE_STACKID**
 * 			If two different stacks hash into the same *stackid*,
 * 			discard the old one.
 *
 * 		The stack id retrieved is a 32-bit handle which can be
 * 		further combined with other data (including other stack
 * 		ids) and used as a key into maps. This can be useful for
 * 		generating a variety of graphs (such as flame graphs or off-cpu
 * 		graphs).
 *
 * 		For walking a stack, this helper is an improvement over
 * 		**bpf_probe_read**\ (), which can be used with unrolled loops
 * 		but is not efficient and consumes a lot of eBPF instructions.
 * 		Instead, **bpf_get_stackid**\ () can collect up to
 * 		**PERF_MAX_STACK_DEPTH** kernel and user frames. Note that
 * 		this limit can be controlled with the **sysctl** program, and
 * 		that it should be manually increased in order to profile long
 * 		user stacks (such as stacks for Java programs). To do so, use:
 *
 * 		::
 *
 * 			# sysctl kernel.perf_event_max_stack=<new value>
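 *
 * 		A common profiling pattern (sketch; *stack_traces*, *counts*
 * 		and *one* are assumed to be defined by the program):
 *
 * 		::
 *
 * 			int id = bpf_get_stackid(ctx, &stack_traces,
 * 						 BPF_F_FAST_STACK_CMP | BPF_F_USER_STACK);
 *
 * 			if (id >= 0)
 * 				bpf_map_update_elem(&counts, &id, &one, BPF_ANY);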
 * 	Return
 * 		The positive or null stack id on success, or a negative error
 * 		in case of failure.
 *
 * s64 bpf_csum_diff(__be32 *from, u32 from_size, __be32 *to, u32 to_size, __wsum seed)
 * 	Description
 * 		Compute a checksum difference, from the raw buffer pointed by
 * 		*from*, of length *from_size* (that must be a multiple of 4),
 * 		towards the raw buffer pointed by *to*, of size *to_size*
 * 		(same remark). An optional *seed* can be added to the value
 * 		(this can be cascaded, the seed may come from a previous call
 * 		to the helper).
 *
 * 		This is flexible enough to be used in several ways:
 *
 * 		* With *from_size* == 0, *to_size* > 0 and *seed* set to
 * 		  checksum, it can be used when pushing new data.
 * 		* With *from_size* > 0, *to_size* == 0 and *seed* set to
 * 		  checksum, it can be used when removing data from a packet.
 * 		* With *from_size* > 0, *to_size* > 0 and *seed* set to 0, it
 * 		  can be used to compute a diff. Note that *from_size* and
 * 		  *to_size* do not need to be equal.
 *
 * 		This helper can be used in combination with
 * 		**bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ (), to
 * 		which one can feed in the difference computed with
 * 		**bpf_csum_diff**\ ().
 * 	Return
 * 		The checksum result, or a negative error code in case of
 * 		failure.
 *
 * int bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
 * 	Description
 * 		Retrieve tunnel options metadata for the packet associated to
 * 		*skb*, and store the raw tunnel option data to the buffer *opt*
 * 		of *size*.
 *
 * 		This helper can be used with encapsulation devices that can
 * 		operate in "collect metadata" mode (please refer to the related
 * 		note in the description of **bpf_skb_get_tunnel_key**\ () for
 * 		more details). A particular example where this can be used is
 * 		in combination with the Geneve encapsulation protocol, where it
 * 		allows for pushing (with **bpf_skb_set_tunnel_opt**\ () helper)
 * 		and retrieving arbitrary TLVs (Type-Length-Value headers) from
 * 		the eBPF program. This allows for full customization of these
 * 		headers.
 * 	Return
 * 		The size of the option data retrieved.
 *
 * int bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
 * 	Description
 * 		Set tunnel options metadata for the packet associated to *skb*
 * 		to the option data contained in the raw buffer *opt* of *size*.
 *
 * 		See also the description of the **bpf_skb_get_tunnel_opt**\ ()
 * 		helper for additional information.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags)
 * 	Description
 * 		Change the protocol of the *skb* to *proto*. Currently
 * 		supported are transitions from IPv4 to IPv6, and from IPv6 to
 * 		IPv4. The helper takes care of the groundwork for the
 * 		transition, including resizing the socket buffer. The eBPF
 * 		program is expected to fill the new headers, if any, via
 * 		**skb_store_bytes**\ () and to recompute the checksums with
 * 		**bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\
 * 		(). The main use case for this helper is to perform NAT64
 * 		operations out of an eBPF program.
 *
 * 		Internally, the GSO type is marked as dodgy so that headers are
 * 		checked and segments are recalculated by the GSO/GRO engine.
 * 		The size for GSO target is adapted as well.
 *
 * 		All values for *flags* are reserved for future usage, and must
 * 		be left at zero.
 *
 * 		A call to this helper may change the underlying packet buffer.
 * 		Therefore, at load time, all checks on pointers previously
 * 		done by the verifier are invalidated and must be performed
 * 		again, if the helper is used in combination with direct packet
 * 		access.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_skb_change_type(struct sk_buff *skb, u32 type)
 * 	Description
 * 		Change the packet type for the packet associated to *skb*. This
 * 		comes down to setting *skb*\ **->pkt_type** to *type*, except
 * 		that the eBPF program does not have write access to *skb*\
 * 		**->pkt_type** besides this helper. Using a helper here allows
 * 		for graceful handling of errors.
 *
 * 		The major use case is to change incoming *skb*s to
 * 		**PACKET_HOST** in a programmatic way instead of having to
 * 		recirculate via **redirect**\ (..., **BPF_F_INGRESS**), for
 * 		example.
 *
 * 		Note that *type* only allows certain values. At this time, they
 * 		are:
 *
 * 		**PACKET_HOST**
 * 			Packet is for us.
 * 		**PACKET_BROADCAST**
 * 			Send packet to all.
 * 		**PACKET_MULTICAST**
 * 			Send packet to group.
 * 		**PACKET_OTHERHOST**
 * 			Send packet to someone else.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index)
 * 	Description
 * 		Check whether *skb* is a descendant of the cgroup2 held by
 * 		*map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*.
 * 	Return
 * 		The return value depends on the result of the test, and can be:
 *
 * 		* 0, if the *skb* failed the cgroup2 descendant test.
 * 		* 1, if the *skb* passed the cgroup2 descendant test.
 * 		* A negative error code, if an error occurred.
 *
 * u32 bpf_get_hash_recalc(struct sk_buff *skb)
 * 	Description
 * 		Retrieve the hash of the packet, *skb*\ **->hash**. If it is
 * 		not set, in particular if the hash was cleared due to mangling,
 * 		recompute this hash. Later accesses to the hash can be done
 * 		directly with *skb*\ **->hash**.
 *
 * 		Calling **bpf_set_hash_invalid**\ (), changing a packet
 * 		prototype with **bpf_skb_change_proto**\ (), or calling
 * 		**bpf_skb_store_bytes**\ () with the
 * 		**BPF_F_INVALIDATE_HASH** flag are actions that may clear
 * 		the hash and trigger a new computation for the next call to
 * 		**bpf_get_hash_recalc**\ ().
 * 	Return
 * 		The 32-bit hash.
 *
 * u64 bpf_get_current_task(void)
 * 	Return
 * 		A pointer to the current task struct.
 *
 * int bpf_probe_write_user(void *dst, const void *src, u32 len)
 * 	Description
 * 		Attempt in a safe way to write *len* bytes from the buffer
 * 		*src* to *dst* in memory. It only works for threads that are in
 * 		user context, and *dst* must be a valid user space address.
 *
 * 		This helper should not be used to implement any kind of
 * 		security mechanism because of TOC-TOU attacks, but rather to
 * 		debug, divert, and manipulate execution of semi-cooperative
 * 		processes.
 *
 * 		Keep in mind that this feature is meant for experiments, and it
 * 		has a risk of crashing the system and running programs.
 * 		Therefore, when an eBPF program using this helper is attached,
 * 		a warning including PID and process name is printed to kernel
 * 		logs.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * int bpf_current_task_under_cgroup(struct bpf_map *map, u32 index)
 * 	Description
 * 		Check whether the probe is being run in the context of a given
 * 		subset of the cgroup2 hierarchy. The cgroup2 to test is held by
 * 		*map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*.
 * 	Return
 * 		The return value depends on the result of the test, and can be:
 *
 * 		* 0, if the current task belongs to the cgroup2.
 * 		* 1, if the current task does not belong to the cgroup2.
 * 		* A negative error code, if an error occurred.
 *
 * int bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags)
 * 	Description
 * 		Resize (trim or grow) the packet associated to *skb* to the
 * 		new *len*. The *flags* are reserved for future usage, and must
 * 		be left at zero.
 *
 * 		The basic idea is that the helper performs the needed work to
 * 		change the size of the packet, then the eBPF program rewrites
 * 		the rest via helpers like **bpf_skb_store_bytes**\ (),
 * 		**bpf_l3_csum_replace**\ (), **bpf_l4_csum_replace**\ ()
 * 		and others. This helper is a slow path utility intended for
 * 		replies with control messages. And because it is targeted for
 * 		slow path, the helper itself can afford to be slow: it
 * 		implicitly linearizes, unclones and drops offloads from the
 * 		*skb*.
 *
 * 		A call to this helper may change the underlying packet buffer.
 * 		Therefore, at load time, all checks on pointers previously
 * 		done by the verifier are invalidated and must be performed
 * 		again, if the helper is used in combination with direct packet
 * 		access.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 *		At the same time, this also makes sure the *skb* is uncloned,
 *		which is a necessary condition for direct write. As this needs
 *		to be an invariant for the write part only, the verifier
 *		detects writes and adds a prologue that calls
 *		**bpf_skb_pull_data**\ () to effectively unclone the *skb* from
 *		the very beginning in case it is indeed cloned.
 *
 *		A call to this helper is susceptible to change the underlying
 *		packet buffer. Therefore, at load time, all checks on pointers
 *		previously done by the verifier are invalidated and must be
 *		performed again, if the helper is used in combination with
 *		direct packet access.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * s64 bpf_csum_update(struct sk_buff *skb, __wsum csum)
 *	Description
 *		Add the checksum *csum* into *skb*\ **->csum** in case the
 *		driver has supplied a checksum for the entire packet into that
 *		field. Return an error otherwise. This helper is intended to be
 *		used in combination with **bpf_csum_diff**\ (), in particular
 *		when the checksum needs to be updated after data has been
 *		written into the packet through direct packet access.
 *	Return
 *		The checksum on success, or a negative error code in case of
 *		failure.
 *
 * void bpf_set_hash_invalid(struct sk_buff *skb)
 *	Description
 *		Invalidate the current *skb*\ **->hash**. It can be used after
 *		mangling headers through direct packet access, in order to
 *		indicate that the hash is outdated and to trigger a
 *		recalculation the next time the kernel tries to access this
 *		hash or when the **bpf_get_hash_recalc**\ () helper is called.
 *
 * int bpf_get_numa_node_id(void)
 *	Description
 *		Return the id of the current NUMA node. The primary use case
 *		for this helper is the selection of sockets for the local NUMA
 *		node, when the program is attached to sockets using the
 *		**SO_ATTACH_REUSEPORT_EBPF** option (see also **socket(7)**),
 *		but the helper is also available to other eBPF program types,
 *		similarly to **bpf_get_smp_processor_id**\ ().
 *	Return
 *		The id of the current NUMA node.
 *
 * int bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags)
 *	Description
 *		Grows the headroom of the packet associated to *skb* and
 *		adjusts the offset of the MAC header accordingly, adding *len*
 *		bytes of space. It automatically extends and reallocates memory
 *		as required.
 *
 *		This helper can be used on a layer 3 *skb* to push a MAC header
 *		for redirection into a layer 2 device.
 *
 *		All values for *flags* are reserved for future usage, and must
 *		be left at zero.
 *
 *		A call to this helper is susceptible to change the underlying
 *		packet buffer. Therefore, at load time, all checks on pointers
 *		previously done by the verifier are invalidated and must be
 *		performed again, if the helper is used in combination with
 *		direct packet access.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * int bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta)
 *	Description
 *		Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that
 *		it is possible to use a negative value for *delta*. This helper
 *		can be used to prepare the packet for pushing or popping
 *		headers.
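 *
 *		For instance, a minimal sketch that grows the headroom to push
 *		a custom 8-byte header (illustrative only, assuming libbpf
 *		conventions):
 *
 *		::
 *
 *			SEC("xdp")
 *			int push_hdr(struct xdp_md *ctx)
 *			{
 *				void *data, *data_end;
 *
 *				// A negative delta grows the headroom.
 *				if (bpf_xdp_adjust_head(ctx, -8))
 *					return XDP_ABORTED;
 *				// All pointers must be re-derived and re-checked.
 *				data = (void *)(long)ctx->data;
 *				data_end = (void *)(long)ctx->data_end;
 *				if (data + 8 > data_end)
 *					return XDP_ABORTED;
 *				__builtin_memset(data, 0, 8);
 *				return XDP_PASS;
 *			}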
 *
 *		A call to this helper is susceptible to change the underlying
 *		packet buffer. Therefore, at load time, all checks on pointers
 *		previously done by the verifier are invalidated and must be
 *		performed again, if the helper is used in combination with
 *		direct packet access.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * int bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr)
 *	Description
 *		Copy a NUL terminated string from an unsafe kernel address
 *		*unsafe_ptr* to *dst*. See **bpf_probe_read_kernel_str**\ ()
 *		for more details.
 *
 *		Generally, use **bpf_probe_read_user_str**\ () or
 *		**bpf_probe_read_kernel_str**\ () instead.
 *	Return
 *		On success, the strictly positive length of the string,
 *		including the trailing NUL character. On error, a negative
 *		value.
 *
 * u64 bpf_get_socket_cookie(struct sk_buff *skb)
 *	Description
 *		If the **struct sk_buff** pointed by *skb* has a known socket,
 *		retrieve the cookie (generated by the kernel) of this socket.
 *		If no cookie has been set yet, generate a new cookie. Once
 *		generated, the socket cookie remains stable for the life of the
 *		socket. This helper can be useful for monitoring per-socket
 *		networking traffic statistics as it provides a global socket
 *		identifier that can be assumed unique.
 *	Return
 *		An 8-byte long non-decreasing number on success, or 0 if the
 *		socket field is missing inside *skb*.
 *
 * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx)
 *	Description
 *		Equivalent to the **bpf_get_socket_cookie**\ () helper that
 *		accepts *skb*, but gets the socket from a **struct
 *		bpf_sock_addr** context.
 *	Return
 *		An 8-byte long non-decreasing number.
 *
 * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx)
 *	Description
 *		Equivalent to the **bpf_get_socket_cookie**\ () helper that
 *		accepts *skb*, but gets the socket from a **struct
 *		bpf_sock_ops** context.
 *	Return
 *		An 8-byte long non-decreasing number.
 *
 * u32 bpf_get_socket_uid(struct sk_buff *skb)
 *	Return
 *		The owner UID of the socket associated to *skb*. If the socket
 *		is **NULL**, or if it is not a full socket (i.e. if it is a
 *		time-wait or a request socket instead), the **overflowuid**
 *		value is returned (note that **overflowuid** might also be the
 *		actual UID value for the socket).
 *
 * u32 bpf_set_hash(struct sk_buff *skb, u32 hash)
 *	Description
 *		Set the full hash for *skb* (set the field *skb*\ **->hash**)
 *		to value *hash*.
 *	Return
 *		0
 *
 * int bpf_setsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, void *optval, int optlen)
 *	Description
 *		Emulate a call to **setsockopt()** on the socket associated to
 *		*bpf_socket*, which must be a full socket. The *level* at
 *		which the option resides and the name *optname* of the option
 *		must be specified, see **setsockopt(2)** for more information.
 *		The option value of length *optlen* is pointed by *optval*.
 *
 *		This helper actually implements a subset of **setsockopt()**.
 *		It supports the following *level*\ s:
 *
 *		* **SOL_SOCKET**, which supports the following *optname*\ s:
 *		  **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**,
 *		  **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**.
 *		* **IPPROTO_TCP**, which supports the following *optname*\ s:
 *		  **TCP_CONGESTION**, **TCP_BPF_IW**,
 *		  **TCP_BPF_SNDCWND_CLAMP**.
 *		* **IPPROTO_IP**, which supports *optname* **IP_TOS**.
 *		* **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * int bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags)
 *	Description
 *		Grow or shrink the room for data in the packet associated to
 *		*skb* by *len_diff*, according to the selected *mode*.
 *
 *		There are two supported modes at this time:
 *
 *		* **BPF_ADJ_ROOM_MAC**: Adjust room at the mac layer
 *		  (room space is added or removed below the layer 2 header).
 *
 *		* **BPF_ADJ_ROOM_NET**: Adjust room at the network layer
 *		  (room space is added or removed below the layer 3 header).
 *
 *		The following flags are supported at this time:
 *
 *		* **BPF_F_ADJ_ROOM_FIXED_GSO**: Do not adjust gso_size.
 *		  Adjusting mss in this way is not allowed for datagrams.
 *
 *		* **BPF_F_ADJ_ROOM_ENCAP_L3_IPV4**,
 *		  **BPF_F_ADJ_ROOM_ENCAP_L3_IPV6**:
 *		  Any new space is reserved to hold a tunnel header.
 *		  Configure skb offsets and other fields accordingly.
 *
 *		* **BPF_F_ADJ_ROOM_ENCAP_L4_GRE**,
 *		  **BPF_F_ADJ_ROOM_ENCAP_L4_UDP**:
 *		  Use with ENCAP_L3 flags to further specify the tunnel type.
 *
 *		* **BPF_F_ADJ_ROOM_ENCAP_L2**\ (*len*):
 *		  Use with ENCAP_L3/L4 flags to further specify the tunnel
 *		  type; *len* is the length of the inner MAC header.
 *
 *		A call to this helper is susceptible to change the underlying
 *		packet buffer. Therefore, at load time, all checks on pointers
 *		previously done by the verifier are invalidated and must be
 *		performed again, if the helper is used in combination with
 *		direct packet access.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * int bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags)
 *	Description
 *		Redirect the packet to the endpoint referenced by *map* at
 *		index *key*. Depending on its type, this *map* can contain
 *		references to net devices (for forwarding packets through other
 *		ports), or to CPUs (for redirecting XDP frames to another CPU;
 *		but this is only implemented for native XDP (with driver
 *		support) as of this writing).
 *
 *		The lower two bits of *flags* are used as the return code if
 *		the map lookup fails. This is so that the return value can be
 *		one of the XDP program return codes up to **XDP_TX**, as chosen
 *		by the caller. Any higher bits in the *flags* argument must be
 *		unset.
 *
 *		See also **bpf_redirect**\ (), which only supports redirecting
 *		to an ifindex, but doesn't require a map to do so.
 *	Return
 *		**XDP_REDIRECT** on success, or the value of the two lower bits
 *		of the *flags* argument on error.
 *
 * int bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags)
 *	Description
 *		Redirect the packet to the socket referenced by *map* (of type
 *		**BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and
 *		egress interfaces can be used for redirection. The
 *		**BPF_F_INGRESS** value in *flags* is used to make the
 *		distinction (ingress path is selected if the flag is present,
 *		egress path otherwise).
 *		This is the only flag supported for now.
 *	Return
 *		**SK_PASS** on success, or **SK_DROP** on error.
 *
 * int bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
 *	Description
 *		Add an entry to, or update, a *map* referencing sockets. The
 *		*skops* is used as a new value for the entry associated to
 *		*key*. *flags* is one of:
 *
 *		**BPF_NOEXIST**
 *			The entry for *key* must not exist in the map.
 *		**BPF_EXIST**
 *			The entry for *key* must already exist in the map.
 *		**BPF_ANY**
 *			No condition on the existence of the entry for *key*.
 *
 *		If the *map* has eBPF programs (parser and verdict), those will
 *		be inherited by the socket being added. If the socket is
 *		already attached to eBPF programs, this results in an error.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * int bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta)
 *	Description
 *		Adjust the address pointed by *xdp_md*\ **->data_meta** by
 *		*delta* (which can be positive or negative). Note that this
 *		operation modifies the address stored in *xdp_md*\ **->data**,
 *		so the latter must be loaded only after the helper has been
 *		called.
 *
 *		The use of *xdp_md*\ **->data_meta** is optional and programs
 *		are not required to use it. The rationale is that when the
 *		packet is processed with XDP (e.g. as DoS filter), it is
 *		possible to push further meta data along with it before passing
 *		to the stack, and to give the guarantee that an ingress eBPF
 *		program attached as a TC classifier on the same device can pick
 *		this up for further post-processing. Since TC works with socket
 *		buffers, it remains possible to set from XDP the **mark** or
 *		**priority** fields, or other fields of the socket buffer.
 *		Having this scratch space generic and programmable allows for
 *		more flexibility as the user is free to store whatever meta
 *		data they need.
 *
 *		A call to this helper is susceptible to change the underlying
 *		packet buffer. Therefore, at load time, all checks on pointers
 *		previously done by the verifier are invalidated and must be
 *		performed again, if the helper is used in combination with
 *		direct packet access.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * int bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size)
 *	Description
 *		Read the value of a perf event counter, and store it into *buf*
 *		of size *buf_size*. This helper relies on a *map* of type
 *		**BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of the perf event
 *		counter is selected when *map* is updated with perf event file
 *		descriptors. The *map* is an array whose size is the number of
 *		available CPUs, and each cell contains a value relative to one
 *		CPU. The value to retrieve is indicated by *flags*, which
 *		contains the index of the CPU to look up, masked with
 *		**BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to
 *		**BPF_F_CURRENT_CPU** to indicate that the value for the
 *		current CPU should be retrieved.
 *
 *		This helper behaves in a way close to the
 *		**bpf_perf_event_read**\ () helper, save that instead of
 *		just returning the value observed, it fills the *buf*
 *		structure.
 *		This allows for additional data to be retrieved: in
 *		particular, the enabled and running times (in *buf*\
 *		**->enabled** and *buf*\ **->running**, respectively) are
 *		copied. In general, **bpf_perf_event_read_value**\ () is
 *		recommended over **bpf_perf_event_read**\ (), which has some
 *		ABI issues and provides fewer functionalities.
 *
 *		These values are interesting, because hardware PMU (Performance
 *		Monitoring Unit) counters are limited resources. When there are
 *		more PMU based perf events opened than available counters, the
 *		kernel will multiplex these events so that each event gets a
 *		certain percentage (but not all) of the PMU time. When
 *		multiplexing happens, the number of samples or the counter
 *		value will not reflect the case compared to when no
 *		multiplexing occurs. This makes comparison between different
 *		runs difficult. Typically, the counter value should be
 *		normalized before comparing to other experiments. The usual
 *		normalization is done as follows.
 *
 *		::
 *
 *			normalized_counter = counter * t_enabled / t_running
 *
 *		Where t_enabled is the time enabled for the event and
 *		t_running is the time running for the event since the last
 *		normalization. The enabled and running times are accumulated
 *		since the perf event open. To achieve a scaling factor between
 *		two invocations of an eBPF program, users can use the CPU id
 *		as the key (which is typical for the perf array usage model)
 *		to remember the previous value and do the calculation inside
 *		the eBPF program.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * int bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size)
 *	Description
 *		For an eBPF program attached to a perf event, retrieve the
 *		value of the event counter associated to *ctx* and store it in
 *		the structure pointed by *buf* and of size *buf_size*. Enabled
 *		and running times are also stored in the structure (see
 *		description of helper **bpf_perf_event_read_value**\ () for
 *		more details).
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * int bpf_getsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, void *optval, int optlen)
 *	Description
 *		Emulate a call to **getsockopt()** on the socket associated to
 *		*bpf_socket*, which must be a full socket. The *level* at
 *		which the option resides and the name *optname* of the option
 *		must be specified, see **getsockopt(2)** for more information.
 *		The retrieved value is stored in the structure pointed by
 *		*optval* and of length *optlen*.
 *
 *		This helper actually implements a subset of **getsockopt()**.
 *		It supports the following *level*\ s:
 *
 *		* **IPPROTO_TCP**, which supports *optname*
 *		  **TCP_CONGESTION**.
 *		* **IPPROTO_IP**, which supports *optname* **IP_TOS**.
 *		* **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * int bpf_override_return(struct pt_regs *regs, u64 rc)
 *	Description
 *		Used for error injection, this helper uses kprobes to override
 *		the return value of the probed function, and to set it to *rc*.
 *		The first argument is the context *regs* on which the kprobe
 *		works.
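 *
 *		As an illustration, a minimal kprobe program forcing the probed
 *		function to fail (a sketch; the target function name below is a
 *		placeholder and must be tagged for error injection in the
 *		kernel):
 *
 *		::
 *
 *			SEC("kprobe/some_injectable_func")
 *			int force_enomem(struct pt_regs *ctx)
 *			{
 *				// The probed function is skipped entirely and
 *				// returns -12 (-ENOMEM) instead.
 *				bpf_override_return(ctx, (unsigned long)-12);
 *				return 0;
 *			}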
 *
 *		This helper works by setting the PC (program counter)
 *		to an override function which is run in place of the original
 *		probed function. This means the probed function is not run at
 *		all. The replacement function just returns with the required
 *		value.
 *
 *		This helper has security implications, and thus is subject to
 *		restrictions. It is only available if the kernel was compiled
 *		with the **CONFIG_BPF_KPROBE_OVERRIDE** configuration
 *		option, and in this case it only works on functions tagged with
 *		**ALLOW_ERROR_INJECTION** in the kernel code.
 *
 *		Also, the helper is only available for the architectures having
 *		the **CONFIG_FUNCTION_ERROR_INJECTION** option. As of this
 *		writing, the x86 architecture is the only one to support this
 *		feature.
 *	Return
 *		0
 *
 * int bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval)
 *	Description
 *		Attempt to set the value of the **bpf_sock_ops_cb_flags** field
 *		for the full TCP socket associated to *bpf_sock* to
 *		*argval*.
 *
 *		The primary use of this field is to determine if there should
 *		be calls to eBPF programs of type
 *		**BPF_PROG_TYPE_SOCK_OPS** at various points in the TCP
 *		code. A program of the same type can change its value, per
 *		connection and as necessary, when the connection is
 *		established. This field is directly accessible for reading, but
 *		this helper must be used for updates in order to return an
 *		error if an eBPF program tries to set a callback that is not
 *		supported in the current kernel.
 *
 *		*argval* is a flag array which can combine these flags:
 *
 *		* **BPF_SOCK_OPS_RTO_CB_FLAG** (retransmission time out)
 *		* **BPF_SOCK_OPS_RETRANS_CB_FLAG** (retransmission)
 *		* **BPF_SOCK_OPS_STATE_CB_FLAG** (TCP state change)
 *		* **BPF_SOCK_OPS_RTT_CB_FLAG** (every RTT)
 *
 *		This function can also be used to clear a callback flag by
 *		setting the appropriate bit to zero. For example, to disable
 *		the RTO callback:
 *
 *		**bpf_sock_ops_cb_flags_set(bpf_sock,**
 *		**bpf_sock->bpf_sock_ops_cb_flags & ~BPF_SOCK_OPS_RTO_CB_FLAG)**
 *
 *		Here are some examples of where one could call such an eBPF
 *		program:
 *
 *		* When RTO fires.
 *		* When a packet is retransmitted.
 *		* When the connection terminates.
 *		* When a packet is sent.
 *		* When a packet is received.
 *	Return
 *		Code **-EINVAL** if the socket is not a full TCP socket;
 *		otherwise, a positive number containing the bits that could not
 *		be set is returned (which comes down to 0 if all bits were set
 *		as required).
 *
 * int bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags)
 *	Description
 *		This helper is used in programs implementing policies at the
 *		socket level. If the message *msg* is allowed to pass (i.e. if
 *		the verdict eBPF program returns **SK_PASS**), redirect it to
 *		the socket referenced by *map* (of type
 *		**BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and
 *		egress interfaces can be used for redirection. The
 *		**BPF_F_INGRESS** value in *flags* is used to make the
 *		distinction (ingress path is selected if the flag is present,
 *		egress path otherwise). This is the only flag supported for
 *		now.
 *	Return
 *		**SK_PASS** on success, or **SK_DROP** on error.
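 *
 *		For instance, a minimal verdict program (a sketch, assuming
 *		libbpf's BTF-style map definitions; names are illustrative):
 *
 *		::
 *
 *			struct {
 *				__uint(type, BPF_MAP_TYPE_SOCKMAP);
 *				__uint(max_entries, 2);
 *				__type(key, __u32);
 *				__type(value, __u64);
 *			} sock_map SEC(".maps");
 *
 *			SEC("sk_msg")
 *			int msg_verdict(struct sk_msg_md *msg)
 *			{
 *				// Pass the message, steering it to the socket at
 *				// index 0, on that socket's ingress path.
 *				return bpf_msg_redirect_map(msg, &sock_map, 0,
 *							    BPF_F_INGRESS);
 *			}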
 *
 * int bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes)
 *	Description
 *		For socket policies, apply the verdict of the eBPF program to
 *		the next *bytes* (number of bytes) of message *msg*.
 *
 *		For example, this helper can be used in the following cases:
 *
 *		* A single **sendmsg**\ () or **sendfile**\ () system call
 *		  contains multiple logical messages that the eBPF program is
 *		  supposed to read and for which it should apply a verdict.
 *		* An eBPF program only cares about reading the first *bytes*
 *		  of a *msg*. If the message has a large payload, then setting
 *		  up and calling the eBPF program repeatedly for all bytes,
 *		  even though the verdict is already known, would create
 *		  unnecessary overhead.
 *
 *		When called from within an eBPF program, the helper sets a
 *		counter internal to the BPF infrastructure, which is used to
 *		apply the last verdict to the next *bytes*. If *bytes* is
 *		smaller than the current data being processed from a
 *		**sendmsg**\ () or **sendfile**\ () system call, the first
 *		*bytes* will be sent and the eBPF program will be re-run with
 *		the pointer for the start of data pointing to byte number
 *		*bytes* **+ 1**. If *bytes* is larger than the current data
 *		being processed, then the eBPF verdict will be applied to
 *		multiple **sendmsg**\ () or **sendfile**\ () calls until
 *		*bytes* are consumed.
 *
 *		Note that if a socket closes with the internal counter holding
 *		a non-zero value, this is not a problem because data is not
 *		being buffered for *bytes* and is sent as it is received.
 *	Return
 *		0
 *
 * int bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes)
 *	Description
 *		For socket policies, prevent the execution of the verdict eBPF
 *		program for message *msg* until *bytes* (number of bytes) have
 *		been accumulated.
 *
 *		This can be used when one needs a specific number of bytes
 *		before a verdict can be assigned, even if the data spans
 *		multiple **sendmsg**\ () or **sendfile**\ () calls. The extreme
 *		case would be a user calling **sendmsg**\ () repeatedly with
 *		1-byte long message segments. Obviously, this is bad for
 *		performance, but it is still valid. If the eBPF program needs
 *		*bytes* bytes to validate a header, this helper can be used to
 *		prevent the eBPF program from being called again until *bytes*
 *		have been accumulated.
 *	Return
 *		0
 *
 * int bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags)
 *	Description
 *		For socket policies, pull in non-linear data from user space
 *		for *msg* and set pointers *msg*\ **->data** and *msg*\
 *		**->data_end** to *start* and *end* byte offsets into *msg*,
 *		respectively.
 *
 *		If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a
 *		*msg* it can only parse data that the (**data**, **data_end**)
 *		pointers have already consumed. For **sendmsg**\ () hooks this
 *		is likely the first scatterlist element. But for calls relying
 *		on the **sendpage** handler (e.g. **sendfile**\ ()) this will
 *		be the range (**0**, **0**) because the data is shared with
 *		user space and by default the objective is to avoid allowing
 *		user space to modify data while (or after) the eBPF verdict is
 *		being decided.
 *		This helper can be used to pull in data and to set the start
 *		and end pointers to the given values. Data will be copied if
 *		necessary (i.e. if data was not linear and if the start and end
 *		pointers do not point to the same chunk).
 *
 *		A call to this helper is susceptible to change the underlying
 *		packet buffer. Therefore, at load time, all checks on pointers
 *		previously done by the verifier are invalidated and must be
 *		performed again, if the helper is used in combination with
 *		direct packet access.
 *
 *		All values for *flags* are reserved for future usage, and must
 *		be left at zero.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * int bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len)
 *	Description
 *		Bind the socket associated to *ctx* to the address pointed by
 *		*addr*, of length *addr_len*. This allows for making outgoing
 *		connections from the desired IP address, which can be useful
 *		for example when all processes inside a cgroup should use one
 *		single IP address on a host that has multiple IP addresses
 *		configured.
 *
 *		This helper works for IPv4 and IPv6, TCP and UDP sockets. The
 *		domain (*addr*\ **->sa_family**) must be **AF_INET** (or
 *		**AF_INET6**). Looking for a free port to bind to can be
 *		expensive, therefore binding to a port is not permitted by the
 *		helper: *addr*\ **->sin_port** (or **sin6_port**, respectively)
 *		must be set to zero.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * int bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta)
 *	Description
 *		Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is
 *		only possible to shrink the packet as of this writing,
 *		therefore *delta* must be a negative integer.
 *
 *		A call to this helper is susceptible to change the underlying
 *		packet buffer. Therefore, at load time, all checks on pointers
 *		previously done by the verifier are invalidated and must be
 *		performed again, if the helper is used in combination with
 *		direct packet access.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
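 *		For instance, a minimal sketch trimming packets to at most 64
 *		bytes (illustrative only):
 *
 *		::
 *
 *			SEC("xdp")
 *			int trim_pkt(struct xdp_md *ctx)
 *			{
 *				int len = ctx->data_end - ctx->data;
 *
 *				// delta must be negative: only shrinking works.
 *				if (len > 64 && bpf_xdp_adjust_tail(ctx, 64 - len))
 *					return XDP_ABORTED;
 *				return XDP_PASS;
 *			}
 *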
 * int bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags)
 *	Description
 *		Retrieve the XFRM state (IP transform framework, see also
 *		**ip-xfrm(8)**) at *index* in the XFRM "security path" for
 *		*skb*.
 *
 *		The retrieved value is stored in the **struct bpf_xfrm_state**
 *		pointed by *xfrm_state* and of length *size*.
 *
 *		All values for *flags* are reserved for future usage, and must
 *		be left at zero.
 *
 *		This helper is available only if the kernel was compiled with
 *		the **CONFIG_XFRM** configuration option.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * int bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags)
 *	Description
 *		Return a user or a kernel stack in the buffer provided by the
 *		bpf program. To achieve this, the helper needs *ctx*, which is
 *		a pointer to the context on which the tracing program is
 *		executed. To store the stacktrace, the bpf program provides
 *		*buf* with a non-negative *size*.
 *
 *		The last argument, *flags*, holds the number of stack frames to
 *		skip (from 0 to 255), masked with
 *		**BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
 *		the following flags:
 *
 *		**BPF_F_USER_STACK**
 *			Collect a user space stack instead of a kernel stack.
 *		**BPF_F_USER_BUILD_ID**
 *			Collect buildid+offset instead of ips for user stack,
 *			only valid if **BPF_F_USER_STACK** is also specified.
 *
 *		**bpf_get_stack**\ () can collect up to
 *		**PERF_MAX_STACK_DEPTH** kernel and user frames, subject to a
 *		sufficiently large buffer size. Note that
 *		this limit can be controlled with the **sysctl** program, and
 *		that it should be manually increased in order to profile long
 *		user stacks (such as stacks for Java programs). To do so, use:
 *
 *		::
 *
 *			# sysctl kernel.perf_event_max_stack=<new value>
 *	Return
 *		A non-negative value equal to or less than *size* on success,
 *		or a negative error in case of failure.
 *
 * int bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header)
 *	Description
 *		This helper is similar to **bpf_skb_load_bytes**\ () in that
 *		it provides an easy way to load *len* bytes from *offset*
 *		from the packet associated to *skb*, into the buffer pointed
 *		by *to*. The difference to **bpf_skb_load_bytes**\ () is that
 *		a fifth argument *start_header* exists in order to select a
 *		base offset to start from. *start_header* can be one of:
 *
 *		**BPF_HDR_START_MAC**
 *			Base offset to load data from is *skb*'s mac header.
 *		**BPF_HDR_START_NET**
 *			Base offset to load data from is *skb*'s network header.
 *
 *		In general, "direct packet access" is the preferred method to
 *		access packet data; however, this helper is particularly useful
 *		in socket filters where *skb*\ **->data** does not always point
 *		to the start of the mac header and where "direct packet access"
 *		is not available.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * int bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags)
 *	Description
 *		Do FIB lookup in kernel tables using parameters in *params*.
 *		If the lookup is successful and the result shows the packet is
 *		to be forwarded, the neighbor tables are searched for the
 *		nexthop. If successful (i.e., the FIB lookup shows forwarding
 *		and the nexthop is resolved), the nexthop address is returned
 *		in ipv4_dst or ipv6_dst based on family, smac is set to the mac
 *		address of the egress device, dmac is set to the nexthop mac
 *		address, rt_metric is set to the metric from the route
 *		(IPv4/IPv6 only), and ifindex is set to the device index of the
 *		nexthop from the FIB lookup.
 *
 *		The *plen* argument is the size of the passed-in struct.
 *		The *flags* argument can be a combination of one or more of the
 *		following values:
 *
 *		**BPF_FIB_LOOKUP_DIRECT**
 *			Do a direct table lookup vs. a full lookup using FIB
 *			rules.
 *		**BPF_FIB_LOOKUP_OUTPUT**
 *			Perform lookup from an egress perspective (default is
 *			ingress).
 *
 *		*ctx* is either **struct xdp_md** for XDP programs or
 *		**struct sk_buff** for tc cls_act programs.
 *	Return
 *		* < 0 if any input argument is invalid
 *		* 0 on success (packet is forwarded, nexthop neighbor exists)
 *		* > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
 *		  packet is not forwarded or needs assistance from the full
 *		  stack
 *
 * int bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
 *	Description
 *		Add an entry to, or update, a sockhash *map* referencing
 *		sockets. The *skops* is used as a new value for the entry
 *		associated to *key*. *flags* is one of:
 *
 *		**BPF_NOEXIST**
 *			The entry for *key* must not exist in the map.
 *		**BPF_EXIST**
 *			The entry for *key* must already exist in the map.
 *		**BPF_ANY**
 *			No condition on the existence of the entry for *key*.
 *
 *		If the *map* has eBPF programs (parser and verdict), those will
 *		be inherited by the socket being added. If the socket is
 *		already attached to eBPF programs, this results in an error.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * int bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags)
 *	Description
 *		This helper is used in programs implementing policies at the
 *		socket level. If the message *msg* is allowed to pass (i.e. if
 *		the verdict eBPF program returns **SK_PASS**), redirect it to
 *		the socket referenced by *map* (of type
 *		**BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and
 *		egress interfaces can be used for redirection. The
 *		**BPF_F_INGRESS** value in *flags* is used to make the
 *		distinction (ingress path is selected if the flag is present,
 *		egress path otherwise). This is the only flag supported for
 *		now.
 *	Return
 *		**SK_PASS** on success, or **SK_DROP** on error.
 *
 * int bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags)
 *	Description
 *		This helper is used in programs implementing policies at the
 *		skb socket level. If the sk_buff *skb* is allowed to pass (i.e.
 *		if the verdict eBPF program returns **SK_PASS**), redirect it
 *		to the socket referenced by *map* (of type
 *		**BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and
 *		egress interfaces can be used for redirection.
 *		The **BPF_F_INGRESS** value in *flags* is used to make the
 *		distinction (ingress path is selected if the flag is present,
 *		egress path otherwise). This is the only flag supported for
 *		now.
 *	Return
 *		**SK_PASS** on success, or **SK_DROP** on error.
 *
 * int bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len)
 *	Description
 *		Encapsulate the packet associated to *skb* within a Layer 3
 *		protocol header. This header is provided in the buffer at
 *		address *hdr*, with *len* its size in bytes. *type* indicates
 *		the protocol of the header and can be one of:
 *
 *		**BPF_LWT_ENCAP_SEG6**
 *			IPv6 encapsulation with Segment Routing Header
 *			(**struct ipv6_sr_hdr**). *hdr* only contains the SRH,
 *			the IPv6 header is computed by the kernel.
 *		**BPF_LWT_ENCAP_SEG6_INLINE**
 *			Only works if *skb* contains an IPv6 packet. Insert a
 *			Segment Routing Header (**struct ipv6_sr_hdr**) inside
 *			the IPv6 header.
 *		**BPF_LWT_ENCAP_IP**
 *			IP encapsulation (GRE/GUE/IPIP/etc). The outer header
 *			must be IPv4 or IPv6, followed by zero or more
 *			additional headers, up to **LWT_BPF_MAX_HEADROOM**
 *			total bytes in all prepended headers. Please note that
 *			if **skb_is_gso**\ (*skb*) is true, no more than two
 *			headers can be prepended, and the inner header, if
 *			present, should be either GRE or UDP/GUE.
 *
 *		The **BPF_LWT_ENCAP_SEG6**\ \* types can be used by BPF
 *		programs of type **BPF_PROG_TYPE_LWT_IN**; the
 *		**BPF_LWT_ENCAP_IP** type can be used by bpf programs of types
 *		**BPF_PROG_TYPE_LWT_IN** and **BPF_PROG_TYPE_LWT_XMIT**.
 *
 *		A call to this helper is susceptible to change the underlying
 *		packet buffer. Therefore, at load time, all checks on pointers
 *		previously done by the verifier are invalidated and must be
 *		performed again, if the helper is used in combination with
 *		direct packet access.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * int bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len)
 *	Description
 *		Store *len* bytes from address *from* into the packet
 *		associated to *skb*, at *offset*. Only the flags, tag and TLVs
 *		inside the outermost IPv6 Segment Routing Header can be
 *		modified through this helper.
 *
 *		A call to this helper is susceptible to change the underlying
 *		packet buffer. Therefore, at load time, all checks on pointers
 *		previously done by the verifier are invalidated and must be
 *		performed again, if the helper is used in combination with
 *		direct packet access.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * int bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta)
 *	Description
 *		Adjust the size allocated to TLVs in the outermost IPv6
 *		Segment Routing Header contained in the packet associated to
 *		*skb*, at position *offset* by *delta* bytes. Only offsets
 *		after the segments are accepted. *delta* can be positive
 *		(growing) as well as negative (shrinking).
 *
 *		A call to this helper is susceptible to change the underlying
 *		packet buffer. Therefore, at load time, all checks on pointers
 *		previously done by the verifier are invalidated and must be
 *		performed again, if the helper is used in combination with
 *		direct packet access.
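 *
 *		For instance, a sketch reserving and filling 8 bytes of TLV
 *		space with this helper and **bpf_lwt_seg6_store_bytes**\ ()
 *		(the *off* value below is hypothetical and depends on the
 *		actual SRH layout):
 *
 *		::
 *
 *			SEC("lwt_seg6local")
 *			int grow_tlv(struct __sk_buff *skb)
 *			{
 *				__u64 tlv = 0;			// zeroed TLV payload
 *				__u32 off = 40 + 8 + 2 * 16;	// after two segments
 *
 *				if (bpf_lwt_seg6_adjust_srh(skb, off, 8))
 *					return BPF_DROP;
 *				if (bpf_lwt_seg6_store_bytes(skb, off, &tlv, 8))
 *					return BPF_DROP;
 *				return BPF_OK;
 *			}
 *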
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * int bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len)
 *	Description
 *		Apply an IPv6 Segment Routing action of type *action* to the
 *		packet associated to *skb*. Each action takes a parameter
 *		contained at address *param*, and of length *param_len* bytes.
 *		*action* can be one of:
 *
 *		**SEG6_LOCAL_ACTION_END_X**
 *			End.X action: Endpoint with Layer-3 cross-connect.
 *			Type of *param*: **struct in6_addr**.
 *		**SEG6_LOCAL_ACTION_END_T**
 *			End.T action: Endpoint with specific IPv6 table lookup.
 *			Type of *param*: **int**.
 *		**SEG6_LOCAL_ACTION_END_B6**
 *			End.B6 action: Endpoint bound to an SRv6 policy.
 *			Type of *param*: **struct ipv6_sr_hdr**.
 *		**SEG6_LOCAL_ACTION_END_B6_ENCAP**
 *			End.B6.Encap action: Endpoint bound to an SRv6
 *			encapsulation policy.
 *			Type of *param*: **struct ipv6_sr_hdr**.
 *
 *		A call to this helper is susceptible to change the underlying
 *		packet buffer. Therefore, at load time, all checks on pointers
 *		previously done by the verifier are invalidated and must be
 *		performed again, if the helper is used in combination with
 *		direct packet access.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * int bpf_rc_repeat(void *ctx)
 *	Description
 *		This helper is used in programs implementing IR decoding, to
 *		report a successfully decoded repeat key message. This delays
 *		the generation of a key up event for the previously generated
 *		key down event.
 *
 *		Some IR protocols like NEC have a special IR message for
 *		repeating the last button, for when a button is held down.
 *
 *		The *ctx* should point to the lirc sample as passed into
 *		the program.
 *
 *		This helper is only available if the kernel was compiled with
 *		the **CONFIG_BPF_LIRC_MODE2** configuration option set to
 *		"**y**".
 *	Return
 *		0
 *
 * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
 *	Description
 *		This helper is used in programs implementing IR decoding, to
 *		report a successfully decoded key press with *scancode* and
 *		*toggle* value in the given *protocol*. The scancode will be
 *		translated to a keycode using the rc keymap, and reported as
 *		an input key down event. After a period a key up event is
 *		generated. This period can be extended by calling either
 *		**bpf_rc_keydown**\ () again with the same values, or calling
 *		**bpf_rc_repeat**\ ().
 *
 *		Some protocols include a toggle bit, in case the button was
 *		released and pressed again between consecutive scancodes.
 *
 *		The *ctx* should point to the lirc sample as passed into
 *		the program.
 *
 *		The *protocol* is the decoded protocol number (see
 *		**enum rc_proto** for some predefined values).
 *
 *		This helper is only available if the kernel was compiled with
 *		the **CONFIG_BPF_LIRC_MODE2** configuration option set to
 *		"**y**".
 *	Return
 *		0
 *
 * u64 bpf_skb_cgroup_id(struct sk_buff *skb)
 *	Description
 *		Return the cgroup v2 id of the socket associated with the *skb*.
 *		This is roughly similar to the **bpf_get_cgroup_classid**\ ()
 *		helper for cgroup v1 by providing a tag or
 *		identifier that can be matched on or used for map lookups,
 *		e.g. to implement a policy. The cgroup v2 id of a given path in
 *		the hierarchy is exposed in user space through the f_handle API
 *		in order to get to the same 64-bit id.
 *
 *		This helper can be used on the TC egress path, but not on
 *		ingress, and is available only if the kernel was compiled with
 *		the **CONFIG_SOCK_CGROUP_DATA** configuration option.
 *	Return
 *		The id is returned or 0 in case the id could not be retrieved.
 *
 * u64 bpf_get_current_cgroup_id(void)
 *	Return
 *		A 64-bit integer containing the current cgroup id based
 *		on the cgroup within which the current task is running.
 *
 * void *bpf_get_local_storage(void *map, u64 flags)
 *	Description
 *		Get the pointer to the local storage area.
 *		The type and the size of the local storage is defined
 *		by the *map* argument.
 *		The *flags* meaning is specific for each map type,
 *		and has to be 0 for cgroup local storage.
 *
 *		Depending on the BPF program type, a local storage area
 *		can be shared between multiple instances of the BPF program,
 *		running simultaneously.
 *
 *		Users should take care of the synchronization by themselves,
 *		for example by using the **BPF_STX_XADD** instruction to alter
 *		the shared data.
 *	Return
 *		A pointer to the local storage area.
 *
 * int bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags)
 *	Description
 *		Select a **SO_REUSEPORT** socket from a
 *		**BPF_MAP_TYPE_REUSEPORT_SOCKARRAY** *map*.
 *		It checks that the selected socket matches the incoming
 *		request in the socket buffer.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
 *	Description
 *		Return the id of the cgroup v2 that is an ancestor of the
 *		cgroup associated with the *skb*, at the *ancestor_level*. The
 *		root cgroup is at *ancestor_level* zero and each step down the
 *		hierarchy increments the level. If *ancestor_level* is equal to
 *		the level of the cgroup associated with *skb*, then the return
 *		value will be the same as that of **bpf_skb_cgroup_id**\ ().
 *
 *		The helper is useful to implement policies based on cgroups
 *		that are higher in the hierarchy than the immediate cgroup
 *		associated with *skb*.
 *
 *		The format of the returned id and the helper limitations are
 *		the same as for **bpf_skb_cgroup_id**\ ().
 *	Return
 *		The id is returned or 0 in case the id could not be retrieved.
 *
 * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
 *	Description
 *		Look for a TCP socket matching *tuple*, optionally in a child
 *		network namespace *netns*. The return value must be checked,
 *		and if non-**NULL**, released via **bpf_sk_release**\ ().
 *
 *		The *ctx* should point to the context of the program, such as
 *		the skb or socket (depending on the hook in use). This is used
 *		to determine the base network namespace for the lookup.
 *
 *		*tuple_size* must be one of:
 *
 *		**sizeof**\ (*tuple*\ **->ipv4**)
 *			Look for an IPv4 socket.
 *		**sizeof**\ (*tuple*\ **->ipv6**)
 *			Look for an IPv6 socket.
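 *
 *		For instance, a sketch looking up (and releasing) a local IPv4
 *		TCP socket from a TC program (illustrative only, assuming
 *		libbpf's <bpf/bpf_endian.h>):
 *
 *		::
 *
 *			SEC("tc")
 *			int lookup_sk(struct __sk_buff *skb)
 *			{
 *				struct bpf_sock_tuple tuple = {};
 *				struct bpf_sock *sk;
 *
 *				tuple.ipv4.daddr = bpf_htonl(0x7f000001);
 *				tuple.ipv4.dport = bpf_htons(8080);
 *				// netns of -1: use the netns of the skb's device.
 *				sk = bpf_sk_lookup_tcp(skb, &tuple,
 *						       sizeof(tuple.ipv4), -1, 0);
 *				if (sk)
 *					// References must always be released.
 *					bpf_sk_release(sk);
 *				return 0;
 *			}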
 *
 *		If the *netns* is a negative signed 32-bit integer, then the
 *		socket lookup table in the netns associated with the *ctx*
 *		will be used. For the TC hooks, this is the netns of the device
 *		in the skb. For socket hooks, this is the netns of the socket.
 *		If *netns* is any other signed 32-bit value greater than or
 *		equal to zero then it specifies the ID of the netns relative to
 *		the netns associated with the *ctx*. *netns* values beyond the
 *		range of 32-bit integers are reserved for future use.
 *
 *		All values for *flags* are reserved for future usage, and must
 *		be left at zero.
 *
 *		This helper is available only if the kernel was compiled with
 *		the **CONFIG_NET** configuration option.
 *	Return
 *		Pointer to **struct bpf_sock**, or **NULL** in case of failure.
 *		For sockets with reuseport option, the **struct bpf_sock**
 *		result is from *reuse*\ **->socks**\ [] using the hash of the
 *		tuple.
 *
 * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
 *	Description
 *		Look for a UDP socket matching *tuple*, optionally in a child
 *		network namespace *netns*. The return value must be checked,
 *		and if non-**NULL**, released via **bpf_sk_release**\ ().
 *
 *		The *ctx* should point to the context of the program, such as
 *		the skb or socket (depending on the hook in use). This is used
 *		to determine the base network namespace for the lookup.
 *
 *		*tuple_size* must be one of:
 *
 *		**sizeof**\ (*tuple*\ **->ipv4**)
 *			Look for an IPv4 socket.
 *		**sizeof**\ (*tuple*\ **->ipv6**)
 *			Look for an IPv6 socket.
 *
 *		If the *netns* is a negative signed 32-bit integer, then the
 *		socket lookup table in the netns associated with the *ctx*
 *		will be used. For the TC hooks, this is the netns of the device
 *		in the skb. For socket hooks, this is the netns of the socket.
 *		If *netns* is any other signed 32-bit value greater than or
 *		equal to zero then it specifies the ID of the netns relative to
 *		the netns associated with the *ctx*. *netns* values beyond the
 *		range of 32-bit integers are reserved for future use.
 *
 *		All values for *flags* are reserved for future usage, and must
 *		be left at zero.
 *
 *		This helper is available only if the kernel was compiled with
 *		the **CONFIG_NET** configuration option.
 *	Return
 *		Pointer to **struct bpf_sock**, or **NULL** in case of failure.
 *		For sockets with reuseport option, the **struct bpf_sock**
 *		result is from *reuse*\ **->socks**\ [] using the hash of the
 *		tuple.
 *
 * int bpf_sk_release(struct bpf_sock *sock)
 *	Description
 *		Release the reference held by *sock*. *sock* must be a
 *		non-**NULL** pointer that was returned from
 *		**bpf_sk_lookup_xxx**\ ().
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
 *	Description
 *		Push an element *value* into *map*. *flags* is one of:
 *
 *		**BPF_EXIST**
 *			If the queue/stack is full, the oldest element is
 *			removed to make room for this one.
 *	Return
 *		0 on success, or a negative error in case of failure.
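 *
 *		For instance, a sketch feeding a queue map (assuming a libbpf
 *		version that supports BTF-defined queue maps; names are
 *		illustrative):
 *
 *		::
 *
 *			struct {
 *				__uint(type, BPF_MAP_TYPE_QUEUE);
 *				__uint(max_entries, 16);
 *				__type(value, __u32);
 *			} queue SEC(".maps");
 *
 *			SEC("tracepoint/syscalls/sys_enter_getpid")
 *			int producer(void *ctx)
 *			{
 *				__u32 v = bpf_get_smp_processor_id();
 *
 *				// BPF_EXIST: drop the oldest element when full.
 *				bpf_map_push_elem(&queue, &v, BPF_EXIST);
 *				return 0;
 *			}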
 *
 * int bpf_map_pop_elem(struct bpf_map *map, void *value)
 *	Description
 *		Pop an element from *map*.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * int bpf_map_peek_elem(struct bpf_map *map, void *value)
 *	Description
 *		Get an element from *map* without removing it.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * int bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
 *	Description
 *		For socket policies, insert *len* bytes into *msg* at offset
 *		*start*.
 *
 *		If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a
 *		*msg* it may want to insert metadata or options into the *msg*.
 *		This can later be read and used by any of the lower layer BPF
 *		hooks.
 *
 *		This helper may fail if under memory pressure (a malloc
 *		fails); in these cases the BPF program will get an appropriate
 *		error and will need to handle it.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * int bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
 *	Description
 *		Remove *len* bytes from a *msg* starting at byte *start*.
 *		This may result in **ENOMEM** errors in certain situations if
 *		an allocation and copy are required due to a full ring buffer.
 *		However, the helper will try to avoid doing the allocation
 *		if possible. Other errors can occur if the input parameters are
 *		invalid, either because the *start* byte is not a valid part of
 *		the *msg* payload and/or because the *len* value is too large.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * int bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y)
 *	Description
 *		This helper is used in programs implementing IR decoding, to
 *		report a successfully decoded pointer movement.
 *
 *		The *ctx* should point to the lirc sample as passed into
 *		the program.
 *
 *		This helper is only available if the kernel was compiled with
 *		the **CONFIG_BPF_LIRC_MODE2** configuration option set to
 *		"**y**".
 *	Return
 *		0
 *
 * int bpf_spin_lock(struct bpf_spin_lock *lock)
 *	Description
 *		Acquire a spinlock represented by the pointer *lock*, which is
 *		stored as part of a value of a map. Taking the lock makes it
 *		possible to safely update the rest of the fields in that value.
 *		The spinlock can (and must) later be released with a call to
 *		**bpf_spin_unlock**\ (\ *lock*\ ).
 *
 *		Spinlocks in BPF programs come with a number of restrictions
 *		and constraints:
 *
 *		* **bpf_spin_lock** objects are only allowed inside maps of
 *		  types **BPF_MAP_TYPE_HASH** and **BPF_MAP_TYPE_ARRAY** (this
 *		  list could be extended in the future).
 *		* BTF description of the map is mandatory.
 *		* The BPF program can take ONE lock at a time, since taking two
 *		  or more could cause deadlocks.
 *		* Only one **struct bpf_spin_lock** is allowed per map element.
 *		* When the lock is taken, calls (either BPF to BPF or helpers)
 *		  are not allowed.
 *		* The **BPF_LD_ABS** and **BPF_LD_IND** instructions are not
 *		  allowed inside a spinlock-ed region.
 *		* The BPF program MUST call **bpf_spin_unlock**\ () to release
 *		  the lock, on all execution paths, before it returns.
 *		* The BPF program can access **struct bpf_spin_lock** only via
 *		  the **bpf_spin_lock**\ () and **bpf_spin_unlock**\ ()
 *		  helpers. Loading or storing data into the **struct
 *		  bpf_spin_lock** *lock*\ **;** field of a map is not allowed.
 *		* To use the **bpf_spin_lock**\ () helper, the BTF description
 *		  of the map value must be a struct and have a **struct
 *		  bpf_spin_lock** *anyname*\ **;** field at the top level.
 *		  A nested lock inside another struct is not allowed.
 *		* The **struct bpf_spin_lock** *lock* field in a map value must
 *		  be aligned on a multiple of 4 bytes in that value.
 *		* Syscall with command **BPF_MAP_LOOKUP_ELEM** does not copy
 *		  the **bpf_spin_lock** field to user space.
 *		* Syscall with command **BPF_MAP_UPDATE_ELEM**, or update from
 *		  a BPF program, does not update the **bpf_spin_lock** field.
 *		* **bpf_spin_lock** cannot be on the stack or inside a
 *		  networking packet (it can only be inside of a map value).
 *		* **bpf_spin_lock** is available to root only.
 *		* Tracing programs and socket filter programs cannot use
 *		  **bpf_spin_lock**\ () due to insufficient preemption checks
 *		  (but this may change in the future).
 *		* **bpf_spin_lock** is not allowed in inner maps of map-in-map.
 *	Return
 *		0
 *
 * int bpf_spin_unlock(struct bpf_spin_lock *lock)
 *	Description
 *		Release the *lock* previously locked by a call to
 *		**bpf_spin_lock**\ (\ *lock*\ ).
 *	Return
 *		0
 *
 * struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk)
 *	Description
 *		This helper gets a **struct bpf_sock** pointer such
 *		that all the fields in this **bpf_sock** can be accessed.
 *	Return
 *		A **struct bpf_sock** pointer on success, or **NULL** in
 *		case of failure.
 *
 * struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk)
 *	Description
 *		This helper gets a **struct bpf_tcp_sock** pointer from a
 *		**struct bpf_sock** pointer.
 *	Return
 *		A **struct bpf_tcp_sock** pointer on success, or **NULL** in
 *		case of failure.
 *
 * int bpf_skb_ecn_set_ce(struct sk_buff *skb)
 *	Description
 *		Set the ECN (Explicit Congestion Notification) field of the IP
 *		header to **CE** (Congestion Encountered) if the current value
 *		is **ECT** (ECN Capable Transport). Otherwise, do nothing.
 *		Works with IPv6 and IPv4.
 *	Return
 *		1 if the **CE** flag is set (either by the current helper call
 *		or because it was already present), 0 if it is not set.
 *
 * struct bpf_sock *bpf_get_listener_sock(struct bpf_sock *sk)
 *	Description
 *		Return a **struct bpf_sock** pointer in **TCP_LISTEN** state.
 *		**bpf_sk_release**\ () is unnecessary and not allowed.
 *	Return
 *		A **struct bpf_sock** pointer on success, or **NULL** in
 *		case of failure.
 *
 * struct bpf_sock *bpf_skc_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
 *	Description
 *		Look for a TCP socket matching *tuple*, optionally in a child
 *		network namespace *netns*. The return value must be checked,
 *		and if non-**NULL**, released via **bpf_sk_release**\ ().
 *
 *		This function is identical to **bpf_sk_lookup_tcp**\ (), except
 *		that it also returns timewait or request sockets. Use
 *		**bpf_sk_fullsock**\ () or **bpf_tcp_sock**\ () to access the
 *		full structure.
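 *
 *		For instance, a sketch distinguishing full sockets from
 *		timewait or request sockets (a fragment; *skb* and *tuple* are
 *		assumed to be set up as for **bpf_sk_lookup_tcp**\ ()):
 *
 *		::
 *
 *			struct bpf_sock *sk, *full;
 *
 *			sk = bpf_skc_lookup_tcp(skb, &tuple,
 *						sizeof(tuple.ipv4), -1, 0);
 *			if (sk) {
 *				full = bpf_sk_fullsock(sk);
 *				if (full) {
 *					// All bpf_sock fields are usable here.
 *				}
 *				// full == NULL means a timewait or request
 *				// socket; the reference is still held.
 *				bpf_sk_release(sk);
 *			}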
 *
 *		This helper is available only if the kernel was compiled with
 *		the **CONFIG_NET** configuration option.
 *	Return
 *		Pointer to **struct bpf_sock**, or **NULL** in case of failure.
 *		For sockets with reuseport option, the **struct bpf_sock**
 *		result is from *reuse*\ **->socks**\ [] using the hash of the
 *		tuple.
 *
 * int bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
 *	Description
 *		Check whether *iph* and *th* contain a valid SYN cookie ACK for
 *		the listening socket in *sk*.
 *
 *		*iph* points to the start of the IPv4 or IPv6 header, while
 *		*iph_len* contains **sizeof**\ (**struct iphdr**) or
 *		**sizeof**\ (**struct ip6hdr**).
 *
 *		*th* points to the start of the TCP header, while *th_len*
 *		contains **sizeof**\ (**struct tcphdr**).
 *
 *	Return
 *		0 if *iph* and *th* are a valid SYN cookie ACK, or a negative
 *		error otherwise.
 *
 * int bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags)
 *	Description
 *		Get the name of the sysctl in /proc/sys/ and copy it into the
 *		buffer *buf* of size *buf_len* provided by the program.
 *
 *		The buffer is always NUL terminated, unless it's zero-sized.
 *
 *		If *flags* is zero, the full name (e.g. "net/ipv4/tcp_mem") is
 *		copied. Use the **BPF_F_SYSCTL_BASE_NAME** flag to copy the
 *		base name only (e.g. "tcp_mem").
 *	Return
 *		Number of characters copied (not including the trailing NUL).
 *
 *		**-E2BIG** if the buffer wasn't big enough (*buf* will contain
 *		the truncated name in this case).
 *
 * int bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
 *	Description
 *		Get the current value of the sysctl as it is presented in
 *		/proc/sys (including the newline, etc.), and copy it as a
 *		string into the buffer *buf* of size *buf_len* provided by the
 *		program.
 *
 *		The whole value is copied, regardless of the file position at
 *		which user space issued e.g. **sys_read**.
 *
 *		The buffer is always NUL terminated, unless it's zero-sized.
 *	Return
 *		Number of characters copied (not including the trailing NUL).
 *
 *		**-E2BIG** if the buffer wasn't big enough (*buf* will contain
 *		the truncated value in this case).
 *
 *		**-EINVAL** if the current value was unavailable, e.g. because
 *		the sysctl is uninitialized and reading it returns **-EIO**.
 *
 * int bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
 *	Description
 *		Get the new value being written by user space to the sysctl
 *		(before the actual write happens) and copy it as a string into
 *		the buffer *buf* of size *buf_len* provided by the program.
 *
 *		User space may write the new value at a file position > 0.
 *
 *		The buffer is always NUL terminated, unless it's zero-sized.
 *	Return
 *		Number of characters copied (not including the trailing NUL).
 *
 *		**-E2BIG** if the buffer wasn't big enough (*buf* will contain
 *		the truncated value in this case).
 *
 *		**-EINVAL** if the sysctl is being read.
 *
 * int bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len)
 *	Description
 *		Override the new value being written by user space to the
 *		sysctl with the value provided by the program in buffer *buf*
 *		of size *buf_len*.
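 *
 *		For instance, a sketch of a **BPF_PROG_TYPE_CGROUP_SYSCTL**
 *		program combining the sysctl helpers above (illustrative
 *		only):
 *
 *		::
 *
 *			SEC("cgroup/sysctl")
 *			int sysctl_guard(struct bpf_sysctl *ctx)
 *			{
 *				char name[64];
 *
 *				// Copy the full name, e.g. "net/ipv4/tcp_mem".
 *				if (bpf_sysctl_get_name(ctx, name,
 *							sizeof(name), 0) < 0)
 *					return 1;
 *				// After matching the name, the written value
 *				// could be replaced, e.g.:
 *				// bpf_sysctl_set_new_value(ctx, "1", 2);
 *				return 1;	// 1 allows the access, 0 rejects
 *			}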
2684 *
2685 * *buf* should contain a string in the same form as provided by
2686 * user space on sysctl write.
2687 *
2688 * User space may write the new value at a file position > 0. To
2689 * override the whole sysctl value, the file position should be set to zero.
2690 * Return
2691 * 0 on success.
2692 *
2693 * **-E2BIG** if the *buf_len* is too big.
2694 *
2695 * **-EINVAL** if the sysctl is being read.
2696 *
2697 * int bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res)
2698 * Description
2699 * Convert the initial part of the string from buffer *buf* of
2700 * size *buf_len* to a long integer according to the given base
2701 * and save the result in *res*.
2702 *
2703 * The string may begin with an arbitrary amount of white space
2704 * (as determined by **isspace**\ (3)) followed by a single
2705 * optional '**-**' sign.
2706 *
2707 * The five least significant bits of *flags* encode the base;
2708 * other bits are currently unused.
2709 *
2710 * The base must be either 8, 10, 16 or 0 to detect it
2711 * automatically, similar to user space **strtol**\ (3).
2712 * Return
2713 * Number of characters consumed on success. Must be positive but
2714 * no more than *buf_len*.
2715 *
2716 * **-EINVAL** if no valid digits were found or an unsupported
2717 * base was provided.
2718 *
2719 * **-ERANGE** if the resulting value was out of range.
2720 *
2721 * int bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res)
2722 * Description
2723 * Convert the initial part of the string from buffer *buf* of
2724 * size *buf_len* to an unsigned long integer according to the
2725 * given base and save the result in *res*.
2726 *
2727 * The string may begin with an arbitrary amount of white space
2728 * (as determined by **isspace**\ (3)).
2729 *
2730 * The five least significant bits of *flags* encode the base;
2731 * other bits are currently unused.
2732 *
2733 * The base must be either 8, 10, 16 or 0 to detect it
2734 * automatically, similar to user space **strtoul**\ (3).
2735 * Return
2736 * Number of characters consumed on success. Must be positive but
2737 * no more than *buf_len*.
2738 *
2739 * **-EINVAL** if no valid digits were found or an unsupported
2740 * base was provided.
2741 *
2742 * **-ERANGE** if the resulting value was out of range.
2743 *
2744 * void *bpf_sk_storage_get(struct bpf_map *map, struct bpf_sock *sk, void *value, u64 flags)
2745 * Description
2746 * Get a bpf-local-storage from a *sk*.
2747 *
2748 * Logically, it could be thought of as getting the value from
2749 * a *map* with *sk* as the **key**. From this
2750 * perspective, the usage is not much different from
2751 * **bpf_map_lookup_elem**\ (*map*, **&**\ *sk*), except that this
2752 * helper enforces that the key must be a full socket and that
2753 * the map must be of type **BPF_MAP_TYPE_SK_STORAGE**.
2754 *
2755 * Underneath, the value is stored locally at *sk* instead of
2756 * the *map*. The *map* is used as the bpf-local-storage
2757 * "type". The bpf-local-storage "type" (i.e. the *map*) is
2758 * searched against all bpf-local-storages residing at *sk*.
2759 *
2760 * An optional *flags* (**BPF_SK_STORAGE_GET_F_CREATE**) can be
2761 * used such that a new bpf-local-storage will be
2762 * created if one does not exist. *value* can be used
2763 * together with **BPF_SK_STORAGE_GET_F_CREATE** to specify
2764 * the initial value of a bpf-local-storage. If *value* is
2765 * **NULL**, the new bpf-local-storage will be zero initialized.
2766 * Return
2767 * A bpf-local-storage pointer is returned on success.
2768 *
2769 * **NULL** if not found or there was an error in adding
2770 * a new bpf-local-storage.
2771 *
2772 * int bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk)
2773 * Description
2774 * Delete a bpf-local-storage from a *sk*.
2775 * Return
2776 * 0 on success.
2777 *
2778 * **-ENOENT** if the bpf-local-storage cannot be found.
2779 *
2780 * int bpf_send_signal(u32 sig)
2781 * Description
2782 * Send signal *sig* to the process of the current task.
2783 * The signal may be delivered to any of this process's threads.
2784 * Return
2785 * 0 on success or successfully queued.
2786 *
2787 * **-EBUSY** if the work queue under NMI is full.
2788 *
2789 * **-EINVAL** if *sig* is invalid.
2790 *
2791 * **-EPERM** if no permission to send the *sig*.
2792 *
2793 * **-EAGAIN** if the bpf program can try again.
2794 *
2795 * s64 bpf_tcp_gen_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
2796 * Description
2797 * Try to issue a SYN cookie for the packet with corresponding
2798 * IP/TCP headers, *iph* and *th*, on the listening socket in *sk*.
2799 *
2800 * *iph* points to the start of the IPv4 or IPv6 header, while
2801 * *iph_len* contains **sizeof**\ (**struct iphdr**) or
2802 * **sizeof**\ (**struct ip6hdr**).
2803 *
2804 * *th* points to the start of the TCP header, while *th_len*
2805 * contains the length of the TCP header.
2806 *
2807 * Return
2808 * On success, the lower 32 bits hold the generated SYN cookie,
2809 * followed by 16 bits which hold the MSS value for that cookie,
2810 * and the top 16 bits are unused.
2811 *
2812 * On failure, the returned value is one of the following:
2813 *
2814 * **-EINVAL** SYN cookie cannot be issued due to error
2815 *
2816 * **-ENOENT** SYN cookie should not be issued (no SYN flood)
2817 *
2818 * **-EOPNOTSUPP** kernel configuration does not enable SYN cookies
2819 *
2820 * **-EPROTONOSUPPORT** IP packet version is not 4 or 6
2821 *
2822 * int bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
2823 * Description
2824 * Write raw *data* blob into a special BPF perf event held by
2825 * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
2826 * event must have the following attributes: **PERF_SAMPLE_RAW**
2827 * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
2828 * **PERF_COUNT_SW_BPF_OUTPUT** as **config**.
2829 *
2830 * The *flags* are used to indicate the index in *map* for which
2831 * the value must be put, masked with **BPF_F_INDEX_MASK**.
2832 * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
2833 * to indicate that the index of the current CPU core should be
2834 * used.
2835 *
2836 * The value to write, of *size*, is passed through the eBPF stack
2837 * and pointed to by *data*.
2838 *
2839 * *ctx* is a pointer to the in-kernel struct sk_buff.
2840 *
2841 * This helper is similar to **bpf_perf_event_output**\ () but
2842 * restricted to raw_tracepoint bpf programs.
2843 * Return
2844 * 0 on success, or a negative error in case of failure.
2845 *
2846 * int bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr)
2847 * Description
2848 * Safely attempt to read *size* bytes from user space address
2849 * *unsafe_ptr* and store the data in *dst*.
2850 * Return
2851 * 0 on success, or a negative error in case of failure.
2852 *
2853 * int bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
2854 * Description
2855 * Safely attempt to read *size* bytes from kernel space address
2856 * *unsafe_ptr* and store the data in *dst*.
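 *
 *		For instance, a kprobe program might read a field of an
 *		in-kernel structure through the current task pointer
 *		(sketch; the field read is illustrative):
 *
 *		::
 *
 *			struct task_struct *task;
 *			int task_pid = 0;
 *
 *			task = (struct task_struct *)bpf_get_current_task();
 *			bpf_probe_read_kernel(&task_pid, sizeof(task_pid),
 *					      &task->pid);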
2857 * Return
2858 * 0 on success, or a negative error in case of failure.
2859 *
2860 * int bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr)
2861 * Description
2862 * Copy a NUL terminated string from an unsafe user address
2863 * *unsafe_ptr* to *dst*. The *size* should include the
2864 * terminating NUL byte. In case the string length is smaller than
2865 * *size*, the target is not padded with further NUL bytes. If the
2866 * string length is larger than *size*, just *size*-1 bytes are
2867 * copied and the last byte is set to NUL.
2868 *
2869 * On success, the length of the copied string is returned. This
2870 * makes this helper useful in tracing programs for reading
2871 * strings, and more importantly to get their length at runtime.
2872 * See the following snippet:
2873 *
2874 * ::
2875 *
2876 * SEC("kprobe/sys_open")
2877 * void bpf_sys_open(struct pt_regs *ctx)
2878 * {
2879 * char buf[PATHLEN]; // PATHLEN is defined to 256
2880 * int res = bpf_probe_read_user_str(buf, sizeof(buf),
2881 * ctx->di);
2882 *
2883 * // Consume buf, for example push it to
2884 * // userspace via bpf_perf_event_output(); we
2885 * // can use res (the string length) as event
2886 * // size, after checking its boundaries.
2887 * }
2888 *
2889 * In comparison, using the **bpf_probe_read_user**\ () helper here
2890 * instead to read the string would require estimating the length
2891 * at compile time, and would often result in copying more memory
2892 * than necessary.
2893 *
2894 * Another useful use case is when parsing individual process
2895 * arguments or individual environment variables, navigating
2896 * *current*\ **->mm->arg_start** and *current*\
2897 * **->mm->env_start**: using this helper and the return value,
2898 * one can quickly iterate at the right offset of the memory area.
2899 * Return
2900 * On success, the strictly positive length of the string,
2901 * including the trailing NUL character. On error, a negative
2902 * value.
2903 *
2904 * int bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr)
2905 * Description
2906 * Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr*
2907 * to *dst*. The same semantics as with **bpf_probe_read_user_str**\ () apply.
2908 * Return
2909 * On success, the strictly positive length of the string, including
2910 * the trailing NUL character. On error, a negative value.
2911 *
2912 * int bpf_tcp_send_ack(void *tp, u32 rcv_nxt)
2913 * Description
2914 * Send out a TCP ACK. *tp* is the in-kernel struct tcp_sock.
2915 * *rcv_nxt* is the ack_seq to be sent out.
2916 * Return
2917 * 0 on success, or a negative error in case of failure.
2918 *
2919 * int bpf_send_signal_thread(u32 sig)
2920 * Description
2921 * Send signal *sig* to the thread corresponding to the current task.
2922 * Return
2923 * 0 on success or successfully queued.
2924 *
2925 * **-EBUSY** if the work queue under NMI is full.
2926 *
2927 * **-EINVAL** if *sig* is invalid.
2928 *
2929 * **-EPERM** if no permission to send the *sig*.
2930 *
2931 * **-EAGAIN** if the bpf program can try again.
2932 *
2933 * u64 bpf_jiffies64(void)
2934 * Description
2935 * Obtain the 64-bit jiffies value.
2936 * Return
2937 * The 64-bit jiffies value.
2938 *
2939 * int bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags)
2940 * Description
2941 * For an eBPF program attached to a perf event, retrieve the
2942 * branch records (**struct perf_branch_entry**) associated with
2943 * *ctx* and store them in the buffer pointed to by *buf*, up to
2944 * *size* bytes.
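 *
 *		A sketch of the size-query pattern (the flag is described
 *		under Return below; the buffer size is illustrative):
 *
 *		::
 *
 *			struct perf_branch_entry entries[16];
 *			int sz;
 *
 *			// Ask how many bytes are needed for all records.
 *			sz = bpf_read_branch_records(ctx, NULL, 0,
 *					BPF_F_GET_BRANCH_RECORDS_SIZE);
 *			if (sz > 0) {
 *				// Read up to sizeof(entries) bytes of
 *				// struct perf_branch_entry records.
 *				sz = bpf_read_branch_records(ctx, entries,
 *						sizeof(entries), 0);
 *			}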
2945 * Return
2946 * On success, number of bytes written to *buf*. On error, a
2947 * negative value.
2948 *
2949 * The *flags* can be set to **BPF_F_GET_BRANCH_RECORDS_SIZE** to
2950 * instead return the number of bytes required to store all the
2951 * branch entries. If this flag is set, *buf* may be **NULL**.
2952 *
2953 * **-EINVAL** if arguments are invalid or **size** is not a
2954 * multiple of **sizeof**\ (**struct perf_branch_entry**).
2955 *
2956 * **-ENOENT** if the architecture does not support branch records.
2957 *
2958 * int bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size)
2959 * Description
2960 * Returns 0 on success; values for *pid* and *tgid* as seen from
2961 * the current *namespace* will be returned in *nsdata*.
2962 *
2963 * On failure, the returned value is one of the following:
2964 *
2965 * **-EINVAL** if the *dev* and *ino* supplied don't match the dev_t
2966 * and inode number of the current task's nsfs, or if the *dev* conversion to dev_t lost high bits.
2967 *
2968 * **-ENOENT** if the pidns does not exist for the current task.
2969 *
2970 * int bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
2971 * Description
2972 * Write raw *data* blob into a special BPF perf event held by
2973 * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
2974 * event must have the following attributes: **PERF_SAMPLE_RAW**
2975 * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
2976 * **PERF_COUNT_SW_BPF_OUTPUT** as **config**.
2977 *
2978 * The *flags* are used to indicate the index in *map* for which
2979 * the value must be put, masked with **BPF_F_INDEX_MASK**.
2980 * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
2981 * to indicate that the index of the current CPU core should be
2982 * used.
2983 *
2984 * The value to write, of *size*, is passed through the eBPF stack
2985 * and pointed to by *data*.
2986 *
2987 * *ctx* is a pointer to the in-kernel struct xdp_buff.
2988 *
2989 * This helper is similar to **bpf_perf_event_output**\ () but
2990 * restricted to raw_tracepoint bpf programs.
2991 * Return
2992 * 0 on success, or a negative error in case of failure.
2993 *
2994 * u64 bpf_get_netns_cookie(void *ctx)
2995 * Description
2996 * Retrieve the cookie (generated by the kernel) of the network
2997 * namespace the input *ctx* is associated with. The network
2998 * namespace cookie remains stable for its lifetime and provides
2999 * a global identifier that can be assumed unique. If *ctx* is
3000 * **NULL**, then the helper returns the cookie for the initial
3001 * network namespace. The cookie itself is very similar to that
3002 * of the **bpf_get_socket_cookie**\ () helper, but for network
3003 * namespaces instead of sockets.
3004 * Return
3005 * An 8-byte long opaque number.
3006 *
3007 * u64 bpf_get_current_ancestor_cgroup_id(int ancestor_level)
3008 * Description
3009 * Return the id of the cgroup v2 ancestor, at *ancestor_level*,
3010 * of the cgroup associated with the current task. The root cgroup
3011 * is at *ancestor_level* zero and each step down the hierarchy
3012 * increments the level. If *ancestor_level* equals the level of
3013 * the cgroup associated with the current task, then the return
3014 * value will be the same as that of **bpf_get_current_cgroup_id**\ ().
3015 *
3016 * The helper is useful for implementing policies based on cgroups
3017 * that are higher in the hierarchy than the immediate cgroup
3018 * associated with the current task.
3019 *
3020 * The format of the returned id and the helper's limitations are
3021 * the same as in **bpf_get_current_cgroup_id**\ ().
3022 * Return
3023 * The id is returned, or 0 in case the id could not be retrieved.
3024 *
3025 * int bpf_sk_assign(struct sk_buff *skb, struct bpf_sock *sk, u64 flags)
3026 * Description
3027 * Assign the *sk* to the *skb*. When combined with appropriate
3028 * routing configuration to receive the packet towards the socket,
3029 * this will cause *skb* to be delivered to the specified socket.
3030 * Subsequent redirection of *skb* via **bpf_redirect**\ (),
3031 * **bpf_clone_redirect**\ () or other methods outside of BPF may
3032 * interfere with successful delivery to the socket.
3033 *
3034 * This operation is only valid from the TC ingress path.
3035 *
3036 * The *flags* argument must be zero.
3037 * Return
3038 * 0 on success, or a negative errno in case of failure.
3039 *
3040 * * **-EINVAL** Unsupported flags specified.
3041 * * **-ENOENT** Socket is unavailable for assignment.
3042 * * **-ENETUNREACH** Socket is unreachable (wrong netns).
3043 * * **-EOPNOTSUPP** Unsupported operation, for example a
3044 * call from outside of TC ingress.
3045 * * **-ESOCKTNOSUPPORT** Socket type not supported (reuseport).
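 *
 *		A sketch of socket steering from TC ingress, combining a
 *		lookup with the assignment (assumes an initialized IPv4
 *		*tuple*; error handling elided):
 *
 *		::
 *
 *			struct bpf_sock *sk;
 *
 *			sk = bpf_skc_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
 *						BPF_F_CURRENT_NETNS, 0);
 *			if (sk) {
 *				bpf_sk_assign(skb, sk, 0);
 *				bpf_sk_release(sk);
 *			}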
3046 */ 3047#define __BPF_FUNC_MAPPER(FN) \ 3048 FN(unspec), \ 3049 FN(map_lookup_elem), \ 3050 FN(map_update_elem), \ 3051 FN(map_delete_elem), \ 3052 FN(probe_read), \ 3053 FN(ktime_get_ns), \ 3054 FN(trace_printk), \ 3055 FN(get_prandom_u32), \ 3056 FN(get_smp_processor_id), \ 3057 FN(skb_store_bytes), \ 3058 FN(l3_csum_replace), \ 3059 FN(l4_csum_replace), \ 3060 FN(tail_call), \ 3061 FN(clone_redirect), \ 3062 FN(get_current_pid_tgid), \ 3063 FN(get_current_uid_gid), \ 3064 FN(get_current_comm), \ 3065 FN(get_cgroup_classid), \ 3066 FN(skb_vlan_push), \ 3067 FN(skb_vlan_pop), \ 3068 FN(skb_get_tunnel_key), \ 3069 FN(skb_set_tunnel_key), \ 3070 FN(perf_event_read), \ 3071 FN(redirect), \ 3072 FN(get_route_realm), \ 3073 FN(perf_event_output), \ 3074 FN(skb_load_bytes), \ 3075 FN(get_stackid), \ 3076 FN(csum_diff), \ 3077 FN(skb_get_tunnel_opt), \ 3078 FN(skb_set_tunnel_opt), \ 3079 FN(skb_change_proto), \ 3080 FN(skb_change_type), \ 3081 FN(skb_under_cgroup), \ 3082 FN(get_hash_recalc), \ 3083 FN(get_current_task), \ 3084 FN(probe_write_user), \ 3085 FN(current_task_under_cgroup), \ 3086 FN(skb_change_tail), \ 3087 FN(skb_pull_data), \ 3088 FN(csum_update), \ 3089 FN(set_hash_invalid), \ 3090 FN(get_numa_node_id), \ 3091 FN(skb_change_head), \ 3092 FN(xdp_adjust_head), \ 3093 FN(probe_read_str), \ 3094 FN(get_socket_cookie), \ 3095 FN(get_socket_uid), \ 3096 FN(set_hash), \ 3097 FN(setsockopt), \ 3098 FN(skb_adjust_room), \ 3099 FN(redirect_map), \ 3100 FN(sk_redirect_map), \ 3101 FN(sock_map_update), \ 3102 FN(xdp_adjust_meta), \ 3103 FN(perf_event_read_value), \ 3104 FN(perf_prog_read_value), \ 3105 FN(getsockopt), \ 3106 FN(override_return), \ 3107 FN(sock_ops_cb_flags_set), \ 3108 FN(msg_redirect_map), \ 3109 FN(msg_apply_bytes), \ 3110 FN(msg_cork_bytes), \ 3111 FN(msg_pull_data), \ 3112 FN(bind), \ 3113 FN(xdp_adjust_tail), \ 3114 FN(skb_get_xfrm_state), \ 3115 FN(get_stack), \ 3116 FN(skb_load_bytes_relative), \ 3117 FN(fib_lookup), \ 3118 FN(sock_hash_update), \ 3119 FN(msg_redirect_hash), \ 3120 FN(sk_redirect_hash), \ 3121 FN(lwt_push_encap), \ 3122 FN(lwt_seg6_store_bytes), \ 3123 FN(lwt_seg6_adjust_srh), \ 3124 FN(lwt_seg6_action), \ 3125 FN(rc_repeat), \ 3126 FN(rc_keydown), \ 3127 FN(skb_cgroup_id), \ 3128 FN(get_current_cgroup_id), \ 3129 FN(get_local_storage), \ 3130 FN(sk_select_reuseport), \ 3131 FN(skb_ancestor_cgroup_id), \ 3132 FN(sk_lookup_tcp), \ 3133 FN(sk_lookup_udp), \ 3134 FN(sk_release), \ 3135 FN(map_push_elem), \ 3136 FN(map_pop_elem), \ 3137 FN(map_peek_elem), \ 3138 FN(msg_push_data), \ 3139 FN(msg_pop_data), \ 3140 FN(rc_pointer_rel), \ 3141 FN(spin_lock), \ 3142 FN(spin_unlock), \ 3143 FN(sk_fullsock), \ 3144 FN(tcp_sock), \ 3145 FN(skb_ecn_set_ce), \ 3146 FN(get_listener_sock), \ 3147 FN(skc_lookup_tcp), \ 3148 FN(tcp_check_syncookie), \ 3149 FN(sysctl_get_name), \ 3150 FN(sysctl_get_current_value), \ 3151 FN(sysctl_get_new_value), \ 3152 FN(sysctl_set_new_value), \ 3153 FN(strtol), \ 3154 FN(strtoul), \ 3155 FN(sk_storage_get), \ 3156 FN(sk_storage_delete), \ 3157 FN(send_signal), \ 3158 FN(tcp_gen_syncookie), \ 3159 FN(skb_output), \ 3160 FN(probe_read_user), \ 3161 FN(probe_read_kernel), \ 3162 FN(probe_read_user_str), \ 3163 FN(probe_read_kernel_str), \ 3164 FN(tcp_send_ack), \ 3165 FN(send_signal_thread), \ 3166 FN(jiffies64), \ 3167 FN(read_branch_records), \ 3168 FN(get_ns_current_pid_tgid), \ 3169 FN(xdp_output), \ 3170 FN(get_netns_cookie), \ 3171 FN(get_current_ancestor_cgroup_id), \ 3172 FN(sk_assign), 3173 3174/* integer value in 'imm' 
field of BPF_CALL instruction selects which helper 3175 * function eBPF program intends to call 3176 */ 3177#define __BPF_ENUM_FN(x) BPF_FUNC_ ## x 3178enum bpf_func_id { 3179 __BPF_FUNC_MAPPER(__BPF_ENUM_FN) 3180 __BPF_FUNC_MAX_ID, 3181}; 3182#undef __BPF_ENUM_FN 3183 3184/* All flags used by eBPF helper functions, placed here. */ 3185 3186/* BPF_FUNC_skb_store_bytes flags. */ 3187enum { 3188 BPF_F_RECOMPUTE_CSUM = (1ULL << 0), 3189 BPF_F_INVALIDATE_HASH = (1ULL << 1), 3190}; 3191 3192/* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags. 3193 * First 4 bits are for passing the header field size. 3194 */ 3195enum { 3196 BPF_F_HDR_FIELD_MASK = 0xfULL, 3197}; 3198 3199/* BPF_FUNC_l4_csum_replace flags. */ 3200enum { 3201 BPF_F_PSEUDO_HDR = (1ULL << 4), 3202 BPF_F_MARK_MANGLED_0 = (1ULL << 5), 3203 BPF_F_MARK_ENFORCE = (1ULL << 6), 3204}; 3205 3206/* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */ 3207enum { 3208 BPF_F_INGRESS = (1ULL << 0), 3209}; 3210 3211/* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */ 3212enum { 3213 BPF_F_TUNINFO_IPV6 = (1ULL << 0), 3214}; 3215 3216/* flags for both BPF_FUNC_get_stackid and BPF_FUNC_get_stack. */ 3217enum { 3218 BPF_F_SKIP_FIELD_MASK = 0xffULL, 3219 BPF_F_USER_STACK = (1ULL << 8), 3220/* flags used by BPF_FUNC_get_stackid only. */ 3221 BPF_F_FAST_STACK_CMP = (1ULL << 9), 3222 BPF_F_REUSE_STACKID = (1ULL << 10), 3223/* flags used by BPF_FUNC_get_stack only. */ 3224 BPF_F_USER_BUILD_ID = (1ULL << 11), 3225}; 3226 3227/* BPF_FUNC_skb_set_tunnel_key flags. */ 3228enum { 3229 BPF_F_ZERO_CSUM_TX = (1ULL << 1), 3230 BPF_F_DONT_FRAGMENT = (1ULL << 2), 3231 BPF_F_SEQ_NUMBER = (1ULL << 3), 3232}; 3233 3234/* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and 3235 * BPF_FUNC_perf_event_read_value flags. 3236 */ 3237enum { 3238 BPF_F_INDEX_MASK = 0xffffffffULL, 3239 BPF_F_CURRENT_CPU = BPF_F_INDEX_MASK, 3240/* BPF_FUNC_perf_event_output for sk_buff input context. */ 3241 BPF_F_CTXLEN_MASK = (0xfffffULL << 32), 3242}; 3243 3244/* Current network namespace */ 3245enum { 3246 BPF_F_CURRENT_NETNS = (-1L), 3247}; 3248 3249/* BPF_FUNC_skb_adjust_room flags. */ 3250enum { 3251 BPF_F_ADJ_ROOM_FIXED_GSO = (1ULL << 0), 3252 BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = (1ULL << 1), 3253 BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = (1ULL << 2), 3254 BPF_F_ADJ_ROOM_ENCAP_L4_GRE = (1ULL << 3), 3255 BPF_F_ADJ_ROOM_ENCAP_L4_UDP = (1ULL << 4), 3256}; 3257 3258enum { 3259 BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff, 3260 BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 56, 3261}; 3262 3263#define BPF_F_ADJ_ROOM_ENCAP_L2(len) (((__u64)len & \ 3264 BPF_ADJ_ROOM_ENCAP_L2_MASK) \ 3265 << BPF_ADJ_ROOM_ENCAP_L2_SHIFT) 3266 3267/* BPF_FUNC_sysctl_get_name flags. */ 3268enum { 3269 BPF_F_SYSCTL_BASE_NAME = (1ULL << 0), 3270}; 3271 3272/* BPF_FUNC_sk_storage_get flags */ 3273enum { 3274 BPF_SK_STORAGE_GET_F_CREATE = (1ULL << 0), 3275}; 3276 3277/* BPF_FUNC_read_branch_records flags. */ 3278enum { 3279 BPF_F_GET_BRANCH_RECORDS_SIZE = (1ULL << 0), 3280}; 3281 3282/* Mode for BPF_FUNC_skb_adjust_room helper. */ 3283enum bpf_adj_room_mode { 3284 BPF_ADJ_ROOM_NET, 3285 BPF_ADJ_ROOM_MAC, 3286}; 3287 3288/* Mode for BPF_FUNC_skb_load_bytes_relative helper. */ 3289enum bpf_hdr_start_off { 3290 BPF_HDR_START_MAC, 3291 BPF_HDR_START_NET, 3292}; 3293 3294/* Encapsulation type for BPF_FUNC_lwt_push_encap helper. 
 */
3295 enum bpf_lwt_encap_mode {
3296 BPF_LWT_ENCAP_SEG6,
3297 BPF_LWT_ENCAP_SEG6_INLINE,
3298 BPF_LWT_ENCAP_IP,
3299 };
3300
3301 #define __bpf_md_ptr(type, name) \
3302 union { \
3303 type name; \
3304 __u64 :64; \
3305 } __attribute__((aligned(8)))
3306
3307 /* user accessible mirror of in-kernel sk_buff.
3308 * new fields can only be added to the end of this structure
3309 */
3310 struct __sk_buff {
3311 __u32 len;
3312 __u32 pkt_type;
3313 __u32 mark;
3314 __u32 queue_mapping;
3315 __u32 protocol;
3316 __u32 vlan_present;
3317 __u32 vlan_tci;
3318 __u32 vlan_proto;
3319 __u32 priority;
3320 __u32 ingress_ifindex;
3321 __u32 ifindex;
3322 __u32 tc_index;
3323 __u32 cb[5];
3324 __u32 hash;
3325 __u32 tc_classid;
3326 __u32 data;
3327 __u32 data_end;
3328 __u32 napi_id;
3329
3330 /* Accessed by BPF_PROG_TYPE_sk_skb types from here to ... */
3331 __u32 family;
3332 __u32 remote_ip4; /* Stored in network byte order */
3333 __u32 local_ip4; /* Stored in network byte order */
3334 __u32 remote_ip6[4]; /* Stored in network byte order */
3335 __u32 local_ip6[4]; /* Stored in network byte order */
3336 __u32 remote_port; /* Stored in network byte order */
3337 __u32 local_port; /* stored in host byte order */
3338 /* ... here. */
3339
3340 __u32 data_meta;
3341 __bpf_md_ptr(struct bpf_flow_keys *, flow_keys);
3342 __u64 tstamp;
3343 __u32 wire_len;
3344 __u32 gso_segs;
3345 __bpf_md_ptr(struct bpf_sock *, sk);
3346 __u32 gso_size;
3347 };
3348
3349 struct bpf_tunnel_key {
3350 __u32 tunnel_id;
3351 union {
3352 __u32 remote_ipv4;
3353 __u32 remote_ipv6[4];
3354 };
3355 __u8 tunnel_tos;
3356 __u8 tunnel_ttl;
3357 __u16 tunnel_ext; /* Padding, future use. */
3358 __u32 tunnel_label;
3359 };
3360
3361 /* user accessible mirror of in-kernel xfrm_state.
3362 * new fields can only be added to the end of this structure
3363 */
3364 struct bpf_xfrm_state {
3365 __u32 reqid;
3366 __u32 spi; /* Stored in network byte order */
3367 __u16 family;
3368 __u16 ext; /* Padding, future use. */
3369 union {
3370 __u32 remote_ipv4; /* Stored in network byte order */
3371 __u32 remote_ipv6[4]; /* Stored in network byte order */
3372 };
3373 };
3374
3375 /* Generic BPF return codes which all BPF program types may support.
3376 * The values are binary compatible with their TC_ACT_* counterpart to
3377 * provide backwards compatibility with existing SCHED_CLS and SCHED_ACT
3378 * programs.
3379 *
3380 * XDP is handled separately, see XDP_*.
3381 */
3382 enum bpf_ret_code {
3383 BPF_OK = 0,
3384 /* 1 reserved */
3385 BPF_DROP = 2,
3386 /* 3-6 reserved */
3387 BPF_REDIRECT = 7,
3388 /* >127 are reserved for prog type specific return codes.
3389 *
3390 * BPF_LWT_REROUTE: used by BPF_PROG_TYPE_LWT_IN and
3391 * BPF_PROG_TYPE_LWT_XMIT to indicate that skb had been
3392 * changed and should be routed based on its new L3 header.
3393 * (This is an L3 redirect, as opposed to L2 redirect
3394 * represented by BPF_REDIRECT above).
3395 */
3396 BPF_LWT_REROUTE = 128,
3397 };
3398
3399 struct bpf_sock {
3400 __u32 bound_dev_if;
3401 __u32 family;
3402 __u32 type;
3403 __u32 protocol;
3404 __u32 mark;
3405 __u32 priority;
3406 /* IP address also allows 1- and 2-byte access */
3407 __u32 src_ip4;
3408 __u32 src_ip6[4];
3409 __u32 src_port; /* host byte order */
3410 __u32 dst_port; /* network byte order */
3411 __u32 dst_ip4;
3412 __u32 dst_ip6[4];
3413 __u32 state;
3414 };
3415
3416 struct bpf_tcp_sock {
3417 __u32 snd_cwnd; /* Sending congestion window */
3418 __u32 srtt_us; /* smoothed round trip time << 3 in usecs */
3419 __u32 rtt_min;
3420 __u32 snd_ssthresh; /* Slow start size threshold */
3421 __u32 rcv_nxt; /* What we want to receive next */
3422 __u32 snd_nxt; /* Next sequence we send */
3423 __u32 snd_una; /* First byte we want an ack for */
3424 __u32 mss_cache; /* Cached effective mss, not including SACKS */
3425 __u32 ecn_flags; /* ECN status bits. */
3426 __u32 rate_delivered; /* saved rate sample: packets delivered */
3427 __u32 rate_interval_us; /* saved rate sample: time elapsed */
3428 __u32 packets_out; /* Packets which are "in flight" */
3429 __u32 retrans_out; /* Retransmitted packets out */
3430 __u32 total_retrans; /* Total retransmits for entire connection */
3431 __u32 segs_in; /* RFC4898 tcpEStatsPerfSegsIn
3432 * total number of segments in.
3433 */
3434 __u32 data_segs_in; /* RFC4898 tcpEStatsPerfDataSegsIn
3435 * total number of data segments in.
3436 */
3437 __u32 segs_out; /* RFC4898 tcpEStatsPerfSegsOut
3438 * The total number of segments sent.
3439 */
3440 __u32 data_segs_out; /* RFC4898 tcpEStatsPerfDataSegsOut
3441 * total number of data segments sent.
3442 */
3443 __u32 lost_out; /* Lost packets */
3444 __u32 sacked_out; /* SACK'd packets */
3445 __u64 bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived
3446 * sum(delta(rcv_nxt)), or how many bytes
3447 * were received.
3448 */
3449 __u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked
3450 * sum(delta(snd_una)), or how many bytes
3451 * were acked.
3452 */
3453 __u32 dsack_dups; /* RFC4898 tcpEStatsStackDSACKDups
3454 * total number of DSACK blocks received
3455 */
3456 __u32 delivered; /* Total data packets delivered incl. rexmits */
3457 __u32 delivered_ce; /* Like the above but only ECE marked packets */
3458 __u32 icsk_retransmits; /* Number of unrecovered [RTO] timeouts */
3459 };
3460
3461 struct bpf_sock_tuple {
3462 union {
3463 struct {
3464 __be32 saddr;
3465 __be32 daddr;
3466 __be16 sport;
3467 __be16 dport;
3468 } ipv4;
3469 struct {
3470 __be32 saddr[4];
3471 __be32 daddr[4];
3472 __be16 sport;
3473 __be16 dport;
3474 } ipv6;
3475 };
3476 };
3477
3478 struct bpf_xdp_sock {
3479 __u32 queue_id;
3480 };
3481
3482 #define XDP_PACKET_HEADROOM 256
3483
3484 /* User return codes for XDP prog type.
3485 * A valid XDP program must return one of these defined values. All other
3486 * return codes are reserved for future use. Unknown return codes will
3487 * result in packet drops and a warning via bpf_warn_invalid_xdp_action().
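 *
 * For illustration, a minimal XDP program (sketch, using libbpf's SEC()
 * convention) just returns one of these codes:
 *
 *	SEC("xdp")
 *	int xdp_pass_all(struct xdp_md *ctx)
 *	{
 *		return XDP_PASS;
 *	}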
3488 */
3489 enum xdp_action {
3490 XDP_ABORTED = 0,
3491 XDP_DROP,
3492 XDP_PASS,
3493 XDP_TX,
3494 XDP_REDIRECT,
3495 };
3496
3497 /* user accessible metadata for XDP packet hook
3498 * new fields must be added to the end of this structure
3499 */
3500 struct xdp_md {
3501 __u32 data;
3502 __u32 data_end;
3503 __u32 data_meta;
3504 /* Accesses below go through struct xdp_rxq_info */
3505 __u32 ingress_ifindex; /* rxq->dev->ifindex */
3506 __u32 rx_queue_index; /* rxq->queue_index */
3507 };
3508
3509 enum sk_action {
3510 SK_DROP = 0,
3511 SK_PASS,
3512 };
3513
3514 /* user accessible metadata for SK_MSG packet hook, new fields must
3515 * be added to the end of this structure
3516 */
3517 struct sk_msg_md {
3518 __bpf_md_ptr(void *, data);
3519 __bpf_md_ptr(void *, data_end);
3520
3521 __u32 family;
3522 __u32 remote_ip4; /* Stored in network byte order */
3523 __u32 local_ip4; /* Stored in network byte order */
3524 __u32 remote_ip6[4]; /* Stored in network byte order */
3525 __u32 local_ip6[4]; /* Stored in network byte order */
3526 __u32 remote_port; /* Stored in network byte order */
3527 __u32 local_port; /* stored in host byte order */
3528 __u32 size; /* Total size of sk_msg */
3529 };
3530
3531 struct sk_reuseport_md {
3532 /*
3533 * Start of directly accessible data. It begins from
3534 * the tcp/udp header.
3535 */
3536 __bpf_md_ptr(void *, data);
3537 /* End of directly accessible data */
3538 __bpf_md_ptr(void *, data_end);
3539 /*
3540 * Total length of packet (starting from the tcp/udp header).
3541 * Note that the directly accessible bytes (data_end - data)
3542 * could be less than this "len". Those bytes could be
3543 * indirectly read by a helper "bpf_skb_load_bytes()".
3544 */
3545 __u32 len;
3546 /*
3547 * Eth protocol in the mac header (network byte order). e.g.
3548 * ETH_P_IP(0x0800) and ETH_P_IPV6(0x86DD)
3549 */
3550 __u32 eth_protocol;
3551 __u32 ip_protocol; /* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */
3552 __u32 bind_inany; /* Is sock bound to an INANY address?
 */
3553 __u32 hash; /* A hash of the packet 4 tuples */
3554 };
3555
3556 #define BPF_TAG_SIZE 8
3557
3558 struct bpf_prog_info {
3559 __u32 type;
3560 __u32 id;
3561 __u8 tag[BPF_TAG_SIZE];
3562 __u32 jited_prog_len;
3563 __u32 xlated_prog_len;
3564 __aligned_u64 jited_prog_insns;
3565 __aligned_u64 xlated_prog_insns;
3566 __u64 load_time; /* ns since boottime */
3567 __u32 created_by_uid;
3568 __u32 nr_map_ids;
3569 __aligned_u64 map_ids;
3570 char name[BPF_OBJ_NAME_LEN];
3571 __u32 ifindex;
3572 __u32 gpl_compatible:1;
3573 __u32 :31; /* alignment pad */
3574 __u64 netns_dev;
3575 __u64 netns_ino;
3576 __u32 nr_jited_ksyms;
3577 __u32 nr_jited_func_lens;
3578 __aligned_u64 jited_ksyms;
3579 __aligned_u64 jited_func_lens;
3580 __u32 btf_id;
3581 __u32 func_info_rec_size;
3582 __aligned_u64 func_info;
3583 __u32 nr_func_info;
3584 __u32 nr_line_info;
3585 __aligned_u64 line_info;
3586 __aligned_u64 jited_line_info;
3587 __u32 nr_jited_line_info;
3588 __u32 line_info_rec_size;
3589 __u32 jited_line_info_rec_size;
3590 __u32 nr_prog_tags;
3591 __aligned_u64 prog_tags;
3592 __u64 run_time_ns;
3593 __u64 run_cnt;
3594 } __attribute__((aligned(8)));
3595
3596 struct bpf_map_info {
3597 __u32 type;
3598 __u32 id;
3599 __u32 key_size;
3600 __u32 value_size;
3601 __u32 max_entries;
3602 __u32 map_flags;
3603 char name[BPF_OBJ_NAME_LEN];
3604 __u32 ifindex;
3605 __u32 btf_vmlinux_value_type_id;
3606 __u64 netns_dev;
3607 __u64 netns_ino;
3608 __u32 btf_id;
3609 __u32 btf_key_type_id;
3610 __u32 btf_value_type_id;
3611 } __attribute__((aligned(8)));
3612
3613 struct bpf_btf_info {
3614 __aligned_u64 btf;
3615 __u32 btf_size;
3616 __u32 id;
3617 } __attribute__((aligned(8)));
3618
3619 /* User bpf_sock_addr struct to access socket fields and sockaddr struct passed
3620 * by user and intended to be used by socket (e.g. to bind to, depends on
3621 * attach type).
3622 */
3623 struct bpf_sock_addr {
3624 __u32 user_family; /* Allows 4-byte read, but no write. */
3625 __u32 user_ip4; /* Allows 1,2,4-byte read and 4-byte write.
3626 * Stored in network byte order.
3627 */
3628 __u32 user_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write.
3629 * Stored in network byte order.
3630 */
3631 __u32 user_port; /* Allows 4-byte read and write.
3632 * Stored in network byte order
3633 */
3634 __u32 family; /* Allows 4-byte read, but no write */
3635 __u32 type; /* Allows 4-byte read, but no write */
3636 __u32 protocol; /* Allows 4-byte read, but no write */
3637 __u32 msg_src_ip4; /* Allows 1,2,4-byte read and 4-byte write.
3638 * Stored in network byte order.
3639 */
3640 __u32 msg_src_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write.
3641 * Stored in network byte order.
3642 */
3643 __bpf_md_ptr(struct bpf_sock *, sk);
3644 };
3645
3646 /* User bpf_sock_ops struct to access socket values and specify request ops
3647 * and their replies.
3648 * Some of these fields are in network (big-endian) byte order and may need
3649 * to be converted before use (bpf_ntohl() defined in samples/bpf/bpf_endian.h).
3650 * New fields can only be added at the end of this structure 3651 */ 3652struct bpf_sock_ops { 3653 __u32 op; 3654 union { 3655 __u32 args[4]; /* Optionally passed to bpf program */ 3656 __u32 reply; /* Returned by bpf program */ 3657 __u32 replylong[4]; /* Optionally returned by bpf prog */ 3658 }; 3659 __u32 family; 3660 __u32 remote_ip4; /* Stored in network byte order */ 3661 __u32 local_ip4; /* Stored in network byte order */ 3662 __u32 remote_ip6[4]; /* Stored in network byte order */ 3663 __u32 local_ip6[4]; /* Stored in network byte order */ 3664 __u32 remote_port; /* Stored in network byte order */ 3665 __u32 local_port; /* stored in host byte order */ 3666 __u32 is_fullsock; /* Some TCP fields are only valid if 3667 * there is a full socket. If not, the 3668 * fields read as zero. 3669 */ 3670 __u32 snd_cwnd; 3671 __u32 srtt_us; /* Averaged RTT << 3 in usecs */ 3672 __u32 bpf_sock_ops_cb_flags; /* flags defined in uapi/linux/tcp.h */ 3673 __u32 state; 3674 __u32 rtt_min; 3675 __u32 snd_ssthresh; 3676 __u32 rcv_nxt; 3677 __u32 snd_nxt; 3678 __u32 snd_una; 3679 __u32 mss_cache; 3680 __u32 ecn_flags; 3681 __u32 rate_delivered; 3682 __u32 rate_interval_us; 3683 __u32 packets_out; 3684 __u32 retrans_out; 3685 __u32 total_retrans; 3686 __u32 segs_in; 3687 __u32 data_segs_in; 3688 __u32 segs_out; 3689 __u32 data_segs_out; 3690 __u32 lost_out; 3691 __u32 sacked_out; 3692 __u32 sk_txhash; 3693 __u64 bytes_received; 3694 __u64 bytes_acked; 3695 __bpf_md_ptr(struct bpf_sock *, sk); 3696}; 3697 3698/* Definitions for bpf_sock_ops_cb_flags */ 3699enum { 3700 BPF_SOCK_OPS_RTO_CB_FLAG = (1<<0), 3701 BPF_SOCK_OPS_RETRANS_CB_FLAG = (1<<1), 3702 BPF_SOCK_OPS_STATE_CB_FLAG = (1<<2), 3703 BPF_SOCK_OPS_RTT_CB_FLAG = (1<<3), 3704/* Mask of all currently supported cb flags */ 3705 BPF_SOCK_OPS_ALL_CB_FLAGS = 0xF, 3706}; 3707 3708/* List of known BPF sock_ops operators. 3709 * New entries can only be added at the end 3710 */ 3711enum { 3712 BPF_SOCK_OPS_VOID, 3713 BPF_SOCK_OPS_TIMEOUT_INIT, /* Should return SYN-RTO value to use or 3714 * -1 if default value should be used 3715 */ 3716 BPF_SOCK_OPS_RWND_INIT, /* Should return initial advertized 3717 * window (in packets) or -1 if default 3718 * value should be used 3719 */ 3720 BPF_SOCK_OPS_TCP_CONNECT_CB, /* Calls BPF program right before an 3721 * active connection is initialized 3722 */ 3723 BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB, /* Calls BPF program when an 3724 * active connection is 3725 * established 3726 */ 3727 BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, /* Calls BPF program when a 3728 * passive connection is 3729 * established 3730 */ 3731 BPF_SOCK_OPS_NEEDS_ECN, /* If connection's congestion control 3732 * needs ECN 3733 */ 3734 BPF_SOCK_OPS_BASE_RTT, /* Get base RTT. The correct value is 3735 * based on the path and may be 3736 * dependent on the congestion control 3737 * algorithm. In general it indicates 3738 * a congestion threshold. RTTs above 3739 * this indicate congestion 3740 */ 3741 BPF_SOCK_OPS_RTO_CB, /* Called when an RTO has triggered. 3742 * Arg1: value of icsk_retransmits 3743 * Arg2: value of icsk_rto 3744 * Arg3: whether RTO has expired 3745 */ 3746 BPF_SOCK_OPS_RETRANS_CB, /* Called when skb is retransmitted. 3747 * Arg1: sequence number of 1st byte 3748 * Arg2: # segments 3749 * Arg3: return value of 3750 * tcp_transmit_skb (0 => success) 3751 */ 3752 BPF_SOCK_OPS_STATE_CB, /* Called when TCP changes state. 
3753 * Arg1: old_state 3754 * Arg2: new_state 3755 */ 3756 BPF_SOCK_OPS_TCP_LISTEN_CB, /* Called on listen(2), right after 3757 * socket transition to LISTEN state. 3758 */ 3759 BPF_SOCK_OPS_RTT_CB, /* Called on every RTT. 3760 */ 3761}; 3762 3763/* List of TCP states. There is a build check in net/ipv4/tcp.c to detect 3764 * changes between the TCP and BPF versions. Ideally this should never happen. 3765 * If it does, we need to add code to convert them before calling 3766 * the BPF sock_ops function. 3767 */ 3768enum { 3769 BPF_TCP_ESTABLISHED = 1, 3770 BPF_TCP_SYN_SENT, 3771 BPF_TCP_SYN_RECV, 3772 BPF_TCP_FIN_WAIT1, 3773 BPF_TCP_FIN_WAIT2, 3774 BPF_TCP_TIME_WAIT, 3775 BPF_TCP_CLOSE, 3776 BPF_TCP_CLOSE_WAIT, 3777 BPF_TCP_LAST_ACK, 3778 BPF_TCP_LISTEN, 3779 BPF_TCP_CLOSING, /* Now a valid state */ 3780 BPF_TCP_NEW_SYN_RECV, 3781 3782 BPF_TCP_MAX_STATES /* Leave at the end! */ 3783}; 3784 3785enum { 3786 TCP_BPF_IW = 1001, /* Set TCP initial congestion window */ 3787 TCP_BPF_SNDCWND_CLAMP = 1002, /* Set sndcwnd_clamp */ 3788}; 3789 3790struct bpf_perf_event_value { 3791 __u64 counter; 3792 __u64 enabled; 3793 __u64 running; 3794}; 3795 3796enum { 3797 BPF_DEVCG_ACC_MKNOD = (1ULL << 0), 3798 BPF_DEVCG_ACC_READ = (1ULL << 1), 3799 BPF_DEVCG_ACC_WRITE = (1ULL << 2), 3800}; 3801 3802enum { 3803 BPF_DEVCG_DEV_BLOCK = (1ULL << 0), 3804 BPF_DEVCG_DEV_CHAR = (1ULL << 1), 3805}; 3806 3807struct bpf_cgroup_dev_ctx { 3808 /* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */ 3809 __u32 access_type; 3810 __u32 major; 3811 __u32 minor; 3812}; 3813 3814struct bpf_raw_tracepoint_args { 3815 __u64 args[0]; 3816}; 3817 3818/* DIRECT: Skip the FIB rules and go to FIB table associated with device 3819 * OUTPUT: Do lookup from egress perspective; default is ingress 3820 */ 3821enum { 3822 BPF_FIB_LOOKUP_DIRECT = (1U << 0), 3823 BPF_FIB_LOOKUP_OUTPUT = (1U << 1), 3824}; 3825 3826enum { 3827 BPF_FIB_LKUP_RET_SUCCESS, /* lookup successful */ 3828 BPF_FIB_LKUP_RET_BLACKHOLE, /* dest is blackholed; can be dropped */ 3829 BPF_FIB_LKUP_RET_UNREACHABLE, /* dest is unreachable; can be dropped */ 3830 BPF_FIB_LKUP_RET_PROHIBIT, /* dest not allowed; can be dropped */ 3831 BPF_FIB_LKUP_RET_NOT_FWDED, /* packet is not forwarded */ 3832 BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */ 3833 BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */ 3834 BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */ 3835 BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */ 3836}; 3837 3838struct bpf_fib_lookup { 3839 /* input: network family for lookup (AF_INET, AF_INET6) 3840 * output: network family of egress nexthop 3841 */ 3842 __u8 family; 3843 3844 /* set if lookup is to consider L4 data - e.g., FIB rules */ 3845 __u8 l4_protocol; 3846 __be16 sport; 3847 __be16 dport; 3848 3849 /* total length of packet from network header - used for MTU check */ 3850 __u16 tot_len; 3851 3852 /* input: L3 device index for lookup 3853 * output: device index from FIB lookup 3854 */ 3855 __u32 ifindex; 3856 3857 union { 3858 /* inputs to lookup */ 3859 __u8 tos; /* AF_INET */ 3860 __be32 flowinfo; /* AF_INET6, flow_label + priority */ 3861 3862 /* output: metric of fib result (IPv4/IPv6 only) */ 3863 __u32 rt_metric; 3864 }; 3865 3866 union { 3867 __be32 ipv4_src; 3868 __u32 ipv6_src[4]; /* in6_addr; network order */ 3869 }; 3870 3871 /* input to bpf_fib_lookup, ipv{4,6}_dst is destination address in 3872 * network header. 
output: bpf_fib_lookup sets to gateway address
3873 * if FIB lookup returns gateway route
3874 */
3875 union {
3876 __be32 ipv4_dst;
3877 __u32 ipv6_dst[4]; /* in6_addr; network order */
3878 };
3879
3880 /* output */
3881 __be16 h_vlan_proto;
3882 __be16 h_vlan_TCI;
3883 __u8 smac[6]; /* ETH_ALEN */
3884 __u8 dmac[6]; /* ETH_ALEN */
3885 };
3886
3887 enum bpf_task_fd_type {
3888 BPF_FD_TYPE_RAW_TRACEPOINT, /* tp name */
3889 BPF_FD_TYPE_TRACEPOINT, /* tp name */
3890 BPF_FD_TYPE_KPROBE, /* (symbol + offset) or addr */
3891 BPF_FD_TYPE_KRETPROBE, /* (symbol + offset) or addr */
3892 BPF_FD_TYPE_UPROBE, /* filename + offset */
3893 BPF_FD_TYPE_URETPROBE, /* filename + offset */
3894 };
3895
3896 enum {
3897 BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = (1U << 0),
3898 BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = (1U << 1),
3899 BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = (1U << 2),
3900 };
3901
3902 struct bpf_flow_keys {
3903 __u16 nhoff;
3904 __u16 thoff;
3905 __u16 addr_proto; /* ETH_P_* of valid addrs */
3906 __u8 is_frag;
3907 __u8 is_first_frag;
3908 __u8 is_encap;
3909 __u8 ip_proto;
3910 __be16 n_proto;
3911 __be16 sport;
3912 __be16 dport;
3913 union {
3914 struct {
3915 __be32 ipv4_src;
3916 __be32 ipv4_dst;
3917 };
3918 struct {
3919 __u32 ipv6_src[4]; /* in6_addr; network order */
3920 __u32 ipv6_dst[4]; /* in6_addr; network order */
3921 };
3922 };
3923 __u32 flags;
3924 __be32 flow_label;
3925 };
3926
3927 struct bpf_func_info {
3928 __u32 insn_off;
3929 __u32 type_id;
3930 };
3931
3932 #define BPF_LINE_INFO_LINE_NUM(line_col) ((line_col) >> 10)
3933 #define BPF_LINE_INFO_LINE_COL(line_col) ((line_col) & 0x3ff)
3934
3935 struct bpf_line_info {
3936 __u32 insn_off;
3937 __u32 file_name_off;
3938 __u32 line_off;
3939 __u32 line_col;
3940 };
3941
3942 struct bpf_spin_lock {
3943 __u32 val;
3944 };
3945
3946 struct bpf_sysctl {
3947 __u32 write; /* Sysctl is being read (= 0) or written (= 1).
3948 * Allows 1,2,4-byte read, but no write.
3949 */
3950 __u32 file_pos; /* Sysctl file position to read from, write to.
3951 * Allows 1,2,4-byte read and 4-byte write.
3952 */
3953 };
3954
3955 struct bpf_sockopt {
3956 __bpf_md_ptr(struct bpf_sock *, sk);
3957 __bpf_md_ptr(void *, optval);
3958 __bpf_md_ptr(void *, optval_end);
3959
3960 __s32 level;
3961 __s32 optname;
3962 __s32 optlen;
3963 __s32 retval;
3964 };
3965
3966 struct bpf_pidns_info {
3967 __u32 pid;
3968 __u32 tgid;
3969 };
3970 #endif /* _UAPI__LINUX_BPF_H__ */
3971