/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#ifndef _UAPI__LINUX_BPF_H__
#define _UAPI__LINUX_BPF_H__

#include <linux/types.h>
#include <linux/bpf_common.h>

/* Extended instruction set based on top of classic BPF */

/* instruction classes */
#define BPF_JMP32	0x06	/* jmp mode in word width */
#define BPF_ALU64	0x07	/* alu mode in double word width */

/* ld/ldx fields */
#define BPF_DW		0x18	/* double word (64-bit) */
#define BPF_XADD	0xc0	/* exclusive add */

/* alu/jmp fields */
#define BPF_MOV		0xb0	/* mov reg to reg */
#define BPF_ARSH	0xc0	/* sign extending arithmetic shift right */

/* change endianness of a register */
#define BPF_END		0xd0	/* flags for endianness conversion: */
#define BPF_TO_LE	0x00	/* convert to little-endian */
#define BPF_TO_BE	0x08	/* convert to big-endian */
#define BPF_FROM_LE	BPF_TO_LE
#define BPF_FROM_BE	BPF_TO_BE

/* jmp encodings */
#define BPF_JNE		0x50	/* jump != */
#define BPF_JLT		0xa0	/* LT is unsigned, '<' */
#define BPF_JLE		0xb0	/* LE is unsigned, '<=' */
#define BPF_JSGT	0x60	/* SGT is signed '>', GT in x86 */
#define BPF_JSGE	0x70	/* SGE is signed '>=', GE in x86 */
#define BPF_JSLT	0xc0	/* SLT is signed, '<' */
#define BPF_JSLE	0xd0	/* SLE is signed, '<=' */
#define BPF_CALL	0x80	/* function call */
#define BPF_EXIT	0x90	/* function return */

/* Register numbers */
enum {
	BPF_REG_0 = 0,
	BPF_REG_1,
	BPF_REG_2,
	BPF_REG_3,
	BPF_REG_4,
	BPF_REG_5,
	BPF_REG_6,
	BPF_REG_7,
	BPF_REG_8,
	BPF_REG_9,
	BPF_REG_10,
	__MAX_BPF_REG,
};

/* BPF has 10 general purpose 64-bit registers and a stack frame. */
#define MAX_BPF_REG	__MAX_BPF_REG

struct bpf_insn {
	__u8	code;		/* opcode */
	__u8	dst_reg:4;	/* dest register */
	__u8	src_reg:4;	/* source register */
	__s16	off;		/* signed offset */
	__s32	imm;		/* signed immediate constant */
};

/* Key of a BPF_MAP_TYPE_LPM_TRIE entry */
struct bpf_lpm_trie_key {
	__u32	prefixlen;	/* up to 32 for AF_INET, 128 for AF_INET6 */
	__u8	data[0];	/* Arbitrary size */
};

struct bpf_cgroup_storage_key {
	__u64	cgroup_inode_id;	/* cgroup inode id */
	__u32	attach_type;		/* program attach type */
};

union bpf_iter_link_info {
	struct {
		__u32	map_fd;
	} map;
};
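
/* Illustrative sketch, not part of the UAPI itself: building the key for a
 * lookup in a hypothetical BPF_MAP_TYPE_LPM_TRIE map that stores IPv4
 * prefixes. The struct header is followed by the address bytes in network
 * byte order, and prefixlen selects how many leading bits of that data are
 * significant (here, 10.0.0.0/24):
 *
 *	__u8 buf[sizeof(struct bpf_lpm_trie_key) + 4];
 *	struct bpf_lpm_trie_key *key = (struct bpf_lpm_trie_key *)buf;
 *	__u32 addr = htonl(0x0a000000);		// 10.0.0.0
 *
 *	key->prefixlen = 24;
 *	memcpy(key->data, &addr, sizeof(addr));
 */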

/* BPF syscall commands, see bpf(2) man-page for details. */
enum bpf_cmd {
	BPF_MAP_CREATE,
	BPF_MAP_LOOKUP_ELEM,
	BPF_MAP_UPDATE_ELEM,
	BPF_MAP_DELETE_ELEM,
	BPF_MAP_GET_NEXT_KEY,
	BPF_PROG_LOAD,
	BPF_OBJ_PIN,
	BPF_OBJ_GET,
	BPF_PROG_ATTACH,
	BPF_PROG_DETACH,
	BPF_PROG_TEST_RUN,
	BPF_PROG_GET_NEXT_ID,
	BPF_MAP_GET_NEXT_ID,
	BPF_PROG_GET_FD_BY_ID,
	BPF_MAP_GET_FD_BY_ID,
	BPF_OBJ_GET_INFO_BY_FD,
	BPF_PROG_QUERY,
	BPF_RAW_TRACEPOINT_OPEN,
	BPF_BTF_LOAD,
	BPF_BTF_GET_FD_BY_ID,
	BPF_TASK_FD_QUERY,
	BPF_MAP_LOOKUP_AND_DELETE_ELEM,
	BPF_MAP_FREEZE,
	BPF_BTF_GET_NEXT_ID,
	BPF_MAP_LOOKUP_BATCH,
	BPF_MAP_LOOKUP_AND_DELETE_BATCH,
	BPF_MAP_UPDATE_BATCH,
	BPF_MAP_DELETE_BATCH,
	BPF_LINK_CREATE,
	BPF_LINK_UPDATE,
	BPF_LINK_GET_FD_BY_ID,
	BPF_LINK_GET_NEXT_ID,
	BPF_ENABLE_STATS,
	BPF_ITER_CREATE,
	BPF_LINK_DETACH,
};

enum bpf_map_type {
	BPF_MAP_TYPE_UNSPEC,
	BPF_MAP_TYPE_HASH,
	BPF_MAP_TYPE_ARRAY,
	BPF_MAP_TYPE_PROG_ARRAY,
	BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	BPF_MAP_TYPE_PERCPU_HASH,
	BPF_MAP_TYPE_PERCPU_ARRAY,
	BPF_MAP_TYPE_STACK_TRACE,
	BPF_MAP_TYPE_CGROUP_ARRAY,
	BPF_MAP_TYPE_LRU_HASH,
	BPF_MAP_TYPE_LRU_PERCPU_HASH,
	BPF_MAP_TYPE_LPM_TRIE,
	BPF_MAP_TYPE_ARRAY_OF_MAPS,
	BPF_MAP_TYPE_HASH_OF_MAPS,
	BPF_MAP_TYPE_DEVMAP,
	BPF_MAP_TYPE_SOCKMAP,
	BPF_MAP_TYPE_CPUMAP,
	BPF_MAP_TYPE_XSKMAP,
	BPF_MAP_TYPE_SOCKHASH,
	BPF_MAP_TYPE_CGROUP_STORAGE,
	BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
	BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
	BPF_MAP_TYPE_QUEUE,
	BPF_MAP_TYPE_STACK,
	BPF_MAP_TYPE_SK_STORAGE,
	BPF_MAP_TYPE_DEVMAP_HASH,
	BPF_MAP_TYPE_STRUCT_OPS,
	BPF_MAP_TYPE_RINGBUF,
};
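
/* Illustrative sketch, not part of the UAPI itself: creating an array map
 * with the BPF_MAP_CREATE command through a raw bpf(2) syscall, using the
 * union bpf_attr defined further below (error handling omitted):
 *
 *	union bpf_attr attr = {};
 *	int map_fd;
 *
 *	attr.map_type    = BPF_MAP_TYPE_ARRAY;
 *	attr.key_size    = sizeof(__u32);
 *	attr.value_size  = sizeof(__u64);
 *	attr.max_entries = 64;
 *
 *	map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */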

/* Note that tracing related programs such as
 * BPF_PROG_TYPE_{KPROBE,TRACEPOINT,PERF_EVENT,RAW_TRACEPOINT}
 * are not subject to a stable API since kernel internal data
 * structures can change from release to release and may
 * therefore break existing tracing BPF programs. Tracing BPF
 * programs correspond to /a/ specific kernel which is to be
 * analyzed, and not /a/ specific kernel /and/ all future ones.
 */
enum bpf_prog_type {
	BPF_PROG_TYPE_UNSPEC,
	BPF_PROG_TYPE_SOCKET_FILTER,
	BPF_PROG_TYPE_KPROBE,
	BPF_PROG_TYPE_SCHED_CLS,
	BPF_PROG_TYPE_SCHED_ACT,
	BPF_PROG_TYPE_TRACEPOINT,
	BPF_PROG_TYPE_XDP,
	BPF_PROG_TYPE_PERF_EVENT,
	BPF_PROG_TYPE_CGROUP_SKB,
	BPF_PROG_TYPE_CGROUP_SOCK,
	BPF_PROG_TYPE_LWT_IN,
	BPF_PROG_TYPE_LWT_OUT,
	BPF_PROG_TYPE_LWT_XMIT,
	BPF_PROG_TYPE_SOCK_OPS,
	BPF_PROG_TYPE_SK_SKB,
	BPF_PROG_TYPE_CGROUP_DEVICE,
	BPF_PROG_TYPE_SK_MSG,
	BPF_PROG_TYPE_RAW_TRACEPOINT,
	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
	BPF_PROG_TYPE_LWT_SEG6LOCAL,
	BPF_PROG_TYPE_LIRC_MODE2,
	BPF_PROG_TYPE_SK_REUSEPORT,
	BPF_PROG_TYPE_FLOW_DISSECTOR,
	BPF_PROG_TYPE_CGROUP_SYSCTL,
	BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
	BPF_PROG_TYPE_CGROUP_SOCKOPT,
	BPF_PROG_TYPE_TRACING,
	BPF_PROG_TYPE_STRUCT_OPS,
	BPF_PROG_TYPE_EXT,
	BPF_PROG_TYPE_LSM,
	BPF_PROG_TYPE_SK_LOOKUP,
};

enum bpf_attach_type {
	BPF_CGROUP_INET_INGRESS,
	BPF_CGROUP_INET_EGRESS,
	BPF_CGROUP_INET_SOCK_CREATE,
	BPF_CGROUP_SOCK_OPS,
	BPF_SK_SKB_STREAM_PARSER,
	BPF_SK_SKB_STREAM_VERDICT,
	BPF_CGROUP_DEVICE,
	BPF_SK_MSG_VERDICT,
	BPF_CGROUP_INET4_BIND,
	BPF_CGROUP_INET6_BIND,
	BPF_CGROUP_INET4_CONNECT,
	BPF_CGROUP_INET6_CONNECT,
	BPF_CGROUP_INET4_POST_BIND,
	BPF_CGROUP_INET6_POST_BIND,
	BPF_CGROUP_UDP4_SENDMSG,
	BPF_CGROUP_UDP6_SENDMSG,
	BPF_LIRC_MODE2,
	BPF_FLOW_DISSECTOR,
	BPF_CGROUP_SYSCTL,
	BPF_CGROUP_UDP4_RECVMSG,
	BPF_CGROUP_UDP6_RECVMSG,
	BPF_CGROUP_GETSOCKOPT,
	BPF_CGROUP_SETSOCKOPT,
	BPF_TRACE_RAW_TP,
	BPF_TRACE_FENTRY,
	BPF_TRACE_FEXIT,
	BPF_MODIFY_RETURN,
	BPF_LSM_MAC,
	BPF_TRACE_ITER,
	BPF_CGROUP_INET4_GETPEERNAME,
	BPF_CGROUP_INET6_GETPEERNAME,
	BPF_CGROUP_INET4_GETSOCKNAME,
	BPF_CGROUP_INET6_GETSOCKNAME,
	BPF_XDP_DEVMAP,
	BPF_CGROUP_INET_SOCK_RELEASE,
	BPF_XDP_CPUMAP,
	BPF_SK_LOOKUP,
	BPF_XDP,
	__MAX_BPF_ATTACH_TYPE
};

#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE

enum bpf_link_type {
	BPF_LINK_TYPE_UNSPEC = 0,
	BPF_LINK_TYPE_RAW_TRACEPOINT = 1,
	BPF_LINK_TYPE_TRACING = 2,
	BPF_LINK_TYPE_CGROUP = 3,
	BPF_LINK_TYPE_ITER = 4,
	BPF_LINK_TYPE_NETNS = 5,
	BPF_LINK_TYPE_XDP = 6,

	MAX_BPF_LINK_TYPE,
};

/* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
 *
 * NONE(default): No further bpf programs allowed in the subtree.
 *
 * BPF_F_ALLOW_OVERRIDE: If a sub-cgroup installs some bpf program,
 * the program in this cgroup yields to sub-cgroup program.
 *
 * BPF_F_ALLOW_MULTI: If a sub-cgroup installs some bpf program,
 * that cgroup program gets run in addition to the program in this cgroup.
 *
 * Only one program is allowed to be attached to a cgroup with
 * NONE or BPF_F_ALLOW_OVERRIDE flag.
 * Attaching another program on top of NONE or BPF_F_ALLOW_OVERRIDE will
 * release the old program and attach the new one. Attach flags have to match.
 *
 * Multiple programs are allowed to be attached to a cgroup with
 * BPF_F_ALLOW_MULTI flag. They are executed in FIFO order
 * (those that were attached first, run first).
 * The programs of a sub-cgroup are executed first, then programs of
 * this cgroup and then programs of the parent cgroup.
 * When a child program makes a decision (like picking TCP CA or sock bind)
 * the parent program has a chance to override it.
 *
 * With BPF_F_ALLOW_MULTI a new program is added to the end of the list of
 * programs for a cgroup. It is however possible to replace an old program at
 * any position by also specifying the BPF_F_REPLACE flag and the program to
 * replace in the replace_bpf_fd attribute. The old program at this position
 * will be released.
 *
 * A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups.
 * A cgroup with NONE doesn't allow any programs in sub-cgroups.
 * Ex1:
 * cgrp1 (MULTI progs A, B) ->
 *    cgrp2 (OVERRIDE prog C) ->
 *      cgrp3 (MULTI prog D) ->
 *        cgrp4 (OVERRIDE prog E) ->
 *          cgrp5 (NONE prog F)
 * the event in cgrp5 triggers execution of F,D,A,B in that order.
 * if prog F is detached, the execution is E,D,A,B
 * if prog F and D are detached, the execution is E,A,B
 * if prog F, E and D are detached, the execution is C,A,B
 *
 * All eligible programs are executed regardless of return code from
 * earlier programs.
 */
#define BPF_F_ALLOW_OVERRIDE	(1U << 0)
#define BPF_F_ALLOW_MULTI	(1U << 1)
#define BPF_F_REPLACE		(1U << 2)

/* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
 * verifier will perform strict alignment checking as if the kernel
 * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set,
 * and NET_IP_ALIGN defined to 2.
 */
#define BPF_F_STRICT_ALIGNMENT	(1U << 0)

/* If BPF_F_ANY_ALIGNMENT is used in BPF_PROG_LOAD command, the
 * verifier will allow any alignment whatsoever. On platforms
 * with strict alignment requirements for loads and stores (such
 * as sparc and mips) the verifier validates that all loads and
 * stores provably follow this requirement. This flag turns that
 * checking and enforcement off.
 *
 * It is mostly used for testing when we want to validate the
 * context and memory access aspects of the verifier, but because
 * of an unaligned access the alignment check would trigger before
 * the one we are interested in.
 */
#define BPF_F_ANY_ALIGNMENT	(1U << 1)

/* BPF_F_TEST_RND_HI32 is used in BPF_PROG_LOAD command for testing purposes.
 * The verifier does sub-register def/use analysis and identifies instructions
 * whose def only matters for the low 32 bits, with the high 32 bits never
 * being referenced later through an implicit zero extension. The verifier
 * notifies JIT back-ends that it is safe to skip clearing the high 32 bits
 * for these instructions, which saves some back-ends a lot of code-gen.
 * However, such an optimization is not needed on some arches, for example
 * x86_64 and arm64, whose JIT back-ends therefore do not use the verifier's
 * analysis result. But we really want a way to verify the correctness of the
 * described optimization on x86_64, on which testsuites are frequently
 * exercised.
 *
 * So, this flag is introduced. Once it is set, the verifier will randomize
 * the high 32 bits for those instructions that have been identified as safe
 * to skip the zero extension for. Then, if the verifier's analysis is not
 * correct, such randomization will regress tests and expose bugs.
 */
#define BPF_F_TEST_RND_HI32	(1U << 2)
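
/* Illustrative sketch, not part of the UAPI itself: attaching an already
 * loaded program (hypothetical prog_fd) to a cgroup (hypothetical cgroup_fd,
 * an open fd of the cgroup directory) with BPF_F_ALLOW_MULTI, so that other
 * programs may also be attached to the same cgroup or its descendants:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd     = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type   = BPF_CGROUP_INET_INGRESS;
 *	attr.attach_flags  = BPF_F_ALLOW_MULTI;
 *
 *	syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */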

/* The verifier's internal test flag. Behavior is undefined. */
#define BPF_F_TEST_STATE_FREQ	(1U << 3)

/* When BPF ldimm64's insn[0].src_reg != 0 then this can have
 * two extensions:
 *
 * insn[0].src_reg:  BPF_PSEUDO_MAP_FD   BPF_PSEUDO_MAP_VALUE
 * insn[0].imm:      map fd              map fd
 * insn[1].imm:      0                   offset into value
 * insn[0].off:      0                   0
 * insn[1].off:      0                   0
 * ldimm64 rewrite:  address of map      address of map[0]+offset
 * verifier type:    CONST_PTR_TO_MAP    PTR_TO_MAP_VALUE
 */
#define BPF_PSEUDO_MAP_FD	1
#define BPF_PSEUDO_MAP_VALUE	2

/* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
 * offset to another bpf function
 */
#define BPF_PSEUDO_CALL		1

/* flags for BPF_MAP_UPDATE_ELEM command */
enum {
	BPF_ANY		= 0, /* create new element or update existing */
	BPF_NOEXIST	= 1, /* create new element if it didn't exist */
	BPF_EXIST	= 2, /* update existing element */
	BPF_F_LOCK	= 4, /* spin_lock-ed map_lookup/map_update */
};

/* flags for BPF_MAP_CREATE command */
enum {
	BPF_F_NO_PREALLOC	= (1U << 0),
/* Instead of having one common LRU list in the
 * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list
 * which can scale and perform better.
 * Note, the LRU nodes (including free nodes) cannot be moved
 * across different LRU lists.
 */
	BPF_F_NO_COMMON_LRU	= (1U << 1),
/* Specify numa node during map creation */
	BPF_F_NUMA_NODE		= (1U << 2),

/* Flags for accessing BPF object from syscall side. */
	BPF_F_RDONLY		= (1U << 3),
	BPF_F_WRONLY		= (1U << 4),

/* Flag for stack_map, store build_id+offset instead of pointer */
	BPF_F_STACK_BUILD_ID	= (1U << 5),

/* Zero-initialize hash function seed. This should only be used for testing. */
	BPF_F_ZERO_SEED		= (1U << 6),

/* Flags for accessing BPF object from program side. */
	BPF_F_RDONLY_PROG	= (1U << 7),
	BPF_F_WRONLY_PROG	= (1U << 8),

/* Clone map from listener for newly accepted socket */
	BPF_F_CLONE		= (1U << 9),

/* Enable memory-mapping BPF map */
	BPF_F_MMAPABLE		= (1U << 10),
};

/* Flags for BPF_PROG_QUERY. */

/* Query effective (directly attached + inherited from ancestor cgroups)
 * programs that will be executed for events within a cgroup.
 * attach_flags with this flag are returned only for directly attached programs.
 */
#define BPF_F_QUERY_EFFECTIVE	(1U << 0)

/* type for BPF_ENABLE_STATS */
enum bpf_stats_type {
	/* enabled run_time_ns and run_cnt */
	BPF_STATS_RUN_TIME = 0,
};

enum bpf_stack_build_id_status {
	/* user space needs an empty entry to identify the end of a trace */
	BPF_STACK_BUILD_ID_EMPTY = 0,
	/* with valid build_id and offset */
	BPF_STACK_BUILD_ID_VALID = 1,
	/* couldn't get build_id, fallback to ip */
	BPF_STACK_BUILD_ID_IP = 2,
};

#define BPF_BUILD_ID_SIZE 20
struct bpf_stack_build_id {
	__s32		status;
	unsigned char	build_id[BPF_BUILD_ID_SIZE];
	union {
		__u64	offset;
		__u64	ip;
	};
};

#define BPF_OBJ_NAME_LEN 16U
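
/* Illustrative sketch, not part of the UAPI itself: inserting an element
 * through the BPF_MAP_UPDATE_ELEM command with the BPF_NOEXIST flag, so the
 * call fails with EEXIST if the key is already present (map_fd, key and
 * value are hypothetical and set up elsewhere):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.map_fd = map_fd;
 *	attr.key    = (__u64)(unsigned long)&key;
 *	attr.value  = (__u64)(unsigned long)&value;
 *	attr.flags  = BPF_NOEXIST;
 *
 *	syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
 */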

union bpf_attr {
	struct { /* anonymous struct used by BPF_MAP_CREATE command */
		__u32	map_type;	/* one of enum bpf_map_type */
		__u32	key_size;	/* size of key in bytes */
		__u32	value_size;	/* size of value in bytes */
		__u32	max_entries;	/* max number of entries in a map */
		__u32	map_flags;	/* BPF_MAP_CREATE related
					 * flags defined above.
					 */
		__u32	inner_map_fd;	/* fd pointing to the inner map */
		__u32	numa_node;	/* numa node (effective only if
					 * BPF_F_NUMA_NODE is set).
					 */
		char	map_name[BPF_OBJ_NAME_LEN];
		__u32	map_ifindex;	/* ifindex of netdev to create on */
		__u32	btf_fd;		/* fd pointing to a BTF type data */
		__u32	btf_key_type_id;	/* BTF type_id of the key */
		__u32	btf_value_type_id;	/* BTF type_id of the value */
		__u32	btf_vmlinux_value_type_id;/* BTF type_id of a kernel-
						   * struct stored as the
						   * map value
						   */
	};

	struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
		__u32		map_fd;
		__aligned_u64	key;
		union {
			__aligned_u64 value;
			__aligned_u64 next_key;
		};
		__u64		flags;
	};

	struct { /* struct used by BPF_MAP_*_BATCH commands */
		__aligned_u64	in_batch;	/* start batch,
						 * NULL to start from beginning
						 */
		__aligned_u64	out_batch;	/* output: next start batch */
		__aligned_u64	keys;
		__aligned_u64	values;
		__u32		count;		/* input/output:
						 * input: # of key/value
						 * elements
						 * output: # of filled elements
						 */
		__u32		map_fd;
		__u64		elem_flags;
		__u64		flags;
	} batch;

	struct { /* anonymous struct used by BPF_PROG_LOAD command */
		__u32		prog_type;	/* one of enum bpf_prog_type */
		__u32		insn_cnt;
		__aligned_u64	insns;
		__aligned_u64	license;
		__u32		log_level;	/* verbosity level of verifier */
		__u32		log_size;	/* size of user buffer */
		__aligned_u64	log_buf;	/* user supplied buffer */
		__u32		kern_version;	/* not used */
		__u32		prog_flags;
		char		prog_name[BPF_OBJ_NAME_LEN];
		__u32		prog_ifindex;	/* ifindex of netdev to prep for */
		/* For some prog types expected attach type must be known at
		 * load time to verify attach type specific parts of prog
		 * (context accesses, allowed helpers, etc).
		 */
		__u32		expected_attach_type;
		__u32		prog_btf_fd;	/* fd pointing to BTF type data */
		__u32		func_info_rec_size;	/* userspace bpf_func_info size */
		__aligned_u64	func_info;	/* func info */
		__u32		func_info_cnt;	/* number of bpf_func_info records */
		__u32		line_info_rec_size;	/* userspace bpf_line_info size */
		__aligned_u64	line_info;	/* line info */
		__u32		line_info_cnt;	/* number of bpf_line_info records */
		__u32		attach_btf_id;	/* in-kernel BTF type id to attach to */
		__u32		attach_prog_fd; /* 0 to attach to vmlinux */
	};

	struct { /* anonymous struct used by BPF_OBJ_* commands */
		__aligned_u64	pathname;
		__u32		bpf_fd;
		__u32		file_flags;
	};

	struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
		__u32		target_fd;	/* container object to attach to */
		__u32		attach_bpf_fd;	/* eBPF program to attach */
		__u32		attach_type;
		__u32		attach_flags;
		__u32		replace_bpf_fd;	/* previously attached eBPF
						 * program to replace if
						 * BPF_F_REPLACE is used
						 */
	};

	struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
		__u32		prog_fd;
		__u32		retval;
		__u32		data_size_in;	/* input: len of data_in */
		__u32		data_size_out;	/* input/output: len of data_out
						 * returns ENOSPC if data_out
						 * is too small.
						 */
		__aligned_u64	data_in;
		__aligned_u64	data_out;
		__u32		repeat;
		__u32		duration;
		__u32		ctx_size_in;	/* input: len of ctx_in */
		__u32		ctx_size_out;	/* input/output: len of ctx_out
						 * returns ENOSPC if ctx_out
						 * is too small.
						 */
		__aligned_u64	ctx_in;
		__aligned_u64	ctx_out;
	} test;

	struct { /* anonymous struct used by BPF_*_GET_*_ID */
		union {
			__u32		start_id;
			__u32		prog_id;
			__u32		map_id;
			__u32		btf_id;
			__u32		link_id;
		};
		__u32		next_id;
		__u32		open_flags;
	};

	struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */
		__u32		bpf_fd;
		__u32		info_len;
		__aligned_u64	info;
	} info;

	struct { /* anonymous struct used by BPF_PROG_QUERY command */
		__u32		target_fd;	/* container object to query */
		__u32		attach_type;
		__u32		query_flags;
		__u32		attach_flags;
		__aligned_u64	prog_ids;
		__u32		prog_cnt;
	} query;

	struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */
		__u64		name;
		__u32		prog_fd;
	} raw_tracepoint;

	struct { /* anonymous struct for BPF_BTF_LOAD */
		__aligned_u64	btf;
		__aligned_u64	btf_log_buf;
		__u32		btf_size;
		__u32		btf_log_size;
		__u32		btf_log_level;
	};

	struct {
		__u32		pid;		/* input: pid */
		__u32		fd;		/* input: fd */
		__u32		flags;		/* input: flags */
		__u32		buf_len;	/* input/output: buf len */
		__aligned_u64	buf;		/* input/output:
						 * tp_name for tracepoint
						 * symbol for kprobe
						 * filename for uprobe
						 */
		__u32		prog_id;	/* output: prog_id */
		__u32		fd_type;	/* output: BPF_FD_TYPE_* */
		__u64		probe_offset;	/* output: probe_offset */
		__u64		probe_addr;	/* output: probe_addr */
	} task_fd_query;

	struct { /* struct used by BPF_LINK_CREATE command */
		__u32		prog_fd;	/* eBPF program to attach */
		union {
			__u32		target_fd;	/* object to attach to */
			__u32		target_ifindex; /* target ifindex */
		};
		__u32		attach_type;	/* attach type */
		__u32		flags;		/* extra flags */
		__aligned_u64	iter_info;	/* extra bpf_iter_link_info */
		__u32		iter_info_len;	/* iter_info length */
	} link_create;

	struct { /* struct used by BPF_LINK_UPDATE command */
		__u32		link_fd;	/* link fd */
		/* new program fd to update link with */
		__u32		new_prog_fd;
		__u32		flags;		/* extra flags */
		/* expected link's program fd; is specified only if
		 * BPF_F_REPLACE flag is set in flags
		 */
		__u32		old_prog_fd;
	} link_update;

	struct {
		__u32		link_fd;
	} link_detach;

	struct { /* struct used by BPF_ENABLE_STATS command */
		__u32		type;
	} enable_stats;

	struct { /* struct used by BPF_ITER_CREATE command */
		__u32		link_fd;
		__u32		flags;
	} iter_create;

} __attribute__((aligned(8)));
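
/* Illustrative sketch, not part of the UAPI itself: loading a minimal
 * "return 0" socket filter with the BPF_PROG_LOAD command. The two
 * instructions are hand-encoded with the opcode macros defined above and in
 * linux/bpf_common.h; error handling and a verifier log buffer are omitted:
 *
 *	struct bpf_insn insns[] = {
 *		{ .code = BPF_ALU64 | BPF_MOV | BPF_K,
 *		  .dst_reg = BPF_REG_0, .imm = 0 },	// r0 = 0
 *		{ .code = BPF_JMP | BPF_EXIT },		// return r0
 *	};
 *	union bpf_attr attr = {};
 *	int prog_fd;
 *
 *	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 *	attr.insn_cnt  = sizeof(insns) / sizeof(insns[0]);
 *	attr.insns     = (__u64)(unsigned long)insns;
 *	attr.license   = (__u64)(unsigned long)"GPL";
 *
 *	prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 */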

/* The description below is an attempt at providing documentation to eBPF
 * developers about the multiple available eBPF helper functions. It can be
 * parsed and used to produce a manual page. The workflow is the following,
 * and requires the rst2man utility:
 *
 *     $ ./scripts/bpf_helpers_doc.py \
 *             --filename include/uapi/linux/bpf.h > /tmp/bpf-helpers.rst
 *     $ rst2man /tmp/bpf-helpers.rst > /tmp/bpf-helpers.7
 *     $ man /tmp/bpf-helpers.7
 *
 * Note that in order to produce this external documentation, some RST
 * formatting is used in the descriptions to get "bold" and "italics" in
 * manual pages. Also note that the few trailing white spaces are
 * intentional, removing them would break paragraphs for rst2man.
 *
 * Start of BPF helper function descriptions:
 *
 * void *bpf_map_lookup_elem(struct bpf_map *map, const void *key)
 * 	Description
 * 		Perform a lookup in *map* for an entry associated to *key*.
 * 	Return
 * 		Map value associated to *key*, or **NULL** if no entry was
 * 		found.
 *
 * long bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags)
 * 	Description
 * 		Add or update the value of the entry associated to *key* in
 * 		*map* with *value*. *flags* is one of:
 *
 * 		**BPF_NOEXIST**
 * 			The entry for *key* must not exist in the map.
 * 		**BPF_EXIST**
 * 			The entry for *key* must already exist in the map.
 * 		**BPF_ANY**
 * 			No condition on the existence of the entry for *key*.
 *
 * 		Flag value **BPF_NOEXIST** cannot be used for maps of types
 * 		**BPF_MAP_TYPE_ARRAY** or **BPF_MAP_TYPE_PERCPU_ARRAY** (all
 * 		elements always exist), the helper would return an error.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * long bpf_map_delete_elem(struct bpf_map *map, const void *key)
 * 	Description
 * 		Delete entry with *key* from *map*.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
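 *
 * 		For illustration only (this example is not part of the
 * 		original helper documentation): a per-key counter sketch
 * 		combining the map helpers above, where *my_map* is a
 * 		hypothetical **BPF_MAP_TYPE_HASH** map defined elsewhere in
 * 		the program:
 *
 * 		::
 *
 * 			__u32 key = 0;
 * 			__u64 *val, one = 1;
 *
 * 			val = bpf_map_lookup_elem(&my_map, &key);
 * 			if (val)
 * 				__sync_fetch_and_add(val, 1);
 * 			else
 * 				bpf_map_update_elem(&my_map, &key, &one, BPF_NOEXIST);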
 *
 * long bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr)
 * 	Description
 * 		For tracing programs, safely attempt to read *size* bytes from
 * 		kernel space address *unsafe_ptr* and store the data in *dst*.
 *
 * 		Generally, use **bpf_probe_read_user**\ () or
 * 		**bpf_probe_read_kernel**\ () instead.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * u64 bpf_ktime_get_ns(void)
 * 	Description
 * 		Return the time elapsed since system boot, in nanoseconds.
 * 		Does not include time the system was suspended.
 * 		See: **clock_gettime**\ (**CLOCK_MONOTONIC**)
 * 	Return
 * 		Current *ktime*.
 *
 * long bpf_trace_printk(const char *fmt, u32 fmt_size, ...)
 * 	Description
 * 		This helper is a "printk()-like" facility for debugging. It
 * 		prints a message defined by format *fmt* (of size *fmt_size*)
 * 		to file *\/sys/kernel/debug/tracing/trace* from DebugFS, if
 * 		available. It can take up to three additional **u64**
 * 		arguments (as with eBPF helpers in general, the total number
 * 		of arguments is limited to five).
 *
 * 		Each time the helper is called, it appends a line to the trace.
 * 		Lines are discarded while *\/sys/kernel/debug/tracing/trace* is
 * 		open, use *\/sys/kernel/debug/tracing/trace_pipe* to avoid this.
 * 		The format of the trace is customizable, and the exact output
 * 		one will get depends on the options set in
 * 		*\/sys/kernel/debug/tracing/trace_options* (see also the
 * 		*README* file under the same directory). However, it usually
 * 		defaults to something like:
 *
 * 		::
 *
 * 			telnet-470   [001] .N.. 419421.045894: 0x00000001: <formatted msg>
 *
 * 		In the above:
 *
 * 			* ``telnet`` is the name of the current task.
 * 			* ``470`` is the PID of the current task.
 * 			* ``001`` is the CPU number on which the task is
 * 			  running.
 * 			* In ``.N..``, each character refers to a set of
 * 			  options (whether irqs are enabled, scheduling
 * 			  options, whether hard/softirqs are running, level of
 * 			  preempt_disabled respectively). **N** means that
 * 			  **TIF_NEED_RESCHED** and **PREEMPT_NEED_RESCHED**
 * 			  are set.
 * 			* ``419421.045894`` is a timestamp.
 * 			* ``0x00000001`` is a fake value used by BPF for the
 * 			  instruction pointer register.
 * 			* ``<formatted msg>`` is the message formatted with
 * 			  *fmt*.
 *
 * 		The conversion specifiers supported by *fmt* are similar, but
 * 		more limited than for printk(). They are **%d**, **%i**,
 * 		**%u**, **%x**, **%ld**, **%li**, **%lu**, **%lx**, **%lld**,
 * 		**%lli**, **%llu**, **%llx**, **%p**, **%s**. No modifier (size
 * 		of field, padding with zeroes, etc.) is available, and the
 * 		helper will return **-EINVAL** (but print nothing) if it
 * 		encounters an unknown specifier.
 *
 * 		Also, note that **bpf_trace_printk**\ () is slow, and should
 * 		only be used for debugging purposes. For this reason, a notice
 * 		block (spanning several lines) is printed to kernel logs and
 * 		states that the helper should not be used "for production use"
 * 		the first time this helper is used (or more precisely, when
 * 		**trace_printk**\ () buffers are allocated). For passing values
 * 		to user space, perf events should be preferred.
 * 	Return
 * 		The number of bytes written to the buffer, or a negative error
 * 		in case of failure.
 *
 * u32 bpf_get_prandom_u32(void)
 * 	Description
 * 		Get a pseudo-random number.
 *
 * 		From a security point of view, this helper uses its own
 * 		pseudo-random internal state, and cannot be used to infer the
 * 		seed of other random functions in the kernel. However, it is
 * 		essential to note that the generator used by the helper is not
 * 		cryptographically secure.
 * 	Return
 * 		A random 32-bit unsigned value.
 *
 * u32 bpf_get_smp_processor_id(void)
 * 	Description
 * 		Get the SMP (symmetric multiprocessing) processor id. Note that
 * 		all programs run with preemption disabled, which means that the
 * 		SMP processor id is stable during all the execution of the
 * 		program.
 * 	Return
 * 		The SMP id of the processor running the program.
 *
 * long bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags)
 * 	Description
 * 		Store *len* bytes from address *from* into the packet
 * 		associated to *skb*, at *offset*. *flags* are a combination of
 * 		**BPF_F_RECOMPUTE_CSUM** (automatically recompute the
 * 		checksum for the packet after storing the bytes) and
 * 		**BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\
 * 		**->swhash** and *skb*\ **->l4hash** to 0).
 *
 * 		A call to this helper is susceptible to change the underlying
 * 		packet buffer. Therefore, at load time, all checks on pointers
 * 		previously done by the verifier are invalidated and must be
 * 		performed again, if the helper is used in combination with
 * 		direct packet access.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * long bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size)
 * 	Description
 * 		Recompute the layer 3 (e.g. IP) checksum for the packet
 * 		associated to *skb*. Computation is incremental, so the helper
 * 		must know the former value of the header field that was
 * 		modified (*from*), the new value of this field (*to*), and the
 * 		number of bytes (2 or 4) for this field, stored in *size*.
 * 		Alternatively, it is possible to store the difference between
 * 		the previous and the new values of the header field in *to*, by
 * 		setting *from* and *size* to 0. For both methods, *offset*
 * 		indicates the location of the IP checksum within the packet.
 *
 * 		This helper works in combination with **bpf_csum_diff**\ (),
 * 		which does not update the checksum in-place, but offers more
 * 		flexibility and can handle sizes larger than 2 or 4 for the
 * 		checksum to update.
 *
 * 		A call to this helper is susceptible to change the underlying
 * 		packet buffer. Therefore, at load time, all checks on pointers
 * 		previously done by the verifier are invalidated and must be
 * 		performed again, if the helper is used in combination with
 * 		direct packet access.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * long bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags)
 * 	Description
 * 		Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the
 * 		packet associated to *skb*. Computation is incremental, so the
 * 		helper must know the former value of the header field that was
 * 		modified (*from*), the new value of this field (*to*), and the
 * 		number of bytes (2 or 4) for this field, stored on the lowest
 * 		four bits of *flags*. Alternatively, it is possible to store
 * 		the difference between the previous and the new values of the
 * 		header field in *to*, by setting *from* and the four lowest
 * 		bits of *flags* to 0. For both methods, *offset* indicates the
 * 		location of the IP checksum within the packet. In addition to
 * 		the size of the field, actual flags can be added (with a
 * 		bitwise OR) to *flags*. With **BPF_F_MARK_MANGLED_0**, a null
 * 		checksum is left untouched (unless **BPF_F_MARK_ENFORCE** is
 * 		added as well), and for updates resulting in a null checksum
 * 		the value is set to **CSUM_MANGLED_0** instead. Flag
 * 		**BPF_F_PSEUDO_HDR** indicates the checksum is to be computed
 * 		against a pseudo-header.
 *
 * 		This helper works in combination with **bpf_csum_diff**\ (),
 * 		which does not update the checksum in-place, but offers more
 * 		flexibility and can handle sizes larger than 2 or 4 for the
 * 		checksum to update.
 *
 * 		A call to this helper is susceptible to change the underlying
 * 		packet buffer. Therefore, at load time, all checks on pointers
 * 		previously done by the verifier are invalidated and must be
 * 		performed again, if the helper is used in combination with
 * 		direct packet access.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
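 *
 * 		For illustration only (this example is not part of the
 * 		original helper documentation): a sketch rewriting the IPv4
 * 		destination address of a TCP packet while fixing up both
 * 		checksums. The offsets and the *old_ip*/*new_ip* values
 * 		(4-byte addresses) are hypothetical and assumed to be computed
 * 		elsewhere in the program:
 *
 * 		::
 *
 * 			bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_ip, new_ip,
 * 					    BPF_F_PSEUDO_HDR | sizeof(new_ip));
 * 			bpf_l3_csum_replace(skb, IP_CSUM_OFF, old_ip, new_ip,
 * 					    sizeof(new_ip));
 * 			bpf_skb_store_bytes(skb, IP_DST_OFF, &new_ip,
 * 					    sizeof(new_ip), 0);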
 *
 * long bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index)
 * 	Description
 * 		This special helper is used to trigger a "tail call", or in
 * 		other words, to jump into another eBPF program. The same stack
 * 		frame is used (but values on stack and in registers for the
 * 		caller are not accessible to the callee). This mechanism allows
 * 		for program chaining, either for raising the maximum number of
 * 		available eBPF instructions, or to execute given programs in
 * 		conditional blocks. For security reasons, there is an upper
 * 		limit to the number of successive tail calls that can be
 * 		performed.
 *
 * 		Upon call of this helper, the program attempts to jump into a
 * 		program referenced at index *index* in *prog_array_map*, a
 * 		special map of type **BPF_MAP_TYPE_PROG_ARRAY**, and passes
 * 		*ctx*, a pointer to the context.
 *
 * 		If the call succeeds, the kernel immediately runs the first
 * 		instruction of the new program. This is not a function call,
 * 		and it never returns to the previous program. If the call
 * 		fails, then the helper has no effect, and the caller continues
 * 		to run its subsequent instructions. A call can fail if the
 * 		destination program for the jump does not exist (i.e. *index*
 * 		is greater than or equal to the number of entries in
 * 		*prog_array_map*), or if the maximum number of tail calls has
 * 		been reached for this chain of programs. This limit is defined
 * 		in the kernel by the macro **MAX_TAIL_CALL_CNT** (not
 * 		accessible to user space), which is currently set to 32.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * long bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags)
 * 	Description
 * 		Clone and redirect the packet associated to *skb* to another
 * 		net device of index *ifindex*. Both ingress and egress
 * 		interfaces can be used for redirection. The **BPF_F_INGRESS**
 * 		value in *flags* is used to make the distinction (ingress path
 * 		is selected if the flag is present, egress path otherwise).
 * 		This is the only flag supported for now.
 *
 * 		In comparison with **bpf_redirect**\ () helper,
 * 		**bpf_clone_redirect**\ () has the associated cost of
 * 		duplicating the packet buffer, but this can be executed out of
 * 		the eBPF program. Conversely, **bpf_redirect**\ () is more
 * 		efficient, but it is handled through an action code where the
 * 		redirection happens only after the eBPF program has returned.
 *
 * 		A call to this helper is susceptible to change the underlying
 * 		packet buffer. Therefore, at load time, all checks on pointers
 * 		previously done by the verifier are invalidated and must be
 * 		performed again, if the helper is used in combination with
 * 		direct packet access.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * u64 bpf_get_current_pid_tgid(void)
 * 	Return
 * 		A 64-bit integer containing the current tgid and pid, and
 * 		created as such:
 * 		*current_task*\ **->tgid << 32 \|**
 * 		*current_task*\ **->pid**.
 *
 * u64 bpf_get_current_uid_gid(void)
 * 	Return
 * 		A 64-bit integer containing the current GID and UID, and
 * 		created as such: *current_gid* **<< 32 \|** *current_uid*.
 *
 * long bpf_get_current_comm(void *buf, u32 size_of_buf)
 * 	Description
 * 		Copy the **comm** attribute of the current task into *buf* of
 * 		*size_of_buf*. The **comm** attribute contains the name of
 * 		the executable (excluding the path) for the current task. The
 * 		*size_of_buf* must be strictly positive. On success, the
 * 		helper makes sure that the *buf* is NUL-terminated. On failure,
 * 		it is filled with zeroes.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
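 *
 * 		For illustration only (this example is not part of the
 * 		original helper documentation): a sketch unpacking the value
 * 		returned by **bpf_get_current_pid_tgid**\ () and fetching the
 * 		task name with **bpf_get_current_comm**\ ():
 *
 * 		::
 *
 * 			char comm[16] = {};
 * 			__u64 id   = bpf_get_current_pid_tgid();
 * 			__u32 pid  = (__u32)id;		// lower 32 bits: pid (thread id)
 * 			__u32 tgid = id >> 32;		// upper 32 bits: tgid (process id)
 *
 * 			bpf_get_current_comm(&comm, sizeof(comm));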
 *
 * u32 bpf_get_cgroup_classid(struct sk_buff *skb)
 * 	Description
 * 		Retrieve the classid for the current task, i.e. for the net_cls
 * 		cgroup to which *skb* belongs.
 *
 * 		This helper can be used on TC egress path, but not on ingress.
 *
 * 		The net_cls cgroup provides an interface to tag network packets
 * 		based on a user-provided identifier for all traffic coming from
 * 		the tasks belonging to the related cgroup. See also the related
 * 		kernel documentation, available from the Linux sources in file
 * 		*Documentation/admin-guide/cgroup-v1/net_cls.rst*.
 *
 * 		The Linux kernel has two versions for cgroups: there are
 * 		cgroups v1 and cgroups v2. Both are available to users, who can
 * 		use a mixture of them, but note that the net_cls cgroup is for
 * 		cgroup v1 only. This makes it incompatible with BPF programs
 * 		run on cgroups, which is a cgroup-v2-only feature (a socket can
 * 		only hold data for one version of cgroups at a time).
 *
 * 		This helper is only available if the kernel was compiled with
 * 		the **CONFIG_CGROUP_NET_CLASSID** configuration option set to
 * 		"**y**" or to "**m**".
 * 	Return
 * 		The classid, or 0 for the default unconfigured classid.
 *
 * long bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
 * 	Description
 * 		Push a *vlan_tci* (VLAN tag control information) of protocol
 * 		*vlan_proto* to the packet associated to *skb*, then update
 * 		the checksum. Note that if *vlan_proto* is different from
 * 		**ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to
 * 		be **ETH_P_8021Q**.
 *
 * 		A call to this helper is susceptible to change the underlying
 * 		packet buffer. Therefore, at load time, all checks on pointers
 * 		previously done by the verifier are invalidated and must be
 * 		performed again, if the helper is used in combination with
 * 		direct packet access.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * long bpf_skb_vlan_pop(struct sk_buff *skb)
 * 	Description
 * 		Pop a VLAN header from the packet associated to *skb*.
 *
 * 		A call to this helper is susceptible to change the underlying
 * 		packet buffer. Therefore, at load time, all checks on pointers
 * 		previously done by the verifier are invalidated and must be
 * 		performed again, if the helper is used in combination with
 * 		direct packet access.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * long bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
 * 	Description
 * 		Get tunnel metadata. This helper takes a pointer *key* to an
 * 		empty **struct bpf_tunnel_key** of **size**, that will be
 * 		filled with tunnel metadata for the packet associated to *skb*.
 * 		The *flags* can be set to **BPF_F_TUNINFO_IPV6**, which
 * 		indicates that the tunnel is based on IPv6 protocol instead of
 * 		IPv4.
 *
 * 		The **struct bpf_tunnel_key** is an object that generalizes the
 * 		principal parameters used by various tunneling protocols into a
 * 		single struct. This way, it can be used to easily make a
 * 		decision based on the contents of the encapsulation header,
 * 		"summarized" in this struct. In particular, it holds the IP
 * 		address of the remote end (IPv4 or IPv6, depending on the case)
 * 		in *key*\ **->remote_ipv4** or *key*\ **->remote_ipv6**. Also,
 * 		this struct exposes the *key*\ **->tunnel_id**, which is
 * 		generally mapped to a VNI (Virtual Network Identifier), making
 * 		it programmable together with the **bpf_skb_set_tunnel_key**\
 * 		() helper.
 *
 * 		Let's imagine that the following code is part of a program
 * 		attached to the TC ingress interface, on one end of a GRE
 * 		tunnel, and is supposed to filter out all messages coming from
 * 		remote ends with IPv4 address other than 10.0.0.1:
 *
 * 		::
 *
 * 			int ret;
 * 			struct bpf_tunnel_key key = {};
 *
 * 			ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
 * 			if (ret < 0)
 * 				return TC_ACT_SHOT;	// drop packet
 *
 * 			if (key.remote_ipv4 != 0x0a000001)
 * 				return TC_ACT_SHOT;	// drop packet
 *
 * 			return TC_ACT_OK;		// accept packet
 *
 * 		This interface can also be used with all encapsulation devices
 * 		that can operate in "collect metadata" mode: instead of having
 * 		one network device per specific configuration, the "collect
 * 		metadata" mode only requires a single device where the
 * 		configuration can be extracted from this helper.
 *
 * 		This can be used together with various tunnels such as VXLan,
 * 		Geneve, GRE or IP in IP (IPIP).
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * long bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
 * 	Description
 * 		Populate tunnel metadata for packet associated to *skb.* The
 * 		tunnel metadata is set to the contents of *key*, of *size*. The
 * 		*flags* can be set to a combination of the following values:
 *
 * 		**BPF_F_TUNINFO_IPV6**
 * 			Indicate that the tunnel is based on IPv6 protocol
 * 			instead of IPv4.
 * 		**BPF_F_ZERO_CSUM_TX**
 * 			For IPv4 packets, add a flag to tunnel metadata
 * 			indicating that checksum computation should be skipped
 * 			and checksum set to zeroes.
 * 		**BPF_F_DONT_FRAGMENT**
 * 			Add a flag to tunnel metadata indicating that the
 * 			packet should not be fragmented.
 * 		**BPF_F_SEQ_NUMBER**
 * 			Add a flag to tunnel metadata indicating that a
 * 			sequence number should be added to tunnel header before
 * 			sending the packet. This flag was added for GRE
 * 			encapsulation, but might be used with other protocols
 * 			as well in the future.
 *
 * 		Here is a typical usage on the transmit path:
 *
 * 		::
 *
 * 			struct bpf_tunnel_key key;
 * 			     populate key ...
 * 			bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0);
 * 			bpf_clone_redirect(skb, vxlan_dev_ifindex, 0);
 *
 * 		See also the description of the **bpf_skb_get_tunnel_key**\ ()
 * 		helper for additional information.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * u64 bpf_perf_event_read(struct bpf_map *map, u64 flags)
 * 	Description
 * 		Read the value of a perf event counter. This helper relies on a
 * 		*map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of
 * 		the perf event counter is selected when *map* is updated with
 * 		perf event file descriptors. The *map* is an array whose size
 * 		is the number of available CPUs, and each cell contains a value
 * 		relative to one CPU. The value to retrieve is indicated by
 * 		*flags*, that contains the index of the CPU to look up, masked
 * 		with **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to
 * 		**BPF_F_CURRENT_CPU** to indicate that the value for the
 * 		current CPU should be retrieved.
 *
 * 		Note that before Linux 4.13, only hardware perf events can be
 * 		retrieved.
 *
 * 		Also, be aware that the newer helper
 * 		**bpf_perf_event_read_value**\ () is recommended over
 * 		**bpf_perf_event_read**\ () in general. The latter has some ABI
 * 		quirks where error and counter value are used as a return code
 * 		(which is wrong to do since ranges may overlap). This issue is
 * 		fixed with **bpf_perf_event_read_value**\ (), which at the same
 * 		time provides more features over the **bpf_perf_event_read**\
 * 		() interface. Please refer to the description of
 * 		**bpf_perf_event_read_value**\ () for details.
 * 	Return
 * 		The value of the perf event counter read from the map, or a
 * 		negative error code in case of failure.
 *
 * long bpf_redirect(u32 ifindex, u64 flags)
 * 	Description
 * 		Redirect the packet to another net device of index *ifindex*.
 * 		This helper is somewhat similar to **bpf_clone_redirect**\
 * 		(), except that the packet is not cloned, which provides
 * 		increased performance.
 *
 * 		Except for XDP, both ingress and egress interfaces can be used
 * 		for redirection. The **BPF_F_INGRESS** value in *flags* is used
 * 		to make the distinction (ingress path is selected if the flag
 * 		is present, egress path otherwise). Currently, XDP only
 * 		supports redirection to the egress interface, and accepts no
 * 		flag at all.
 *
 * 		The same effect can also be attained with the more generic
 * 		**bpf_redirect_map**\ (), which uses a BPF map to store the
 * 		redirect target instead of providing it directly to the helper.
 * 	Return
 * 		For XDP, the helper returns **XDP_REDIRECT** on success or
 * 		**XDP_ABORTED** on error. For other program types, the values
 * 		are **TC_ACT_REDIRECT** on success or **TC_ACT_SHOT** on
 * 		error.
 *
 * u32 bpf_get_route_realm(struct sk_buff *skb)
 * 	Description
 * 		Retrieve the realm of the route, that is to say the
 * 		**tclassid** field of the destination for the *skb*. The
 * 		identifier retrieved is a user-provided tag, similar to the
 * 		one used with the net_cls cgroup (see description for
 * 		**bpf_get_cgroup_classid**\ () helper), but here this tag is
 * 		held by a route (a destination entry), not by a task.
 *
 * 		Retrieving this identifier works with the clsact TC egress hook
 * 		(see also **tc-bpf(8)**), or alternatively on conventional
 * 		classful egress qdiscs, but not on TC ingress path. In case of
 * 		clsact TC egress hook, this has the advantage that, internally,
 * 		the destination entry has not been dropped yet in the transmit
 * 		path. Therefore, the destination entry does not need to be
 * 		artificially held via **netif_keep_dst**\ () for a classful
 * 		qdisc until the *skb* is freed.
 *
 * 		This helper is available only if the kernel was compiled with
 * 		**CONFIG_IP_ROUTE_CLASSID** configuration option.
 * 	Return
 * 		The realm of the route for the packet associated to *skb*, or 0
 * 		if none was found.
 *
 * long bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
 * 	Description
 * 		Write raw *data* blob into a special BPF perf event held by
 * 		*map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
 * 		event must have the following attributes: **PERF_SAMPLE_RAW**
 * 		as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
 * 		**PERF_COUNT_SW_BPF_OUTPUT** as **config**.
 *
 * 		The *flags* are used to indicate the index in *map* for which
 * 		the value must be put, masked with **BPF_F_INDEX_MASK**.
 * 		Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
 * 		to indicate that the index of the current CPU core should be
 * 		used.
 *
 * 		The value to write, of *size*, is passed through eBPF stack and
 * 		pointed by *data*.
 *
 * 		The context of the program *ctx* needs also be passed to the
 * 		helper.
 *
 * 		On user space, a program willing to read the values needs to
 * 		call **perf_event_open**\ () on the perf event (either for
 * 		one or for all CPUs) and to store the file descriptor into the
 * 		*map*. This must be done before the eBPF program can send data
 * 		into it. An example is available in file
 * 		*samples/bpf/trace_output_user.c* in the Linux kernel source
 * 		tree (the eBPF program counterpart is in
 * 		*samples/bpf/trace_output_kern.c*).
 *
 * 		**bpf_perf_event_output**\ () achieves better performance
 * 		than **bpf_trace_printk**\ () for sharing data with user
 * 		space, and is much better suited to streaming data from eBPF
 * 		programs.
 *
 * 		Note that this helper is not restricted to tracing use cases
 * 		and can be used with programs attached to TC or XDP as well,
 * 		where it allows for passing data to user space listeners. Data
 * 		can be:
 *
 * 		* Only custom structs,
 * 		* Only the packet payload, or
 * 		* A combination of both.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
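 *
 * 		For illustration only (this example is not part of the
 * 		original helper documentation): a sketch that pushes a small
 * 		sample to user space, where *events* is a hypothetical
 * 		**BPF_MAP_TYPE_PERF_EVENT_ARRAY** map and *ctx* is the
 * 		program's context pointer:
 *
 * 		::
 *
 * 			struct {
 * 				__u32 pid;
 * 				__u64 ts;
 * 			} sample = {
 * 				.pid = bpf_get_current_pid_tgid() >> 32,
 * 				.ts  = bpf_ktime_get_ns(),
 * 			};
 *
 * 			bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 * 					      &sample, sizeof(sample));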
 *
 * long bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len)
 * 	Description
 * 		This helper was provided as an easy way to load data from a
 * 		packet. It can be used to load *len* bytes from *offset* from
 * 		the packet associated to *skb*, into the buffer pointed by
 * 		*to*.
 *
 * 		Since Linux 4.7, usage of this helper has mostly been replaced
 * 		by "direct packet access", enabling packet data to be
 * 		manipulated with *skb*\ **->data** and *skb*\ **->data_end**
 * 		pointing respectively to the first byte of packet data and to
 * 		the byte after the last byte of packet data. However, it
 * 		remains useful if one wishes to read large quantities of data
 * 		at once from a packet into the eBPF stack.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * long bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags)
 * 	Description
 * 		Walk a user or a kernel stack and return its id. To achieve
 * 		this, the helper needs *ctx*, which is a pointer to the context
 * 		on which the tracing program is executed, and a pointer to a
 * 		*map* of type **BPF_MAP_TYPE_STACK_TRACE**.
 *
 * 		The last argument, *flags*, holds the number of stack frames to
 * 		skip (from 0 to 255), masked with
 * 		**BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
 * 		a combination of the following flags:
 *
 * 		**BPF_F_USER_STACK**
 * 			Collect a user space stack instead of a kernel stack.
 * 		**BPF_F_FAST_STACK_CMP**
 * 			Compare stacks by hash only.
 * 		**BPF_F_REUSE_STACKID**
 * 			If two different stacks hash into the same *stackid*,
 * 			discard the old one.
 *
 * 		The stack id retrieved is a 32 bit long integer handle which
 * 		can be further combined with other data (including other stack
 * 		ids) and used as a key into maps. This can be useful for
 * 		generating a variety of graphs (such as flame graphs or off-cpu
 * 		graphs).
 *
 * 		For walking a stack, this helper is an improvement over
 * 		**bpf_probe_read**\ (), which can be used with unrolled loops
 * 		but is not efficient and consumes a lot of eBPF instructions.
 * 		Instead, **bpf_get_stackid**\ () can collect up to
 * 		**PERF_MAX_STACK_DEPTH** both kernel and user frames. Note that
 * 		this limit can be controlled with the **sysctl** program, and
 * 		that it should be manually increased in order to profile long
 * 		user stacks (such as stacks for Java programs). To do so, use:
 *
 * 		::
 *
 * 			# sysctl kernel.perf_event_max_stack=<new value>
 * 	Return
 * 		The positive or null stack id on success, or a negative error
 * 		in case of failure.
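 *
 * 		For illustration only (this example is not part of the
 * 		original helper documentation): a sketch collecting the user
 * 		space stack of the current task, where *stack_traces* is a
 * 		hypothetical **BPF_MAP_TYPE_STACK_TRACE** map:
 *
 * 		::
 *
 * 			long id = bpf_get_stackid(ctx, &stack_traces,
 * 						  BPF_F_USER_STACK);
 *
 * 			if (id < 0)
 * 				return 0;	// could not capture the stack
 * 			// id can now be used as a key into another map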
 *
 * s64 bpf_csum_diff(__be32 *from, u32 from_size, __be32 *to, u32 to_size, __wsum seed)
 * 	Description
 * 		Compute a checksum difference, from the raw buffer pointed by
 * 		*from*, of length *from_size* (that must be a multiple of 4),
 * 		towards the raw buffer pointed by *to*, of size *to_size*
 * 		(same remark). An optional *seed* can be added to the value
 * 		(this can be cascaded, the seed may come from a previous call
 * 		to the helper).
 *
 * 		This is flexible enough to be used in several ways:
 *
 * 		* With *from_size* == 0, *to_size* > 0 and *seed* set to
 * 		  checksum, it can be used when pushing new data.
 * 		* With *from_size* > 0, *to_size* == 0 and *seed* set to
 * 		  checksum, it can be used when removing data from a packet.
 * 		* With *from_size* > 0, *to_size* > 0 and *seed* set to 0, it
 * 		  can be used to compute a diff. Note that *from_size* and
 * 		  *to_size* do not need to be equal.
 *
 * 		This helper can be used in combination with
 * 		**bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ (), to
 * 		which one can feed in the difference computed with
 * 		**bpf_csum_diff**\ ().
 * 	Return
 * 		The checksum result, or a negative error code in case of
 * 		failure.
 *
 * long bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
 * 	Description
 * 		Retrieve tunnel options metadata for the packet associated to
 * 		*skb*, and store the raw tunnel option data to the buffer *opt*
 * 		of *size*.
 *
 * 		This helper can be used with encapsulation devices that can
 * 		operate in "collect metadata" mode (please refer to the related
 * 		note in the description of **bpf_skb_get_tunnel_key**\ () for
 * 		more details). A particular example where this can be used is
 * 		in combination with the Geneve encapsulation protocol, where it
 * 		allows for pushing (with **bpf_skb_set_tunnel_opt**\ () helper)
 * 		and retrieving arbitrary TLVs (Type-Length-Value headers) from
 * 		the eBPF program. This allows for full customization of these
 * 		headers.
 * 	Return
 * 		The size of the option data retrieved.
 *
 * long bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
 * 	Description
 * 		Set tunnel options metadata for the packet associated to *skb*
 * 		to the option data contained in the raw buffer *opt* of *size*.
 *
 * 		See also the description of the **bpf_skb_get_tunnel_opt**\ ()
 * 		helper for additional information.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * long bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags)
 * 	Description
 * 		Change the protocol of the *skb* to *proto*. Currently
 * 		supported are transitions from IPv4 to IPv6, and from IPv6 to
 * 		IPv4. The helper takes care of the groundwork for the
 * 		transition, including resizing the socket buffer. The eBPF
 * 		program is expected to fill the new headers, if any, via
 * 		**bpf_skb_store_bytes**\ () and to recompute the checksums with
 * 		**bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\
 * 		(). The main case for this helper is to perform NAT64
 * 		operations out of an eBPF program.
 *
 * 		Internally, the GSO type is marked as dodgy so that headers are
 * 		checked and segments are recalculated by the GSO/GRO engine.
 * 		The size for GSO target is adapted as well.
 *
 * 		All values for *flags* are reserved for future usage, and must
 * 		be left at zero.
 *
 * 		A call to this helper is susceptible to change the underlying
 * 		packet buffer. Therefore, at load time, all checks on pointers
 * 		previously done by the verifier are invalidated and must be
 * 		performed again, if the helper is used in combination with
 * 		direct packet access.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * long bpf_skb_change_type(struct sk_buff *skb, u32 type)
 * 	Description
 * 		Change the packet type for the packet associated to *skb*. This
 * 		comes down to setting *skb*\ **->pkt_type** to *type*, except
 * 		the eBPF program does not have write access to *skb*\
 * 		**->pkt_type** beside this helper. Using a helper here allows
 * 		for graceful handling of errors.
 *
 * 		The major use case is to change incoming *skb*s to
 * 		**PACKET_HOST** in a programmatic way instead of having to
 * 		recirculate via **redirect**\ (..., **BPF_F_INGRESS**), for
 * 		example.
 *
 * 		Note that *type* only allows certain values. At this time, they
 * 		are:
 *
 * 		**PACKET_HOST**
 * 			Packet is for us.
 * 		**PACKET_BROADCAST**
 * 			Send packet to all.
 * 		**PACKET_MULTICAST**
 * 			Send packet to group.
 * 		**PACKET_OTHERHOST**
 * 			Send packet to someone else.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * long bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index)
 * 	Description
 * 		Check whether *skb* is a descendant of the cgroup2 held by
 * 		*map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*.
 * 	Return
 * 		The return value depends on the result of the test, and can be:
 *
 * 		* 0, if the *skb* failed the cgroup2 descendant test.
 * 		* 1, if the *skb* succeeded the cgroup2 descendant test.
 * 		* A negative error code, if an error occurred.
 *
 * u32 bpf_get_hash_recalc(struct sk_buff *skb)
 * 	Description
 * 		Retrieve the hash of the packet, *skb*\ **->hash**. If it is
 * 		not set, in particular if the hash was cleared due to mangling,
 * 		recompute this hash. Later accesses to the hash can be done
 * 		directly with *skb*\ **->hash**.
 *
 * 		Calling **bpf_set_hash_invalid**\ (), changing a packet
 * 		prototype with **bpf_skb_change_proto**\ (), or calling
 * 		**bpf_skb_store_bytes**\ () with the
 * 		**BPF_F_INVALIDATE_HASH** are actions susceptible to clear
 * 		the hash and to trigger a new computation for the next call to
 * 		**bpf_get_hash_recalc**\ ().
 * 	Return
 * 		The 32-bit hash.
 *
 * u64 bpf_get_current_task(void)
 * 	Return
 * 		A pointer to the current task struct.
 *
 * long bpf_probe_write_user(void *dst, const void *src, u32 len)
 * 	Description
 * 		Attempt in a safe way to write *len* bytes from the buffer
 * 		*src* to *dst* in memory. It only works for threads that are in
 * 		user context, and *dst* must be a valid user space address.
 *
 * 		This helper should not be used to implement any kind of
 * 		security mechanism because of TOC-TOU attacks, but rather to
 * 		debug, divert, and manipulate execution of semi-cooperative
 * 		processes.
 *
 * 		Keep in mind that this feature is meant for experiments, and it
 * 		has a risk of crashing the system and running programs.
 * 		Therefore, when an eBPF program using this helper is attached,
 * 		a warning including PID and process name is printed to kernel
 * 		logs.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * long bpf_current_task_under_cgroup(struct bpf_map *map, u32 index)
 * 	Description
 * 		Check whether the probe is being run in the context of a given
 * 		subset of the cgroup2 hierarchy. The cgroup2 to test is held by
 * 		*map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*.
 * 	Return
 * 		The return value depends on the result of the test, and can be:
 *
 * 		* 1, if current task belongs to the cgroup2.
 * 		* 0, if current task does not belong to the cgroup2.
 * 		* A negative error code, if an error occurred.
 *
 * long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags)
 * 	Description
 * 		Resize (trim or grow) the packet associated to *skb* to the
 * 		new *len*. The *flags* are reserved for future usage, and must
 * 		be left at zero.
 *
 * 		The basic idea is that the helper performs the needed work to
 * 		change the size of the packet, then the eBPF program rewrites
 * 		the rest via helpers like **bpf_skb_store_bytes**\ (),
 * 		**bpf_l3_csum_replace**\ (), **bpf_l4_csum_replace**\ ()
 * 		and others. This helper is a slow path utility intended for
 * 		replies with control messages. And because it is targeted for
 * 		slow path, the helper itself can afford to be slow: it
 * 		implicitly linearizes, unclones and drops offloads from the
 * 		*skb*.
 *
 * 		A call to this helper is susceptible to change the underlying
 * 		packet buffer. Therefore, at load time, all checks on pointers
 * 		previously done by the verifier are invalidated and must be
 * 		performed again, if the helper is used in combination with
 * 		direct packet access.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * long bpf_skb_pull_data(struct sk_buff *skb, u32 len)
 * 	Description
 * 		Pull in non-linear data in case the *skb* is non-linear and not
 * 		all of *len* is part of the linear section. Make *len* bytes
 * 		from *skb* readable and writable. If a zero value is passed for
 * 		*len*, then the whole length of the *skb* is pulled.
 *
 * 		This helper is only needed for reading and writing with direct
 * 		packet access.
 *
 * 		For direct packet access, testing that offsets to access
 * 		are within packet boundaries (test on *skb*\ **->data_end**) is
 * 		susceptible to fail if offsets are invalid, or if the requested
 * 		data is in non-linear parts of the *skb*. On failure the
 * 		program can just bail out, or in the case of a non-linear
 * 		buffer, use a helper to make the data available. The
 * 		**bpf_skb_load_bytes**\ () helper is a first solution to access
 * 		the data. Another one consists in using
 * 		**bpf_skb_pull_data**\ () to pull in the non-linear parts once,
 * 		then retesting and eventually accessing the data.
 *
 * 		At the same time, this also makes sure the *skb* is uncloned,
 * 		which is a necessary condition for direct write. As this needs
 * 		to be an invariant for the write part only, the verifier
 * 		detects writes and adds a prologue that calls
 * 		**bpf_skb_pull_data**\ () to effectively unclone the *skb* from
 * 		the very beginning in case it is indeed cloned.
 *
 * 		A call to this helper is susceptible to change the underlying
 * 		packet buffer. Therefore, at load time, all checks on pointers
 * 		previously done by the verifier are invalidated and must be
 * 		performed again, if the helper is used in combination with
 * 		direct packet access.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * s64 bpf_csum_update(struct sk_buff *skb, __wsum csum)
 * 	Description
 * 		Add the checksum *csum* into *skb*\ **->csum** in case the
 * 		driver has supplied a checksum for the entire packet into that
 * 		field. Return an error otherwise. This helper is intended to be
 * 		used in combination with **bpf_csum_diff**\ (), in particular
 * 		when the checksum needs to be updated after data has been
 * 		written into the packet through direct packet access.
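 *
 * 		As an illustration, a sketch of this combination after a
 * 		4-byte rewrite through direct packet access (variable names
 * 		are illustrative)::
 *
 * 			__be32 old_word, new_word;
 * 			s64 diff;
 *
 * 			// old_word and new_word hold the four bytes before
 * 			// and after the rewrite.
 * 			diff = bpf_csum_diff(&old_word, 4, &new_word, 4, 0);
 * 			if (diff >= 0)
 * 				bpf_csum_update(skb, (__wsum)diff);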
1513 * Return 1514 * The checksum on success, or a negative error code in case of 1515 * failure. 1516 * 1517 * void bpf_set_hash_invalid(struct sk_buff *skb) 1518 * Description 1519 * Invalidate the current *skb*\ **->hash**. It can be used after 1520 * mangling on headers through direct packet access, in order to 1521 * indicate that the hash is outdated and to trigger a 1522 * recalculation the next time the kernel tries to access this 1523 * hash or when the **bpf_get_hash_recalc**\ () helper is called. 1524 * 1525 * long bpf_get_numa_node_id(void) 1526 * Description 1527 * Return the id of the current NUMA node. The primary use case 1528 * for this helper is the selection of sockets for the local NUMA 1529 * node, when the program is attached to sockets using the 1530 * **SO_ATTACH_REUSEPORT_EBPF** option (see also **socket(7)**), 1531 * but the helper is also available to other eBPF program types, 1532 * similarly to **bpf_get_smp_processor_id**\ (). 1533 * Return 1534 * The id of current NUMA node. 1535 * 1536 * long bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags) 1537 * Description 1538 * Grows headroom of packet associated to *skb* and adjusts the 1539 * offset of the MAC header accordingly, adding *len* bytes of 1540 * space. It automatically extends and reallocates memory as 1541 * required. 1542 * 1543 * This helper can be used on a layer 3 *skb* to push a MAC header 1544 * for redirection into a layer 2 device. 1545 * 1546 * All values for *flags* are reserved for future usage, and must 1547 * be left at zero. 1548 * 1549 * A call to this helper is susceptible to change the underlying 1550 * packet buffer. Therefore, at load time, all checks on pointers 1551 * previously done by the verifier are invalidated and must be 1552 * performed again, if the helper is used in combination with 1553 * direct packet access. 1554 * Return 1555 * 0 on success, or a negative error in case of failure. 1556 * 1557 * long bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta) 1558 * Description 1559 * Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that 1560 * it is possible to use a negative value for *delta*. This helper 1561 * can be used to prepare the packet for pushing or popping 1562 * headers. 1563 * 1564 * A call to this helper is susceptible to change the underlying 1565 * packet buffer. Therefore, at load time, all checks on pointers 1566 * previously done by the verifier are invalidated and must be 1567 * performed again, if the helper is used in combination with 1568 * direct packet access. 1569 * Return 1570 * 0 on success, or a negative error in case of failure. 1571 * 1572 * long bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr) 1573 * Description 1574 * Copy a NUL terminated string from an unsafe kernel address 1575 * *unsafe_ptr* to *dst*. See **bpf_probe_read_kernel_str**\ () for 1576 * more details. 1577 * 1578 * Generally, use **bpf_probe_read_user_str**\ () or 1579 * **bpf_probe_read_kernel_str**\ () instead. 1580 * Return 1581 * On success, the strictly positive length of the string, 1582 * including the trailing NUL character. On error, a negative 1583 * value. 1584 * 1585 * u64 bpf_get_socket_cookie(struct sk_buff *skb) 1586 * Description 1587 * If the **struct sk_buff** pointed by *skb* has a known socket, 1588 * retrieve the cookie (generated by the kernel) of this socket. 1589 * If no cookie has been set yet, generate a new cookie. Once 1590 * generated, the socket cookie remains stable for the life of the 1591 * socket. 
 * 		This helper can be useful for monitoring per-socket
 * 		networking traffic statistics as it provides a global socket
 * 		identifier that can be assumed unique.
 * 	Return
 * 		An 8-byte long non-decreasing number on success, or 0 if the
 * 		socket field is missing inside *skb*.
 *
 * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx)
 * 	Description
 * 		Equivalent to **bpf_get_socket_cookie**\ () helper that accepts
 * 		*skb*, but gets socket from **struct bpf_sock_addr** context.
 * 	Return
 * 		An 8-byte long non-decreasing number.
 *
 * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx)
 * 	Description
 * 		Equivalent to **bpf_get_socket_cookie**\ () helper that accepts
 * 		*skb*, but gets socket from **struct bpf_sock_ops** context.
 * 	Return
 * 		An 8-byte long non-decreasing number.
 *
 * u32 bpf_get_socket_uid(struct sk_buff *skb)
 * 	Return
 * 		The owner UID of the socket associated to *skb*. If the socket
 * 		is **NULL**, or if it is not a full socket (i.e. if it is a
 * 		time-wait or a request socket instead), **overflowuid** value
 * 		is returned (note that **overflowuid** might also be the actual
 * 		UID value for the socket).
 *
 * long bpf_set_hash(struct sk_buff *skb, u32 hash)
 * 	Description
 * 		Set the full hash for *skb* (set the field *skb*\ **->hash**)
 * 		to value *hash*.
 * 	Return
 * 		0
 *
 * long bpf_setsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen)
 * 	Description
 * 		Emulate a call to **setsockopt()** on the socket associated to
 * 		*bpf_socket*, which must be a full socket. The *level* at
 * 		which the option resides and the name *optname* of the option
 * 		must be specified, see **setsockopt(2)** for more information.
 * 		The option value of length *optlen* is pointed by *optval*.
 *
 * 		*bpf_socket* should be one of the following:
 *
 * 		* **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**.
 * 		* **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**
 * 		  and **BPF_CGROUP_INET6_CONNECT**.
 *
 * 		This helper actually implements a subset of **setsockopt()**.
 * 		It supports the following *level*\ s:
 *
 * 		* **SOL_SOCKET**, which supports the following *optname*\ s:
 * 		  **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**,
 * 		  **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**,
 * 		  **SO_BINDTODEVICE**, **SO_KEEPALIVE**.
 * 		* **IPPROTO_TCP**, which supports the following *optname*\ s:
 * 		  **TCP_CONGESTION**, **TCP_BPF_IW**,
 * 		  **TCP_BPF_SNDCWND_CLAMP**, **TCP_SAVE_SYN**,
 * 		  **TCP_KEEPIDLE**, **TCP_KEEPINTVL**, **TCP_KEEPCNT**,
 * 		  **TCP_SYNCNT**, **TCP_USER_TIMEOUT**.
 * 		* **IPPROTO_IP**, which supports *optname* **IP_TOS**.
 * 		* **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * long bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags)
 * 	Description
 * 		Grow or shrink the room for data in the packet associated to
 * 		*skb* by *len_diff*, and according to the selected *mode*.
 *
 * 		By default, the helper will reset any offloaded checksum
 * 		indicator of the skb to CHECKSUM_NONE. This can be avoided
 * 		by the following flag:
 *
 * 		* **BPF_F_ADJ_ROOM_NO_CSUM_RESET**: Do not reset offloaded
 * 		  checksum data of the skb to CHECKSUM_NONE.
1669 * 1670 * There are two supported modes at this time: 1671 * 1672 * * **BPF_ADJ_ROOM_MAC**: Adjust room at the mac layer 1673 * (room space is added or removed below the layer 2 header). 1674 * 1675 * * **BPF_ADJ_ROOM_NET**: Adjust room at the network layer 1676 * (room space is added or removed below the layer 3 header). 1677 * 1678 * The following flags are supported at this time: 1679 * 1680 * * **BPF_F_ADJ_ROOM_FIXED_GSO**: Do not adjust gso_size. 1681 * Adjusting mss in this way is not allowed for datagrams. 1682 * 1683 * * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV4**, 1684 * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV6**: 1685 * Any new space is reserved to hold a tunnel header. 1686 * Configure skb offsets and other fields accordingly. 1687 * 1688 * * **BPF_F_ADJ_ROOM_ENCAP_L4_GRE**, 1689 * **BPF_F_ADJ_ROOM_ENCAP_L4_UDP**: 1690 * Use with ENCAP_L3 flags to further specify the tunnel type. 1691 * 1692 * * **BPF_F_ADJ_ROOM_ENCAP_L2**\ (*len*): 1693 * Use with ENCAP_L3/L4 flags to further specify the tunnel 1694 * type; *len* is the length of the inner MAC header. 1695 * 1696 * A call to this helper is susceptible to change the underlying 1697 * packet buffer. Therefore, at load time, all checks on pointers 1698 * previously done by the verifier are invalidated and must be 1699 * performed again, if the helper is used in combination with 1700 * direct packet access. 1701 * Return 1702 * 0 on success, or a negative error in case of failure. 1703 * 1704 * long bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags) 1705 * Description 1706 * Redirect the packet to the endpoint referenced by *map* at 1707 * index *key*. Depending on its type, this *map* can contain 1708 * references to net devices (for forwarding packets through other 1709 * ports), or to CPUs (for redirecting XDP frames to another CPU; 1710 * but this is only implemented for native XDP (with driver 1711 * support) as of this writing). 1712 * 1713 * The lower two bits of *flags* are used as the return code if 1714 * the map lookup fails. This is so that the return value can be 1715 * one of the XDP program return codes up to **XDP_TX**, as chosen 1716 * by the caller. Any higher bits in the *flags* argument must be 1717 * unset. 1718 * 1719 * See also **bpf_redirect**\ (), which only supports redirecting 1720 * to an ifindex, but doesn't require a map to do so. 1721 * Return 1722 * **XDP_REDIRECT** on success, or the value of the two lower bits 1723 * of the *flags* argument on error. 1724 * 1725 * long bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags) 1726 * Description 1727 * Redirect the packet to the socket referenced by *map* (of type 1728 * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and 1729 * egress interfaces can be used for redirection. The 1730 * **BPF_F_INGRESS** value in *flags* is used to make the 1731 * distinction (ingress path is selected if the flag is present, 1732 * egress path otherwise). This is the only flag supported for now. 1733 * Return 1734 * **SK_PASS** on success, or **SK_DROP** on error. 1735 * 1736 * long bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) 1737 * Description 1738 * Add an entry to, or update a *map* referencing sockets. The 1739 * *skops* is used as a new value for the entry associated to 1740 * *key*. *flags* is one of: 1741 * 1742 * **BPF_NOEXIST** 1743 * The entry for *key* must not exist in the map. 1744 * **BPF_EXIST** 1745 * The entry for *key* must already exist in the map. 
1746 * **BPF_ANY** 1747 * No condition on the existence of the entry for *key*. 1748 * 1749 * If the *map* has eBPF programs (parser and verdict), those will 1750 * be inherited by the socket being added. If the socket is 1751 * already attached to eBPF programs, this results in an error. 1752 * Return 1753 * 0 on success, or a negative error in case of failure. 1754 * 1755 * long bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta) 1756 * Description 1757 * Adjust the address pointed by *xdp_md*\ **->data_meta** by 1758 * *delta* (which can be positive or negative). Note that this 1759 * operation modifies the address stored in *xdp_md*\ **->data**, 1760 * so the latter must be loaded only after the helper has been 1761 * called. 1762 * 1763 * The use of *xdp_md*\ **->data_meta** is optional and programs 1764 * are not required to use it. The rationale is that when the 1765 * packet is processed with XDP (e.g. as DoS filter), it is 1766 * possible to push further meta data along with it before passing 1767 * to the stack, and to give the guarantee that an ingress eBPF 1768 * program attached as a TC classifier on the same device can pick 1769 * this up for further post-processing. Since TC works with socket 1770 * buffers, it remains possible to set from XDP the **mark** or 1771 * **priority** pointers, or other pointers for the socket buffer. 1772 * Having this scratch space generic and programmable allows for 1773 * more flexibility as the user is free to store whatever meta 1774 * data they need. 1775 * 1776 * A call to this helper is susceptible to change the underlying 1777 * packet buffer. Therefore, at load time, all checks on pointers 1778 * previously done by the verifier are invalidated and must be 1779 * performed again, if the helper is used in combination with 1780 * direct packet access. 1781 * Return 1782 * 0 on success, or a negative error in case of failure. 1783 * 1784 * long bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size) 1785 * Description 1786 * Read the value of a perf event counter, and store it into *buf* 1787 * of size *buf_size*. This helper relies on a *map* of type 1788 * **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of the perf event 1789 * counter is selected when *map* is updated with perf event file 1790 * descriptors. The *map* is an array whose size is the number of 1791 * available CPUs, and each cell contains a value relative to one 1792 * CPU. The value to retrieve is indicated by *flags*, that 1793 * contains the index of the CPU to look up, masked with 1794 * **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to 1795 * **BPF_F_CURRENT_CPU** to indicate that the value for the 1796 * current CPU should be retrieved. 1797 * 1798 * This helper behaves in a way close to 1799 * **bpf_perf_event_read**\ () helper, save that instead of 1800 * just returning the value observed, it fills the *buf* 1801 * structure. This allows for additional data to be retrieved: in 1802 * particular, the enabled and running times (in *buf*\ 1803 * **->enabled** and *buf*\ **->running**, respectively) are 1804 * copied. In general, **bpf_perf_event_read_value**\ () is 1805 * recommended over **bpf_perf_event_read**\ (), which has some 1806 * ABI issues and provides fewer functionalities. 1807 * 1808 * These values are interesting, because hardware PMU (Performance 1809 * Monitoring Unit) counters are limited resources. 
 * 		When there are more PMU based perf events opened than
 * 		available counters, the kernel will multiplex these events so
 * 		each event gets a certain percentage (but not all) of the PMU
 * 		time. In case that multiplexing happens, the number of samples
 * 		or counter value will not reflect the case compared to when no
 * 		multiplexing occurs. This makes comparison between different
 * 		runs difficult.
 * 		Typically, the counter value should be normalized before
 * 		comparing to other experiments. The usual normalization is done
 * 		as follows.
 *
 * 		::
 *
 * 			normalized_counter = counter * t_enabled / t_running
 *
 * 		Where t_enabled is the time enabled for the event and
 * 		t_running is the time running for the event since the last
 * 		normalization. The enabled and running times are accumulated
 * 		since the perf event open. To achieve a scaling factor between
 * 		two invocations of an eBPF program, users can use the CPU id
 * 		as the key (which is typical for the perf array usage model)
 * 		to remember the previous value and do the calculation inside
 * 		the eBPF program.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * long bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size)
 * 	Description
 * 		For an eBPF program attached to a perf event, retrieve the
 * 		value of the event counter associated to *ctx* and store it in
 * 		the structure pointed by *buf* and of size *buf_size*. Enabled
 * 		and running times are also stored in the structure (see
 * 		description of helper **bpf_perf_event_read_value**\ () for
 * 		more details).
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * long bpf_getsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen)
 * 	Description
 * 		Emulate a call to **getsockopt()** on the socket associated to
 * 		*bpf_socket*, which must be a full socket. The *level* at
 * 		which the option resides and the name *optname* of the option
 * 		must be specified, see **getsockopt(2)** for more information.
 * 		The retrieved value is stored in the structure pointed by
 * 		*optval* and of length *optlen*.
 *
 * 		*bpf_socket* should be one of the following:
 *
 * 		* **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**.
 * 		* **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**
 * 		  and **BPF_CGROUP_INET6_CONNECT**.
 *
 * 		This helper actually implements a subset of **getsockopt()**.
 * 		It supports the following *level*\ s:
 *
 * 		* **IPPROTO_TCP**, which supports *optname*
 * 		  **TCP_CONGESTION**.
 * 		* **IPPROTO_IP**, which supports *optname* **IP_TOS**.
 * 		* **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * long bpf_override_return(struct pt_regs *regs, u64 rc)
 * 	Description
 * 		Used for error injection, this helper uses kprobes to override
 * 		the return value of the probed function, and to set it to *rc*.
 * 		The first argument is the context *regs* on which the kprobe
 * 		works.
 *
 * 		This helper works by setting the PC (program counter)
 * 		to an override function which is run in place of the original
 * 		probed function. This means the probed function is not run at
 * 		all. The replacement function just returns with the required
 * 		value.
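 *
 * 		As a sketch, a kprobe program exercising an error path could
 * 		force the probed function to return **-ENOMEM**::
 *
 * 			// Probed function returns -ENOMEM without running.
 * 			bpf_override_return(regs, -ENOMEM);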
 *
 * 		This helper has security implications, and thus is subject to
 * 		restrictions. It is only available if the kernel was compiled
 * 		with the **CONFIG_BPF_KPROBE_OVERRIDE** configuration
 * 		option, and in this case it only works on functions tagged with
 * 		**ALLOW_ERROR_INJECTION** in the kernel code.
 *
 * 		Also, the helper is only available for the architectures having
 * 		the **CONFIG_FUNCTION_ERROR_INJECTION** option. As of this
 * 		writing, x86 architecture is the only one to support this
 * 		feature.
 * 	Return
 * 		0
 *
 * long bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval)
 * 	Description
 * 		Attempt to set the value of the **bpf_sock_ops_cb_flags** field
 * 		for the full TCP socket associated to *bpf_sock* to
 * 		*argval*.
 *
 * 		The primary use of this field is to determine if there should
 * 		be calls to eBPF programs of type
 * 		**BPF_PROG_TYPE_SOCK_OPS** at various points in the TCP
 * 		code. A program of the same type can change its value, per
 * 		connection and as necessary, when the connection is
 * 		established. This field is directly accessible for reading, but
 * 		this helper must be used for updates in order to return an
 * 		error if an eBPF program tries to set a callback that is not
 * 		supported in the current kernel.
 *
 * 		*argval* is a flag array which can combine these flags:
 *
 * 		* **BPF_SOCK_OPS_RTO_CB_FLAG** (retransmission time out)
 * 		* **BPF_SOCK_OPS_RETRANS_CB_FLAG** (retransmission)
 * 		* **BPF_SOCK_OPS_STATE_CB_FLAG** (TCP state change)
 * 		* **BPF_SOCK_OPS_RTT_CB_FLAG** (every RTT)
 *
 * 		Therefore, this function can be used to clear a callback flag by
 * 		setting the appropriate bit to zero, e.g. to disable the RTO
 * 		callback:
 *
 * 		**bpf_sock_ops_cb_flags_set(bpf_sock,**
 * 		**bpf_sock->bpf_sock_ops_cb_flags & ~BPF_SOCK_OPS_RTO_CB_FLAG)**
 *
 * 		Here are some examples of where one could call such eBPF
 * 		program:
 *
 * 		* When RTO fires.
 * 		* When a packet is retransmitted.
 * 		* When the connection terminates.
 * 		* When a packet is sent.
 * 		* When a packet is received.
 * 	Return
 * 		Code **-EINVAL** if the socket is not a full TCP socket;
 * 		otherwise, a positive number containing the bits that could not
 * 		be set is returned (which comes down to 0 if all bits were set
 * 		as required).
 *
 * long bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags)
 * 	Description
 * 		This helper is used in programs implementing policies at the
 * 		socket level. If the message *msg* is allowed to pass (i.e. if
 * 		the verdict eBPF program returns **SK_PASS**), redirect it to
 * 		the socket referenced by *map* (of type
 * 		**BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and
 * 		egress interfaces can be used for redirection. The
 * 		**BPF_F_INGRESS** value in *flags* is used to make the
 * 		distinction (ingress path is selected if the flag is present,
 * 		egress path otherwise). This is the only flag supported for now.
 * 	Return
 * 		**SK_PASS** on success, or **SK_DROP** on error.
 *
 * long bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes)
 * 	Description
 * 		For socket policies, apply the verdict of the eBPF program to
 * 		the next *bytes* (number of bytes) of message *msg*.
 *
 * 		For example, this helper can be used in the following cases:
 *
 * 		* A single **sendmsg**\ () or **sendfile**\ () system call
 * 		  contains multiple logical messages that the eBPF program is
 * 		  supposed to read and for which it should apply a verdict.
 * 		* An eBPF program only cares to read the first *bytes* of a
 * 		  *msg*. If the message has a large payload, then setting up
 * 		  and calling the eBPF program repeatedly for all bytes, even
 * 		  though the verdict is already known, would create unnecessary
 * 		  overhead.
 *
 * 		When called from within an eBPF program, the helper sets a
 * 		counter internal to the BPF infrastructure, that is used to
 * 		apply the last verdict to the next *bytes*. If *bytes* is
 * 		smaller than the current data being processed from a
 * 		**sendmsg**\ () or **sendfile**\ () system call, the first
 * 		*bytes* will be sent and the eBPF program will be re-run with
 * 		the pointer for start of data pointing to byte number *bytes*
 * 		**+ 1**. If *bytes* is larger than the current data being
 * 		processed, then the eBPF verdict will be applied to multiple
 * 		**sendmsg**\ () or **sendfile**\ () calls until *bytes* are
 * 		consumed.
 *
 * 		Note that if a socket closes with the internal counter holding
 * 		a non-zero value, this is not a problem because data is not
 * 		being buffered for *bytes* and is sent as it is received.
 * 	Return
 * 		0
 *
 * long bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes)
 * 	Description
 * 		For socket policies, prevent the execution of the verdict eBPF
 * 		program for message *msg* until *bytes* (byte number) have been
 * 		accumulated.
 *
 * 		This can be used when one needs a specific number of bytes
 * 		before a verdict can be assigned, even if the data spans
 * 		multiple **sendmsg**\ () or **sendfile**\ () calls. The extreme
 * 		case would be a user calling **sendmsg**\ () repeatedly with
 * 		1-byte long message segments. Obviously, this is bad for
 * 		performance, but it is still valid. If the eBPF program needs
 * 		*bytes* bytes to validate a header, this helper can be used to
 * 		prevent the eBPF program from being called again until *bytes*
 * 		have
 * 		been accumulated.
 * 	Return
 * 		0
 *
 * long bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags)
 * 	Description
 * 		For socket policies, pull in non-linear data from user space
 * 		for *msg* and set pointers *msg*\ **->data** and *msg*\
 * 		**->data_end** to *start* and *end* byte offsets into *msg*,
 * 		respectively.
 *
 * 		If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a
 * 		*msg* it can only parse data that the (**data**, **data_end**)
 * 		pointers have already consumed. For **sendmsg**\ () hooks this
 * 		is likely the first scatterlist element. But for calls relying
 * 		on the **sendpage** handler (e.g. **sendfile**\ ()) this will
 * 		be the range (**0**, **0**) because the data is shared with
 * 		user space and by default the objective is to avoid allowing
 * 		user space to modify data while (or after) eBPF verdict is
 * 		being decided. This helper can be used to pull in data and to
 * 		set the start and end pointer to given values. Data will be
 * 		copied if necessary (i.e. if data was not linear and if start
 * 		and end pointers do not point to the same chunk).
 *
 * 		A call to this helper is susceptible to change the underlying
 * 		packet buffer. Therefore, at load time, all checks on pointers
 * 		previously done by the verifier are invalidated and must be
 * 		performed again, if the helper is used in combination with
 * 		direct packet access.
 *
 * 		All values for *flags* are reserved for future usage, and must
 * 		be left at zero.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * long bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len)
 * 	Description
 * 		Bind the socket associated to *ctx* to the address pointed by
 * 		*addr*, of length *addr_len*. This allows for making outgoing
 * 		connections from the desired IP address, which can be useful
 * 		for example when all processes inside a cgroup should use one
 * 		single IP address on a host that has multiple IP addresses
 * 		configured.
 *
 * 		This helper works for IPv4 and IPv6, TCP and UDP sockets. The
 * 		domain (*addr*\ **->sa_family**) must be **AF_INET** (or
 * 		**AF_INET6**). It's advised to pass zero port (**sin_port**
 * 		or **sin6_port**) which triggers IP_BIND_ADDRESS_NO_PORT-like
 * 		behavior and lets the kernel efficiently pick an unused
 * 		port as long as the 4-tuple is unique. Passing a non-zero port
 * 		might lead to degraded performance.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * long bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta)
 * 	Description
 * 		Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is
 * 		possible to both shrink and grow the packet tail: shrinking is
 * 		done by passing a negative *delta*.
 *
 * 		A call to this helper is susceptible to change the underlying
 * 		packet buffer. Therefore, at load time, all checks on pointers
 * 		previously done by the verifier are invalidated and must be
 * 		performed again, if the helper is used in combination with
 * 		direct packet access.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
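 *
 * 		For example, an XDP program could trim a 4-byte trailer off
 * 		the frame before processing it further (a sketch; packet
 * 		boundaries must be re-checked afterwards)::
 *
 * 			if (bpf_xdp_adjust_tail(xdp_md, -4) < 0)
 * 				return XDP_ABORTED;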
 *
 * long bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags)
 * 	Description
 * 		Retrieve the XFRM state (IP transform framework, see also
 * 		**ip-xfrm(8)**) at *index* in XFRM "security path" for *skb*.
 *
 * 		The retrieved value is stored in the **struct bpf_xfrm_state**
 * 		pointed by *xfrm_state* and of length *size*.
 *
 * 		All values for *flags* are reserved for future usage, and must
 * 		be left at zero.
 *
 * 		This helper is available only if the kernel was compiled with
 * 		**CONFIG_XFRM** configuration option.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * long bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags)
 * 	Description
 * 		Return a user or a kernel stack in the bpf program provided
 * 		buffer. To achieve this, the helper needs *ctx*, which is a
 * 		pointer to the context on which the tracing program is
 * 		executed. To store the stacktrace, the bpf program provides
 * 		*buf* with a nonnegative *size*.
 *
 * 		The last argument, *flags*, holds the number of stack frames to
 * 		skip (from 0 to 255), masked with
 * 		**BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
 * 		the following flags:
 *
 * 		**BPF_F_USER_STACK**
 * 			Collect a user space stack instead of a kernel stack.
 * 		**BPF_F_USER_BUILD_ID**
 * 			Collect buildid+offset instead of ips for user stack,
 * 			only valid if **BPF_F_USER_STACK** is also specified.
 *
 * 		**bpf_get_stack**\ () can collect up to
 * 		**PERF_MAX_STACK_DEPTH** both kernel and user frames, subject
 * 		to a sufficiently large buffer size. Note that
 * 		this limit can be controlled with the **sysctl** program, and
 * 		that it should be manually increased in order to profile long
 * 		user stacks (such as stacks for Java programs). To do so, use:
 *
 * 		::
 *
 * 			# sysctl kernel.perf_event_max_stack=<new value>
 * 	Return
 * 		A non-negative value equal to or less than *size* on success,
 * 		or a negative error in case of failure.
 *
 * long bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header)
 * 	Description
 * 		This helper is similar to **bpf_skb_load_bytes**\ () in that
 * 		it provides an easy way to load *len* bytes from *offset*
 * 		from the packet associated to *skb*, into the buffer pointed
 * 		by *to*. The difference to **bpf_skb_load_bytes**\ () is that
 * 		a fifth argument *start_header* exists in order to select a
 * 		base offset to start from. *start_header* can be one of:
 *
 * 		**BPF_HDR_START_MAC**
 * 			Base offset to load data from is *skb*'s mac header.
 * 		**BPF_HDR_START_NET**
 * 			Base offset to load data from is *skb*'s network header.
 *
 * 		In general, "direct packet access" is the preferred method to
 * 		access packet data, however, this helper is particularly useful
 * 		in socket filters where *skb*\ **->data** does not always point
 * 		to the start of the mac header and where "direct packet access"
 * 		is not available.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * long bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags)
 * 	Description
 * 		Do FIB lookup in kernel tables using parameters in *params*.
 * 		If lookup is successful and result shows packet is to be
 * 		forwarded, the neighbor tables are searched for the nexthop.
 * 		If successful (i.e., FIB lookup shows forwarding and nexthop
 * 		is resolved), the nexthop address is returned in ipv4_dst
 * 		or ipv6_dst based on family, smac is set to mac address of
 * 		egress device, dmac is set to nexthop mac address, rt_metric
 * 		is set to metric from route (IPv4/IPv6 only), and ifindex
 * 		is set to the device index of the nexthop from the FIB lookup.
 *
 * 		*plen* argument is the size of the passed in struct.
 * 		*flags* argument can be a combination of one or more of the
 * 		following values:
 *
 * 		**BPF_FIB_LOOKUP_DIRECT**
 * 			Do a direct table lookup vs full lookup using FIB
 * 			rules.
 * 		**BPF_FIB_LOOKUP_OUTPUT**
 * 			Perform lookup from an egress perspective (default is
 * 			ingress).
 *
 * 		*ctx* is either **struct xdp_md** for XDP programs or
 * 		**struct sk_buff** for tc cls_act programs.
 * 	Return
 * 		* < 0 if any input argument is invalid
 * 		* 0 on success (packet is forwarded, nexthop neighbor exists)
 * 		* > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
 * 		  packet is not forwarded or needs assist from full stack
 *
 * long bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
 * 	Description
 * 		Add an entry to, or update a sockhash *map* referencing sockets.
 * 		The *skops* is used as a new value for the entry associated to
 * 		*key*. *flags* is one of:
 *
 * 		**BPF_NOEXIST**
 * 			The entry for *key* must not exist in the map.
 * 		**BPF_EXIST**
 * 			The entry for *key* must already exist in the map.
 * 		**BPF_ANY**
 * 			No condition on the existence of the entry for *key*.
 *
 * 		If the *map* has eBPF programs (parser and verdict), those will
 * 		be inherited by the socket being added. If the socket is
 * 		already attached to eBPF programs, this results in an error.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * long bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags)
 * 	Description
 * 		This helper is used in programs implementing policies at the
 * 		socket level. If the message *msg* is allowed to pass (i.e. if
 * 		the verdict eBPF program returns **SK_PASS**), redirect it to
 * 		the socket referenced by *map* (of type
 * 		**BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and
 * 		egress interfaces can be used for redirection. The
 * 		**BPF_F_INGRESS** value in *flags* is used to make the
 * 		distinction (ingress path is selected if the flag is present,
 * 		egress path otherwise). This is the only flag supported for now.
 * 	Return
 * 		**SK_PASS** on success, or **SK_DROP** on error.
 *
 * long bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags)
 * 	Description
 * 		This helper is used in programs implementing policies at the
 * 		skb socket level. If the sk_buff *skb* is allowed to pass (i.e.
 * 		if the verdict eBPF program returns **SK_PASS**), redirect it
 * 		to the socket referenced by *map* (of type
 * 		**BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and
 * 		egress interfaces can be used for redirection.
 * 		The **BPF_F_INGRESS** value in *flags* is used to make the
 * 		distinction (ingress path is selected if the flag is present,
 * 		egress otherwise). This is the only flag supported for now.
 * 	Return
 * 		**SK_PASS** on success, or **SK_DROP** on error.
 *
 * long bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len)
 * 	Description
 * 		Encapsulate the packet associated to *skb* within a Layer 3
 * 		protocol header. This header is provided in the buffer at
 * 		address *hdr*, with *len* its size in bytes. *type* indicates
 * 		the protocol of the header and can be one of:
 *
 * 		**BPF_LWT_ENCAP_SEG6**
 * 			IPv6 encapsulation with Segment Routing Header
 * 			(**struct ipv6_sr_hdr**). *hdr* only contains the SRH,
 * 			the IPv6 header is computed by the kernel.
 * 		**BPF_LWT_ENCAP_SEG6_INLINE**
 * 			Only works if *skb* contains an IPv6 packet. Insert a
 * 			Segment Routing Header (**struct ipv6_sr_hdr**) inside
 * 			the IPv6 header.
 * 		**BPF_LWT_ENCAP_IP**
 * 			IP encapsulation (GRE/GUE/IPIP/etc). The outer header
 * 			must be IPv4 or IPv6, followed by zero or more
 * 			additional headers, up to **LWT_BPF_MAX_HEADROOM**
 * 			total bytes in all prepended headers. Please note that
 * 			if **skb_is_gso**\ (*skb*) is true, no more than two
 * 			headers can be prepended, and the inner header, if
 * 			present, should be either GRE or UDP/GUE.
 *
 * 		**BPF_LWT_ENCAP_SEG6**\ \* types can be called by BPF programs
 * 		of type **BPF_PROG_TYPE_LWT_IN**; **BPF_LWT_ENCAP_IP** type can
 * 		be called by bpf programs of types **BPF_PROG_TYPE_LWT_IN** and
 * 		**BPF_PROG_TYPE_LWT_XMIT**.
 *
 * 		A call to this helper is susceptible to change the underlying
 * 		packet buffer. Therefore, at load time, all checks on pointers
 * 		previously done by the verifier are invalidated and must be
 * 		performed again, if the helper is used in combination with
 * 		direct packet access.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * long bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len)
 * 	Description
 * 		Store *len* bytes from address *from* into the packet
 * 		associated to *skb*, at *offset*. Only the flags, tag and TLVs
 * 		inside the outermost IPv6 Segment Routing Header can be
 * 		modified through this helper.
 *
 * 		A call to this helper is susceptible to change the underlying
 * 		packet buffer. Therefore, at load time, all checks on pointers
 * 		previously done by the verifier are invalidated and must be
 * 		performed again, if the helper is used in combination with
 * 		direct packet access.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * long bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta)
 * 	Description
 * 		Adjust the size allocated to TLVs in the outermost IPv6
 * 		Segment Routing Header contained in the packet associated to
 * 		*skb*, at position *offset* by *delta* bytes. Only offsets
 * 		after the segments are accepted. *delta* can be positive
 * 		(growing) as well as negative (shrinking).
 *
 * 		A call to this helper is susceptible to change the underlying
 * 		packet buffer. Therefore, at load time, all checks on pointers
 * 		previously done by the verifier are invalidated and must be
 * 		performed again, if the helper is used in combination with
 * 		direct packet access.
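 *
 * 		For instance, a sketch that grows the TLV area by 8 bytes at
 * 		*offset* and writes a TLV there (*tlv* is an illustrative
 * 		8-byte buffer; computing *offset* from the SRH is omitted)::
 *
 * 			if (!bpf_lwt_seg6_adjust_srh(skb, offset, 8))
 * 				bpf_lwt_seg6_store_bytes(skb, offset, tlv, 8);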
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * long bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len)
 * 	Description
 * 		Apply an IPv6 Segment Routing action of type *action* to the
 * 		packet associated to *skb*. Each action takes a parameter
 * 		contained at address *param*, and of length *param_len* bytes.
 * 		*action* can be one of:
 *
 * 		**SEG6_LOCAL_ACTION_END_X**
 * 			End.X action: Endpoint with Layer-3 cross-connect.
 * 			Type of *param*: **struct in6_addr**.
 * 		**SEG6_LOCAL_ACTION_END_T**
 * 			End.T action: Endpoint with specific IPv6 table lookup.
 * 			Type of *param*: **int**.
 * 		**SEG6_LOCAL_ACTION_END_B6**
 * 			End.B6 action: Endpoint bound to an SRv6 policy.
 * 			Type of *param*: **struct ipv6_sr_hdr**.
 * 		**SEG6_LOCAL_ACTION_END_B6_ENCAP**
 * 			End.B6.Encap action: Endpoint bound to an SRv6
 * 			encapsulation policy.
 * 			Type of *param*: **struct ipv6_sr_hdr**.
 *
 * 		A call to this helper is susceptible to change the underlying
 * 		packet buffer. Therefore, at load time, all checks on pointers
 * 		previously done by the verifier are invalidated and must be
 * 		performed again, if the helper is used in combination with
 * 		direct packet access.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * long bpf_rc_repeat(void *ctx)
 * 	Description
 * 		This helper is used in programs implementing IR decoding, to
 * 		report a successfully decoded repeat key message. This delays
 * 		the generation of a key up event for the previously generated
 * 		key down event.
 *
 * 		Some IR protocols like NEC have a special IR message for
 * 		repeating the last button, for when a button is held down.
 *
 * 		The *ctx* should point to the lirc sample as passed into
 * 		the program.
 *
 * 		This helper is only available if the kernel was compiled with
 * 		the **CONFIG_BPF_LIRC_MODE2** configuration option set to
 * 		"**y**".
 * 	Return
 * 		0
 *
 * long bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
 * 	Description
 * 		This helper is used in programs implementing IR decoding, to
 * 		report a successfully decoded key press with *scancode*,
 * 		*toggle* value in the given *protocol*. The scancode will be
 * 		translated to a keycode using the rc keymap, and reported as
 * 		an input key down event. After a period a key up event is
 * 		generated. This period can be extended by calling either
 * 		**bpf_rc_keydown**\ () again with the same values, or calling
 * 		**bpf_rc_repeat**\ ().
 *
 * 		Some protocols include a toggle bit, in case the button was
 * 		released and pressed again between consecutive scancodes.
 *
 * 		The *ctx* should point to the lirc sample as passed into
 * 		the program.
 *
 * 		The *protocol* is the decoded protocol number (see
 * 		**enum rc_proto** for some predefined values).
 *
 * 		This helper is only available if the kernel was compiled with
 * 		the **CONFIG_BPF_LIRC_MODE2** configuration option set to
 * 		"**y**".
 * 	Return
 * 		0
 *
 * u64 bpf_skb_cgroup_id(struct sk_buff *skb)
 * 	Description
 * 		Return the cgroup v2 id of the socket associated with the *skb*.
 * 		This is roughly similar to the **bpf_get_cgroup_classid**\ ()
 * 		helper for cgroup v1 by providing a tag, or identifier, that
 * 		can be matched on or used for map lookups e.g. to implement
 * 		policy. The cgroup v2 id of a given path in the hierarchy is
 * 		exposed in user space through the f_handle API in order to get
 * 		to the same 64-bit id.
 *
 * 		This helper can be used on TC egress path, but not on ingress,
 * 		and is available only if the kernel was compiled with the
 * 		**CONFIG_SOCK_CGROUP_DATA** configuration option.
 * 	Return
 * 		The id is returned or 0 in case the id could not be retrieved.
 *
 * u64 bpf_get_current_cgroup_id(void)
 * 	Return
 * 		A 64-bit integer containing the current cgroup id based
 * 		on the cgroup within which the current task is running.
 *
 * void *bpf_get_local_storage(void *map, u64 flags)
 * 	Description
 * 		Get the pointer to the local storage area.
 * 		The type and the size of the local storage is defined
 * 		by the *map* argument.
 * 		The *flags* meaning is specific for each map type,
 * 		and has to be 0 for cgroup local storage.
 *
 * 		Depending on the BPF program type, a local storage area
 * 		can be shared between multiple instances of the BPF program,
 * 		running simultaneously.
 *
 * 		Users are responsible for synchronization themselves, for
 * 		example by using the **BPF_STX_XADD** instruction to alter
 * 		the shared data.
 * 	Return
 * 		A pointer to the local storage area.
 *
 * long bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags)
 * 	Description
 * 		Select a **SO_REUSEPORT** socket from a
 * 		**BPF_MAP_TYPE_REUSEPORT_ARRAY** *map*.
 * 		It checks that the selected socket matches the incoming
 * 		request in the socket buffer.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
 * 	Description
 * 		Return id of cgroup v2 that is ancestor of cgroup associated
 * 		with the *skb* at the *ancestor_level*. The root cgroup is at
 * 		*ancestor_level* zero and each step down the hierarchy
 * 		increments the level. If *ancestor_level* == level of cgroup
 * 		associated with *skb*, then return value will be same as that
 * 		of **bpf_skb_cgroup_id**\ ().
 *
 * 		The helper is useful to implement policies based on cgroups
 * 		that are higher in the hierarchy than the immediate cgroup
 * 		associated with *skb*.
 *
 * 		The format of returned id and helper limitations are the same
 * 		as in **bpf_skb_cgroup_id**\ ().
 * 	Return
 * 		The id is returned or 0 in case the id could not be retrieved.
 *
 * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
 * 	Description
 * 		Look for TCP socket matching *tuple*, optionally in a child
 * 		network namespace *netns*. The return value must be checked,
 * 		and if non-**NULL**, released via **bpf_sk_release**\ ().
 *
 * 		The *ctx* should point to the context of the program, such as
 * 		the skb or socket (depending on the hook in use). This is used
 * 		to determine the base network namespace for the lookup.
 *
 * 		*tuple_size* must be one of:
 *
 * 		**sizeof**\ (*tuple*\ **->ipv4**)
 * 			Look for an IPv4 socket.
 * 		**sizeof**\ (*tuple*\ **->ipv6**)
 * 			Look for an IPv6 socket.
2441 * 2442 * If the *netns* is a negative signed 32-bit integer, then the 2443 * socket lookup table in the netns associated with the *ctx* 2444 * will be used. For the TC hooks, this is the netns of the device 2445 * in the skb. For socket hooks, this is the netns of the socket. 2446 * If *netns* is any other signed 32-bit value greater than or 2447 * equal to zero then it specifies the ID of the netns relative to 2448 * the netns associated with the *ctx*. *netns* values beyond the 2449 * range of 32-bit integers are reserved for future use. 2450 * 2451 * All values for *flags* are reserved for future usage, and must 2452 * be left at zero. 2453 * 2454 * This helper is available only if the kernel was compiled with 2455 * **CONFIG_NET** configuration option. 2456 * Return 2457 * Pointer to **struct bpf_sock**, or **NULL** in case of failure. 2458 * For sockets with reuseport option, the **struct bpf_sock** 2459 * result is from *reuse*\ **->socks**\ [] using the hash of the 2460 * tuple. 2461 * 2462 * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) 2463 * Description 2464 * Look for UDP socket matching *tuple*, optionally in a child 2465 * network namespace *netns*. The return value must be checked, 2466 * and if non-**NULL**, released via **bpf_sk_release**\ (). 2467 * 2468 * The *ctx* should point to the context of the program, such as 2469 * the skb or socket (depending on the hook in use). This is used 2470 * to determine the base network namespace for the lookup. 2471 * 2472 * *tuple_size* must be one of: 2473 * 2474 * **sizeof**\ (*tuple*\ **->ipv4**) 2475 * Look for an IPv4 socket. 2476 * **sizeof**\ (*tuple*\ **->ipv6**) 2477 * Look for an IPv6 socket. 2478 * 2479 * If the *netns* is a negative signed 32-bit integer, then the 2480 * socket lookup table in the netns associated with the *ctx* 2481 * will be used. For the TC hooks, this is the netns of the device 2482 * in the skb. For socket hooks, this is the netns of the socket. 2483 * If *netns* is any other signed 32-bit value greater than or 2484 * equal to zero then it specifies the ID of the netns relative to 2485 * the netns associated with the *ctx*. *netns* values beyond the 2486 * range of 32-bit integers are reserved for future use. 2487 * 2488 * All values for *flags* are reserved for future usage, and must 2489 * be left at zero. 2490 * 2491 * This helper is available only if the kernel was compiled with 2492 * **CONFIG_NET** configuration option. 2493 * Return 2494 * Pointer to **struct bpf_sock**, or **NULL** in case of failure. 2495 * For sockets with reuseport option, the **struct bpf_sock** 2496 * result is from *reuse*\ **->socks**\ [] using the hash of the 2497 * tuple. 2498 * 2499 * long bpf_sk_release(struct bpf_sock *sock) 2500 * Description 2501 * Release the reference held by *sock*. *sock* must be a 2502 * non-**NULL** pointer that was returned from 2503 * **bpf_sk_lookup_xxx**\ (). 2504 * Return 2505 * 0 on success, or a negative error in case of failure. 2506 * 2507 * long bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags) 2508 * Description 2509 * Push an element *value* in *map*. *flags* is one of: 2510 * 2511 * **BPF_EXIST** 2512 * If the queue/stack is full, the oldest element is 2513 * removed to make room for this. 2514 * Return 2515 * 0 on success, or a negative error in case of failure. 2516 * 2517 * long bpf_map_pop_elem(struct bpf_map *map, void *value) 2518 * Description 2519 * Pop an element from *map*. 
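 *
 * 		For instance, a sketch draining one element per run from a
 * 		**BPF_MAP_TYPE_QUEUE** map (*queue* is an illustrative map
 * 		name)::
 *
 * 			__u32 val;
 *
 * 			if (bpf_map_pop_elem(&queue, &val) == 0) {
 * 				// val now holds the oldest queued element.
 * 			}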
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * long bpf_map_peek_elem(struct bpf_map *map, void *value)
 * 	Description
 * 		Get an element from *map* without removing it.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * long bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
 * 	Description
 * 		For socket policies, insert *len* bytes into *msg* at offset
 * 		*start*.
 *
 * 		If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a
 * 		*msg* it may want to insert metadata or options into the *msg*.
 * 		This can later be read and used by any of the lower layer BPF
 * 		hooks.
 *
 * 		This helper may fail under memory pressure (if an allocation
 * 		fails); in these cases BPF programs will get an appropriate
 * 		error and will need to handle it.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * long bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
 * 	Description
 * 		Will remove *len* bytes from a *msg* starting at byte *start*.
 * 		This may result in **ENOMEM** errors under certain situations if
 * 		an allocation and copy are required due to a full ring buffer.
 * 		However, the helper will try to avoid doing the allocation
 * 		if possible. Other errors can occur if input parameters are
 * 		invalid, either due to the *start* byte not being a valid part
 * 		of the *msg* payload and/or the *len* value being too large.
 * 	Return
 * 		0 on success, or a negative error in case of failure.
 *
 * long bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y)
 * 	Description
 * 		This helper is used in programs implementing IR decoding, to
 * 		report a successfully decoded pointer movement.
 *
 * 		The *ctx* should point to the lirc sample as passed into
 * 		the program.
 *
 * 		This helper is only available if the kernel was compiled with
 * 		the **CONFIG_BPF_LIRC_MODE2** configuration option set to
 * 		"**y**".
 * 	Return
 * 		0
 *
 * long bpf_spin_lock(struct bpf_spin_lock *lock)
 * 	Description
 * 		Acquire a spinlock represented by the pointer *lock*, which is
 * 		stored as part of a value of a map. Taking the lock allows one
 * 		to safely update the rest of the fields in that value. The
 * 		spinlock can (and must) later be released with a call to
 * 		**bpf_spin_unlock**\ (\ *lock*\ ).
 *
 * 		Spinlocks in BPF programs come with a number of restrictions
 * 		and constraints:
 *
 * 		* **bpf_spin_lock** objects are only allowed inside maps of
 * 		  types **BPF_MAP_TYPE_HASH** and **BPF_MAP_TYPE_ARRAY** (this
 * 		  list could be extended in the future).
 * 		* BTF description of the map is mandatory.
 * 		* The BPF program can take ONE lock at a time, since taking two
 * 		  or more could cause deadlocks.
 * 		* Only one **struct bpf_spin_lock** is allowed per map element.
 * 		* When the lock is taken, calls (either BPF to BPF or helpers)
 * 		  are not allowed.
 * 		* The **BPF_LD_ABS** and **BPF_LD_IND** instructions are not
 * 		  allowed inside a spinlock-ed region.
 * 		* The BPF program MUST call **bpf_spin_unlock**\ () to release
 * 		  the lock, on all execution paths, before it returns.
 * 		* The BPF program can access **struct bpf_spin_lock** only via
 * 		  the **bpf_spin_lock**\ () and **bpf_spin_unlock**\ ()
 * 		  helpers.
 * 		  Loading or storing data into the **struct
 * 		  bpf_spin_lock** *lock*\ **;** field of a map is not allowed.
 * 		* To use the **bpf_spin_lock**\ () helper, the BTF description
 * 		  of the map value must be a struct and have **struct
 * 		  bpf_spin_lock** *anyname*\ **;** field at the top level.
 * 		  Nested lock inside another struct is not allowed.
 * 		* The **struct bpf_spin_lock** *lock* field in a map value must
 * 		  be aligned on a multiple of 4 bytes in that value.
 * 		* Syscall with command **BPF_MAP_LOOKUP_ELEM** does not copy
 * 		  the **bpf_spin_lock** field to user space.
 * 		* Syscall with command **BPF_MAP_UPDATE_ELEM**, or update from
 * 		  a BPF program, do not update the **bpf_spin_lock** field.
 * 		* **bpf_spin_lock** cannot be on the stack or inside a
 * 		  networking packet (it can only be inside of a map value).
 * 		* **bpf_spin_lock** is available to root only.
 * 		* Tracing programs and socket filter programs cannot use
 * 		  **bpf_spin_lock**\ () due to insufficient preemption checks
 * 		  (but this may change in the future).
 * 		* **bpf_spin_lock** is not allowed in inner maps of map-in-map.
 * 	Return
 * 		0
 *
 * long bpf_spin_unlock(struct bpf_spin_lock *lock)
 * 	Description
 * 		Release the *lock* previously locked by a call to
 * 		**bpf_spin_lock**\ (\ *lock*\ ).
 * 	Return
 * 		0
 *
 * struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk)
 * 	Description
 * 		This helper gets a **struct bpf_sock** pointer such
 * 		that all the fields in this **bpf_sock** can be accessed.
 * 	Return
 * 		A **struct bpf_sock** pointer on success, or **NULL** in
 * 		case of failure.
 *
 * struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk)
 * 	Description
 * 		This helper gets a **struct bpf_tcp_sock** pointer from a
 * 		**struct bpf_sock** pointer.
 * 	Return
 * 		A **struct bpf_tcp_sock** pointer on success, or **NULL** in
 * 		case of failure.
 *
 * long bpf_skb_ecn_set_ce(struct sk_buff *skb)
 * 	Description
 * 		Set ECN (Explicit Congestion Notification) field of IP header
 * 		to **CE** (Congestion Encountered) if current value is **ECT**
 * 		(ECN Capable Transport). Otherwise, do nothing. Works with IPv6
 * 		and IPv4.
 * 	Return
 * 		1 if the **CE** flag is set (either by the current helper call
 * 		or because it was already present), 0 if it is not set.
 *
 * struct bpf_sock *bpf_get_listener_sock(struct bpf_sock *sk)
 * 	Description
 * 		Return a **struct bpf_sock** pointer in **TCP_LISTEN** state.
 * 		**bpf_sk_release**\ () is unnecessary and not allowed.
 * 	Return
 * 		A **struct bpf_sock** pointer on success, or **NULL** in
 * 		case of failure.
 *
 * struct bpf_sock *bpf_skc_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
 * 	Description
 * 		Look for TCP socket matching *tuple*, optionally in a child
 * 		network namespace *netns*. The return value must be checked,
 * 		and if non-**NULL**, released via **bpf_sk_release**\ ().
 *
 * 		This function is identical to **bpf_sk_lookup_tcp**\ (), except
 * 		that it also returns timewait or request sockets. Use
 * 		**bpf_sk_fullsock**\ () or **bpf_tcp_sock**\ () to access the
 * 		full structure.
 *
 * 		This helper is available only if the kernel was compiled with
 * 		**CONFIG_NET** configuration option.
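 *
 * 		A typical usage sketch, looking up an IPv4 4-tuple in the
 * 		current netns and dropping the reference afterwards (filling
 * 		*tuple* from the packet is omitted)::
 *
 * 			struct bpf_sock *sk;
 *
 * 			sk = bpf_skc_lookup_tcp(ctx, &tuple, sizeof(tuple.ipv4),
 * 						BPF_F_CURRENT_NETNS, 0);
 * 			if (sk) {
 * 				// Inspect the (possibly not full) socket here.
 * 				bpf_sk_release(sk);
 * 			}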
2673 * Return 2674 * Pointer to **struct bpf_sock**, or **NULL** in case of failure. 2675 * For sockets with reuseport option, the **struct bpf_sock** 2676 * result is from *reuse*\ **->socks**\ [] using the hash of the 2677 * tuple. 2678 * 2679 * long bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) 2680 * Description 2681 * Check whether *iph* and *th* contain a valid SYN cookie ACK for 2682 * the listening socket in *sk*. 2683 * 2684 * *iph* points to the start of the IPv4 or IPv6 header, while 2685 * *iph_len* contains **sizeof**\ (**struct iphdr**) or 2686 * **sizeof**\ (**struct ip6hdr**). 2687 * 2688 * *th* points to the start of the TCP header, while *th_len* 2689 * contains **sizeof**\ (**struct tcphdr**). 2690 * Return 2691 * 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative 2692 * error otherwise. 2693 * 2694 * long bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags) 2695 * Description 2696 * Get name of sysctl in /proc/sys/ and copy it into provided by 2697 * program buffer *buf* of size *buf_len*. 2698 * 2699 * The buffer is always NUL terminated, unless it's zero-sized. 2700 * 2701 * If *flags* is zero, full name (e.g. "net/ipv4/tcp_mem") is 2702 * copied. Use **BPF_F_SYSCTL_BASE_NAME** flag to copy base name 2703 * only (e.g. "tcp_mem"). 2704 * Return 2705 * Number of character copied (not including the trailing NUL). 2706 * 2707 * **-E2BIG** if the buffer wasn't big enough (*buf* will contain 2708 * truncated name in this case). 2709 * 2710 * long bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) 2711 * Description 2712 * Get current value of sysctl as it is presented in /proc/sys 2713 * (incl. newline, etc), and copy it as a string into provided 2714 * by program buffer *buf* of size *buf_len*. 2715 * 2716 * The whole value is copied, no matter what file position user 2717 * space issued e.g. sys_read at. 2718 * 2719 * The buffer is always NUL terminated, unless it's zero-sized. 2720 * Return 2721 * Number of character copied (not including the trailing NUL). 2722 * 2723 * **-E2BIG** if the buffer wasn't big enough (*buf* will contain 2724 * truncated name in this case). 2725 * 2726 * **-EINVAL** if current value was unavailable, e.g. because 2727 * sysctl is uninitialized and read returns -EIO for it. 2728 * 2729 * long bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) 2730 * Description 2731 * Get new value being written by user space to sysctl (before 2732 * the actual write happens) and copy it as a string into 2733 * provided by program buffer *buf* of size *buf_len*. 2734 * 2735 * User space may write new value at file position > 0. 2736 * 2737 * The buffer is always NUL terminated, unless it's zero-sized. 2738 * Return 2739 * Number of character copied (not including the trailing NUL). 2740 * 2741 * **-E2BIG** if the buffer wasn't big enough (*buf* will contain 2742 * truncated name in this case). 2743 * 2744 * **-EINVAL** if sysctl is being read. 2745 * 2746 * long bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len) 2747 * Description 2748 * Override new value being written by user space to sysctl with 2749 * value provided by program in buffer *buf* of size *buf_len*. 2750 * 2751 * *buf* should contain a string in same form as provided by user 2752 * space on sysctl write. 2753 * 2754 * User space may write new value at file position > 0. 
To override 2755 * the whole sysctl value file position should be set to zero. 2756 * Return 2757 * 0 on success. 2758 * 2759 * **-E2BIG** if the *buf_len* is too big. 2760 * 2761 * **-EINVAL** if sysctl is being read. 2762 * 2763 * long bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res) 2764 * Description 2765 * Convert the initial part of the string from buffer *buf* of 2766 * size *buf_len* to a long integer according to the given base 2767 * and save the result in *res*. 2768 * 2769 * The string may begin with an arbitrary amount of white space 2770 * (as determined by **isspace**\ (3)) followed by a single 2771 * optional '**-**' sign. 2772 * 2773 * Five least significant bits of *flags* encode base, other bits 2774 * are currently unused. 2775 * 2776 * Base must be either 8, 10, 16 or 0 to detect it automatically 2777 * similar to user space **strtol**\ (3). 2778 * Return 2779 * Number of characters consumed on success. Must be positive but 2780 * no more than *buf_len*. 2781 * 2782 * **-EINVAL** if no valid digits were found or unsupported base 2783 * was provided. 2784 * 2785 * **-ERANGE** if resulting value was out of range. 2786 * 2787 * long bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res) 2788 * Description 2789 * Convert the initial part of the string from buffer *buf* of 2790 * size *buf_len* to an unsigned long integer according to the 2791 * given base and save the result in *res*. 2792 * 2793 * The string may begin with an arbitrary amount of white space 2794 * (as determined by **isspace**\ (3)). 2795 * 2796 * Five least significant bits of *flags* encode base, other bits 2797 * are currently unused. 2798 * 2799 * Base must be either 8, 10, 16 or 0 to detect it automatically 2800 * similar to user space **strtoul**\ (3). 2801 * Return 2802 * Number of characters consumed on success. Must be positive but 2803 * no more than *buf_len*. 2804 * 2805 * **-EINVAL** if no valid digits were found or unsupported base 2806 * was provided. 2807 * 2808 * **-ERANGE** if resulting value was out of range. 2809 * 2810 * void *bpf_sk_storage_get(struct bpf_map *map, struct bpf_sock *sk, void *value, u64 flags) 2811 * Description 2812 * Get a bpf-local-storage from a *sk*. 2813 * 2814 * Logically, it could be thought of getting the value from 2815 * a *map* with *sk* as the **key**. From this 2816 * perspective, the usage is not much different from 2817 * **bpf_map_lookup_elem**\ (*map*, **&**\ *sk*) except this 2818 * helper enforces the key must be a full socket and the map must 2819 * be a **BPF_MAP_TYPE_SK_STORAGE** also. 2820 * 2821 * Underneath, the value is stored locally at *sk* instead of 2822 * the *map*. The *map* is used as the bpf-local-storage 2823 * "type". The bpf-local-storage "type" (i.e. the *map*) is 2824 * searched against all bpf-local-storages residing at *sk*. 2825 * 2826 * An optional *flags* (**BPF_SK_STORAGE_GET_F_CREATE**) can be 2827 * used such that a new bpf-local-storage will be 2828 * created if one does not exist. *value* can be used 2829 * together with **BPF_SK_STORAGE_GET_F_CREATE** to specify 2830 * the initial value of a bpf-local-storage. If *value* is 2831 * **NULL**, the new bpf-local-storage will be zero initialized. 2832 * Return 2833 * A bpf-local-storage pointer is returned on success. 2834 * 2835 * **NULL** if not found or there was an error in adding 2836 * a new bpf-local-storage. 
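 *
 *		For example, a minimal sketch that creates the storage on
 *		first access and checks the returned pointer (the
 *		**BPF_MAP_TYPE_SK_STORAGE** map *sk_stg* and its value type
 *		*struct my_storage* are illustrative names assumed to be
 *		declared elsewhere):
 *
 *		::
 *
 *			struct my_storage *stg;
 *
 *			stg = bpf_sk_storage_get(&sk_stg, sk, NULL,
 *						 BPF_SK_STORAGE_GET_F_CREATE);
 *			if (!stg)
 *				return 0; // no full socket or out of memory
 *			stg->counter++;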
2837 * 2838 * long bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk) 2839 * Description 2840 * Delete a bpf-local-storage from a *sk*. 2841 * Return 2842 * 0 on success. 2843 * 2844 * **-ENOENT** if the bpf-local-storage cannot be found. 2845 * 2846 * long bpf_send_signal(u32 sig) 2847 * Description 2848 * Send signal *sig* to the process of the current task. 2849 * The signal may be delivered to any of this process's threads. 2850 * Return 2851 * 0 on success or successfully queued. 2852 * 2853 * **-EBUSY** if work queue under nmi is full. 2854 * 2855 * **-EINVAL** if *sig* is invalid. 2856 * 2857 * **-EPERM** if no permission to send the *sig*. 2858 * 2859 * **-EAGAIN** if bpf program can try again. 2860 * 2861 * s64 bpf_tcp_gen_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) 2862 * Description 2863 * Try to issue a SYN cookie for the packet with corresponding 2864 * IP/TCP headers, *iph* and *th*, on the listening socket in *sk*. 2865 * 2866 * *iph* points to the start of the IPv4 or IPv6 header, while 2867 * *iph_len* contains **sizeof**\ (**struct iphdr**) or 2868 * **sizeof**\ (**struct ip6hdr**). 2869 * 2870 * *th* points to the start of the TCP header, while *th_len* 2871 * contains the length of the TCP header. 2872 * Return 2873 * On success, lower 32 bits hold the generated SYN cookie in 2874 * followed by 16 bits which hold the MSS value for that cookie, 2875 * and the top 16 bits are unused. 2876 * 2877 * On failure, the returned value is one of the following: 2878 * 2879 * **-EINVAL** SYN cookie cannot be issued due to error 2880 * 2881 * **-ENOENT** SYN cookie should not be issued (no SYN flood) 2882 * 2883 * **-EOPNOTSUPP** kernel configuration does not enable SYN cookies 2884 * 2885 * **-EPROTONOSUPPORT** IP packet version is not 4 or 6 2886 * 2887 * long bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) 2888 * Description 2889 * Write raw *data* blob into a special BPF perf event held by 2890 * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf 2891 * event must have the following attributes: **PERF_SAMPLE_RAW** 2892 * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and 2893 * **PERF_COUNT_SW_BPF_OUTPUT** as **config**. 2894 * 2895 * The *flags* are used to indicate the index in *map* for which 2896 * the value must be put, masked with **BPF_F_INDEX_MASK**. 2897 * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU** 2898 * to indicate that the index of the current CPU core should be 2899 * used. 2900 * 2901 * The value to write, of *size*, is passed through eBPF stack and 2902 * pointed by *data*. 2903 * 2904 * *ctx* is a pointer to in-kernel struct sk_buff. 2905 * 2906 * This helper is similar to **bpf_perf_event_output**\ () but 2907 * restricted to raw_tracepoint bpf programs. 2908 * Return 2909 * 0 on success, or a negative error in case of failure. 2910 * 2911 * long bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr) 2912 * Description 2913 * Safely attempt to read *size* bytes from user space address 2914 * *unsafe_ptr* and store the data in *dst*. 2915 * Return 2916 * 0 on success, or a negative error in case of failure. 2917 * 2918 * long bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr) 2919 * Description 2920 * Safely attempt to read *size* bytes from kernel space address 2921 * *unsafe_ptr* and store the data in *dst*. 2922 * Return 2923 * 0 on success, or a negative error in case of failure. 
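 *
 *		For example, a minimal tracing sketch that reads the *pid*
 *		field of the current task (it is assumed that the layout of
 *		**struct task_struct** is visible to the program, e.g. via a
 *		generated vmlinux.h):
 *
 *		::
 *
 *			struct task_struct *task;
 *			int pid = 0;
 *
 *			task = (struct task_struct *)bpf_get_current_task();
 *			bpf_probe_read_kernel(&pid, sizeof(pid), &task->pid);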
2924 * 2925 * long bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr) 2926 * Description 2927 * Copy a NUL terminated string from an unsafe user address 2928 * *unsafe_ptr* to *dst*. The *size* should include the 2929 * terminating NUL byte. In case the string length is smaller than 2930 * *size*, the target is not padded with further NUL bytes. If the 2931 * string length is larger than *size*, just *size*-1 bytes are 2932 * copied and the last byte is set to NUL. 2933 * 2934 * On success, the length of the copied string is returned. This 2935 * makes this helper useful in tracing programs for reading 2936 * strings, and more importantly to get its length at runtime. See 2937 * the following snippet: 2938 * 2939 * :: 2940 * 2941 * SEC("kprobe/sys_open") 2942 * void bpf_sys_open(struct pt_regs *ctx) 2943 * { 2944 * char buf[PATHLEN]; // PATHLEN is defined to 256 2945 * int res = bpf_probe_read_user_str(buf, sizeof(buf), 2946 * ctx->di); 2947 * 2948 * // Consume buf, for example push it to 2949 * // userspace via bpf_perf_event_output(); we 2950 * // can use res (the string length) as event 2951 * // size, after checking its boundaries. 2952 * } 2953 * 2954 * In comparison, using **bpf_probe_read_user**\ () helper here 2955 * instead to read the string would require to estimate the length 2956 * at compile time, and would often result in copying more memory 2957 * than necessary. 2958 * 2959 * Another useful use case is when parsing individual process 2960 * arguments or individual environment variables navigating 2961 * *current*\ **->mm->arg_start** and *current*\ 2962 * **->mm->env_start**: using this helper and the return value, 2963 * one can quickly iterate at the right offset of the memory area. 2964 * Return 2965 * On success, the strictly positive length of the string, 2966 * including the trailing NUL character. On error, a negative 2967 * value. 2968 * 2969 * long bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr) 2970 * Description 2971 * Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr* 2972 * to *dst*. Same semantics as with **bpf_probe_read_user_str**\ () apply. 2973 * Return 2974 * On success, the strictly positive length of the string, including 2975 * the trailing NUL character. On error, a negative value. 2976 * 2977 * long bpf_tcp_send_ack(void *tp, u32 rcv_nxt) 2978 * Description 2979 * Send out a tcp-ack. *tp* is the in-kernel struct **tcp_sock**. 2980 * *rcv_nxt* is the ack_seq to be sent out. 2981 * Return 2982 * 0 on success, or a negative error in case of failure. 2983 * 2984 * long bpf_send_signal_thread(u32 sig) 2985 * Description 2986 * Send signal *sig* to the thread corresponding to the current task. 2987 * Return 2988 * 0 on success or successfully queued. 2989 * 2990 * **-EBUSY** if work queue under nmi is full. 2991 * 2992 * **-EINVAL** if *sig* is invalid. 2993 * 2994 * **-EPERM** if no permission to send the *sig*. 2995 * 2996 * **-EAGAIN** if bpf program can try again. 2997 * 2998 * u64 bpf_jiffies64(void) 2999 * Description 3000 * Obtain the 64bit jiffies
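 *
 *		This is a cheap way to do coarse timing in BPF programs, for
 *		example (a sketch; the difference is in jiffies and can be
 *		converted to seconds by dividing by the kernel's HZ value):
 *
 *		::
 *
 *			u64 start = bpf_jiffies64();
 *
 *			// ... work to be timed ...
 *
 *			u64 elapsed = bpf_jiffies64() - start;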
3001 * Return 3002 * The 64 bit jiffies 3003 * 3004 * long bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags) 3005 * Description 3006 * For an eBPF program attached to a perf event, retrieve the 3007 * branch records (**struct perf_branch_entry**) associated to *ctx* 3008 * and store it in the buffer pointed by *buf* up to size 3009 * *size* bytes. 3010 * Return 3011 * On success, number of bytes written to *buf*. On error, a 3012 * negative value. 3013 * 3014 * The *flags* can be set to **BPF_F_GET_BRANCH_RECORDS_SIZE** to 3015 * instead return the number of bytes required to store all the 3016 * branch entries. If this flag is set, *buf* may be NULL. 3017 * 3018 * **-EINVAL** if arguments invalid or **size** not a multiple 3019 * of **sizeof**\ (**struct perf_branch_entry**\ ). 3020 * 3021 * **-ENOENT** if architecture does not support branch records. 3022 * 3023 * long bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size) 3024 * Description 3025 * Returns 0 on success, values for *pid* and *tgid* as seen from the current 3026 * *namespace* will be returned in *nsdata*. 3027 * Return 3028 * 0 on success, or one of the following in case of failure: 3029 * 3030 * **-EINVAL** if dev and inum supplied don't match dev_t and inode number 3031 * with nsfs of current task, or if dev conversion to dev_t lost high bits. 3032 * 3033 * **-ENOENT** if pidns does not exists for the current task. 3034 * 3035 * long bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) 3036 * Description 3037 * Write raw *data* blob into a special BPF perf event held by 3038 * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf 3039 * event must have the following attributes: **PERF_SAMPLE_RAW** 3040 * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and 3041 * **PERF_COUNT_SW_BPF_OUTPUT** as **config**. 3042 * 3043 * The *flags* are used to indicate the index in *map* for which 3044 * the value must be put, masked with **BPF_F_INDEX_MASK**. 3045 * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU** 3046 * to indicate that the index of the current CPU core should be 3047 * used. 3048 * 3049 * The value to write, of *size*, is passed through eBPF stack and 3050 * pointed by *data*. 3051 * 3052 * *ctx* is a pointer to in-kernel struct xdp_buff. 3053 * 3054 * This helper is similar to **bpf_perf_eventoutput**\ () but 3055 * restricted to raw_tracepoint bpf programs. 3056 * Return 3057 * 0 on success, or a negative error in case of failure. 3058 * 3059 * u64 bpf_get_netns_cookie(void *ctx) 3060 * Description 3061 * Retrieve the cookie (generated by the kernel) of the network 3062 * namespace the input *ctx* is associated with. The network 3063 * namespace cookie remains stable for its lifetime and provides 3064 * a global identifier that can be assumed unique. If *ctx* is 3065 * NULL, then the helper returns the cookie for the initial 3066 * network namespace. The cookie itself is very similar to that 3067 * of **bpf_get_socket_cookie**\ () helper, but for network 3068 * namespaces instead of sockets. 3069 * Return 3070 * A 8-byte long opaque number. 3071 * 3072 * u64 bpf_get_current_ancestor_cgroup_id(int ancestor_level) 3073 * Description 3074 * Return id of cgroup v2 that is ancestor of the cgroup associated 3075 * with the current task at the *ancestor_level*. The root cgroup 3076 * is at *ancestor_level* zero and each step down the hierarchy 3077 * increments the level. 
If *ancestor_level* == level of cgroup 3078 * associated with the current task, then return value will be the 3079 * same as that of **bpf_get_current_cgroup_id**\ (). 3080 * 3081 * The helper is useful to implement policies based on cgroups 3082 * that are upper in hierarchy than immediate cgroup associated 3083 * with the current task. 3084 * 3085 * The format of returned id and helper limitations are same as in 3086 * **bpf_get_current_cgroup_id**\ (). 3087 * Return 3088 * The id is returned or 0 in case the id could not be retrieved. 3089 * 3090 * long bpf_sk_assign(struct sk_buff *skb, struct bpf_sock *sk, u64 flags) 3091 * Description 3092 * Helper is overloaded depending on BPF program type. This 3093 * description applies to **BPF_PROG_TYPE_SCHED_CLS** and 3094 * **BPF_PROG_TYPE_SCHED_ACT** programs. 3095 * 3096 * Assign the *sk* to the *skb*. When combined with appropriate 3097 * routing configuration to receive the packet towards the socket, 3098 * will cause *skb* to be delivered to the specified socket. 3099 * Subsequent redirection of *skb* via **bpf_redirect**\ (), 3100 * **bpf_clone_redirect**\ () or other methods outside of BPF may 3101 * interfere with successful delivery to the socket. 3102 * 3103 * This operation is only valid from TC ingress path. 3104 * 3105 * The *flags* argument must be zero. 3106 * Return 3107 * 0 on success, or a negative error in case of failure: 3108 * 3109 * **-EINVAL** if specified *flags* are not supported. 3110 * 3111 * **-ENOENT** if the socket is unavailable for assignment. 3112 * 3113 * **-ENETUNREACH** if the socket is unreachable (wrong netns). 3114 * 3115 * **-EOPNOTSUPP** if the operation is not supported, for example 3116 * a call from outside of TC ingress. 3117 * 3118 * **-ESOCKTNOSUPPORT** if the socket type is not supported 3119 * (reuseport). 3120 * 3121 * long bpf_sk_assign(struct bpf_sk_lookup *ctx, struct bpf_sock *sk, u64 flags) 3122 * Description 3123 * Helper is overloaded depending on BPF program type. This 3124 * description applies to **BPF_PROG_TYPE_SK_LOOKUP** programs. 3125 * 3126 * Select the *sk* as a result of a socket lookup. 3127 * 3128 * For the operation to succeed passed socket must be compatible 3129 * with the packet description provided by the *ctx* object. 3130 * 3131 * L4 protocol (**IPPROTO_TCP** or **IPPROTO_UDP**) must 3132 * be an exact match. While IP family (**AF_INET** or 3133 * **AF_INET6**) must be compatible, that is IPv6 sockets 3134 * that are not v6-only can be selected for IPv4 packets. 3135 * 3136 * Only TCP listeners and UDP unconnected sockets can be 3137 * selected. *sk* can also be NULL to reset any previous 3138 * selection. 3139 * 3140 * *flags* argument can combination of following values: 3141 * 3142 * * **BPF_SK_LOOKUP_F_REPLACE** to override the previous 3143 * socket selection, potentially done by a BPF program 3144 * that ran before us. 3145 * 3146 * * **BPF_SK_LOOKUP_F_NO_REUSEPORT** to skip 3147 * load-balancing within reuseport group for the socket 3148 * being selected. 3149 * 3150 * On success *ctx->sk* will point to the selected socket. 3151 * 3152 * Return 3153 * 0 on success, or a negative errno in case of failure. 3154 * 3155 * * **-EAFNOSUPPORT** if socket family (*sk->family*) is 3156 * not compatible with packet family (*ctx->family*). 3157 * 3158 * * **-EEXIST** if socket has been already selected, 3159 * potentially by another program, and 3160 * **BPF_SK_LOOKUP_F_REPLACE** flag was not specified. 3161 * 3162 * * **-EINVAL** if unsupported flags were specified. 
3163 * 3164 * * **-EPROTOTYPE** if socket L4 protocol 3165 * (*sk->protocol*) doesn't match packet protocol 3166 * (*ctx->protocol*). 3167 * 3168 * * **-ESOCKTNOSUPPORT** if socket is not in allowed 3169 * state (TCP listening or UDP unconnected). 3170 * 3171 * u64 bpf_ktime_get_boot_ns(void) 3172 * Description 3173 * Return the time elapsed since system boot, in nanoseconds. 3174 * Does include the time the system was suspended. 3175 * See: **clock_gettime**\ (**CLOCK_BOOTTIME**) 3176 * Return 3177 * Current *ktime*. 3178 * 3179 * long bpf_seq_printf(struct seq_file *m, const char *fmt, u32 fmt_size, const void *data, u32 data_len) 3180 * Description 3181 * **bpf_seq_printf**\ () uses seq_file **seq_printf**\ () to print 3182 * out the format string. 3183 * The *m* represents the seq_file. The *fmt* and *fmt_size* are for 3184 * the format string itself. The *data* and *data_len* are format string 3185 * arguments. The *data* are a **u64** array and corresponding format string 3186 * values are stored in the array. For strings and pointers where pointees 3187 * are accessed, only the pointer values are stored in the *data* array. 3188 * The *data_len* is the size of *data* in bytes. 3189 * 3190 * Formats **%s**, **%p{i,I}{4,6}** requires to read kernel memory. 3191 * Reading kernel memory may fail due to either invalid address or 3192 * valid address but requiring a major memory fault. If reading kernel memory 3193 * fails, the string for **%s** will be an empty string, and the ip 3194 * address for **%p{i,I}{4,6}** will be 0. Not returning error to 3195 * bpf program is consistent with what **bpf_trace_printk**\ () does for now. 3196 * Return 3197 * 0 on success, or a negative error in case of failure: 3198 * 3199 * **-EBUSY** if per-CPU memory copy buffer is busy, can try again 3200 * by returning 1 from bpf program. 3201 * 3202 * **-EINVAL** if arguments are invalid, or if *fmt* is invalid/unsupported. 3203 * 3204 * **-E2BIG** if *fmt* contains too many format specifiers. 3205 * 3206 * **-EOVERFLOW** if an overflow happened: The same object will be tried again. 3207 * 3208 * long bpf_seq_write(struct seq_file *m, const void *data, u32 len) 3209 * Description 3210 * **bpf_seq_write**\ () uses seq_file **seq_write**\ () to write the data. 3211 * The *m* represents the seq_file. The *data* and *len* represent the 3212 * data to write in bytes. 3213 * Return 3214 * 0 on success, or a negative error in case of failure: 3215 * 3216 * **-EOVERFLOW** if an overflow happened: The same object will be tried again. 3217 * 3218 * u64 bpf_sk_cgroup_id(struct bpf_sock *sk) 3219 * Description 3220 * Return the cgroup v2 id of the socket *sk*. 3221 * 3222 * *sk* must be a non-**NULL** pointer to a full socket, e.g. one 3223 * returned from **bpf_sk_lookup_xxx**\ (), 3224 * **bpf_sk_fullsock**\ (), etc. The format of returned id is 3225 * same as in **bpf_skb_cgroup_id**\ (). 3226 * 3227 * This helper is available only if the kernel was compiled with 3228 * the **CONFIG_SOCK_CGROUP_DATA** configuration option. 3229 * Return 3230 * The id is returned or 0 in case the id could not be retrieved. 3231 * 3232 * u64 bpf_sk_ancestor_cgroup_id(struct bpf_sock *sk, int ancestor_level) 3233 * Description 3234 * Return id of cgroup v2 that is ancestor of cgroup associated 3235 * with the *sk* at the *ancestor_level*. The root cgroup is at 3236 * *ancestor_level* zero and each step down the hierarchy 3237 * increments the level. 
If *ancestor_level* == level of cgroup 3238 * associated with *sk*, then return value will be same as that 3239 * of **bpf_sk_cgroup_id**\ (). 3240 * 3241 * The helper is useful to implement policies based on cgroups 3242 * that are upper in hierarchy than immediate cgroup associated 3243 * with *sk*. 3244 * 3245 * The format of returned id and helper limitations are same as in 3246 * **bpf_sk_cgroup_id**\ (). 3247 * Return 3248 * The id is returned or 0 in case the id could not be retrieved. 3249 * 3250 * long bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags) 3251 * Description 3252 * Copy *size* bytes from *data* into a ring buffer *ringbuf*. 3253 * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification 3254 * of new data availability is sent. 3255 * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification 3256 * of new data availability is sent unconditionally. 3257 * Return 3258 * 0 on success, or a negative error in case of failure. 3259 * 3260 * void *bpf_ringbuf_reserve(void *ringbuf, u64 size, u64 flags) 3261 * Description 3262 * Reserve *size* bytes of payload in a ring buffer *ringbuf*. 3263 * Return 3264 * Valid pointer with *size* bytes of memory available; NULL, 3265 * otherwise. 3266 * 3267 * void bpf_ringbuf_submit(void *data, u64 flags) 3268 * Description 3269 * Submit reserved ring buffer sample, pointed to by *data*. 3270 * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification 3271 * of new data availability is sent. 3272 * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification 3273 * of new data availability is sent unconditionally. 3274 * Return 3275 * Nothing. Always succeeds. 3276 * 3277 * void bpf_ringbuf_discard(void *data, u64 flags) 3278 * Description 3279 * Discard reserved ring buffer sample, pointed to by *data*. 3280 * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification 3281 * of new data availability is sent. 3282 * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification 3283 * of new data availability is sent unconditionally. 3284 * Return 3285 * Nothing. Always succeeds. 3286 * 3287 * u64 bpf_ringbuf_query(void *ringbuf, u64 flags) 3288 * Description 3289 * Query various characteristics of provided ring buffer. What 3290 * exactly is queries is determined by *flags*: 3291 * 3292 * * **BPF_RB_AVAIL_DATA**: Amount of data not yet consumed. 3293 * * **BPF_RB_RING_SIZE**: The size of ring buffer. 3294 * * **BPF_RB_CONS_POS**: Consumer position (can wrap around). 3295 * * **BPF_RB_PROD_POS**: Producer(s) position (can wrap around). 3296 * 3297 * Data returned is just a momentary snapshot of actual values 3298 * and could be inaccurate, so this facility should be used to 3299 * power heuristics and for reporting, not to make 100% correct 3300 * calculation. 3301 * Return 3302 * Requested value, or 0, if *flags* are not recognized. 3303 * 3304 * long bpf_csum_level(struct sk_buff *skb, u64 level) 3305 * Description 3306 * Change the skbs checksum level by one layer up or down, or 3307 * reset it entirely to none in order to have the stack perform 3308 * checksum validation. The level is applicable to the following 3309 * protocols: TCP, UDP, GRE, SCTP, FCOE. For example, a decap of 3310 * | ETH | IP | UDP | GUE | IP | TCP | into | ETH | IP | TCP | 3311 * through **bpf_skb_adjust_room**\ () helper with passing in 3312 * **BPF_F_ADJ_ROOM_NO_CSUM_RESET** flag would require one call 3313 * to **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_DEC** since 3314 * the UDP header is removed. 
Similarly, an encap of the latter 3315 * into the former could be accompanied by a helper call to 3316 * **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_INC** if the 3317 * skb is still intended to be processed in higher layers of the 3318 * stack instead of just egressing at tc. 3319 * 3320 * There are three supported level settings at this time: 3321 * 3322 * * **BPF_CSUM_LEVEL_INC**: Increases skb->csum_level for skbs 3323 * with CHECKSUM_UNNECESSARY. 3324 * * **BPF_CSUM_LEVEL_DEC**: Decreases skb->csum_level for skbs 3325 * with CHECKSUM_UNNECESSARY. 3326 * * **BPF_CSUM_LEVEL_RESET**: Resets skb->csum_level to 0 and 3327 * sets CHECKSUM_NONE to force checksum validation by the stack. 3328 * * **BPF_CSUM_LEVEL_QUERY**: No-op, returns the current 3329 * skb->csum_level. 3330 * Return 3331 * 0 on success, or a negative error in case of failure. In the 3332 * case of **BPF_CSUM_LEVEL_QUERY**, the current skb->csum_level 3333 * is returned or the error code -EACCES in case the skb is not 3334 * subject to CHECKSUM_UNNECESSARY. 3335 * 3336 * struct tcp6_sock *bpf_skc_to_tcp6_sock(void *sk) 3337 * Description 3338 * Dynamically cast a *sk* pointer to a *tcp6_sock* pointer. 3339 * Return 3340 * *sk* if casting is valid, or NULL otherwise. 3341 * 3342 * struct tcp_sock *bpf_skc_to_tcp_sock(void *sk) 3343 * Description 3344 * Dynamically cast a *sk* pointer to a *tcp_sock* pointer. 3345 * Return 3346 * *sk* if casting is valid, or NULL otherwise. 3347 * 3348 * struct tcp_timewait_sock *bpf_skc_to_tcp_timewait_sock(void *sk) 3349 * Description 3350 * Dynamically cast a *sk* pointer to a *tcp_timewait_sock* pointer. 3351 * Return 3352 * *sk* if casting is valid, or NULL otherwise. 3353 * 3354 * struct tcp_request_sock *bpf_skc_to_tcp_request_sock(void *sk) 3355 * Description 3356 * Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer. 3357 * Return 3358 * *sk* if casting is valid, or NULL otherwise. 3359 * 3360 * struct udp6_sock *bpf_skc_to_udp6_sock(void *sk) 3361 * Description 3362 * Dynamically cast a *sk* pointer to a *udp6_sock* pointer. 3363 * Return 3364 * *sk* if casting is valid, or NULL otherwise. 3365 * 3366 * long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags) 3367 * Description 3368 * Return a user or a kernel stack in bpf program provided buffer. 3369 * To achieve this, the helper needs *task*, which is a valid 3370 * pointer to struct task_struct. To store the stacktrace, the 3371 * bpf program provides *buf* with a nonnegative *size*. 3372 * 3373 * The last argument, *flags*, holds the number of stack frames to 3374 * skip (from 0 to 255), masked with 3375 * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set 3376 * the following flags: 3377 * 3378 * **BPF_F_USER_STACK** 3379 * Collect a user space stack instead of a kernel stack. 3380 * **BPF_F_USER_BUILD_ID** 3381 * Collect buildid+offset instead of ips for user stack, 3382 * only valid if **BPF_F_USER_STACK** is also specified. 3383 * 3384 * **bpf_get_task_stack**\ () can collect up to 3385 * **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject 3386 * to sufficient large buffer size. Note that 3387 * this limit can be controlled with the **sysctl** program, and 3388 * that it should be manually increased in order to profile long 3389 * user stacks (such as stacks for Java programs). 
To do so, use: 3390 * 3391 * :: 3392 * 3393 * # sysctl kernel.perf_event_max_stack=<new value> 3394 * Return 3395 * A non-negative value equal to or less than *size* on success, 3396 * or a negative error in case of failure. 3397 * 3398 */ 3399#define __BPF_FUNC_MAPPER(FN) \ 3400 FN(unspec), \ 3401 FN(map_lookup_elem), \ 3402 FN(map_update_elem), \ 3403 FN(map_delete_elem), \ 3404 FN(probe_read), \ 3405 FN(ktime_get_ns), \ 3406 FN(trace_printk), \ 3407 FN(get_prandom_u32), \ 3408 FN(get_smp_processor_id), \ 3409 FN(skb_store_bytes), \ 3410 FN(l3_csum_replace), \ 3411 FN(l4_csum_replace), \ 3412 FN(tail_call), \ 3413 FN(clone_redirect), \ 3414 FN(get_current_pid_tgid), \ 3415 FN(get_current_uid_gid), \ 3416 FN(get_current_comm), \ 3417 FN(get_cgroup_classid), \ 3418 FN(skb_vlan_push), \ 3419 FN(skb_vlan_pop), \ 3420 FN(skb_get_tunnel_key), \ 3421 FN(skb_set_tunnel_key), \ 3422 FN(perf_event_read), \ 3423 FN(redirect), \ 3424 FN(get_route_realm), \ 3425 FN(perf_event_output), \ 3426 FN(skb_load_bytes), \ 3427 FN(get_stackid), \ 3428 FN(csum_diff), \ 3429 FN(skb_get_tunnel_opt), \ 3430 FN(skb_set_tunnel_opt), \ 3431 FN(skb_change_proto), \ 3432 FN(skb_change_type), \ 3433 FN(skb_under_cgroup), \ 3434 FN(get_hash_recalc), \ 3435 FN(get_current_task), \ 3436 FN(probe_write_user), \ 3437 FN(current_task_under_cgroup), \ 3438 FN(skb_change_tail), \ 3439 FN(skb_pull_data), \ 3440 FN(csum_update), \ 3441 FN(set_hash_invalid), \ 3442 FN(get_numa_node_id), \ 3443 FN(skb_change_head), \ 3444 FN(xdp_adjust_head), \ 3445 FN(probe_read_str), \ 3446 FN(get_socket_cookie), \ 3447 FN(get_socket_uid), \ 3448 FN(set_hash), \ 3449 FN(setsockopt), \ 3450 FN(skb_adjust_room), \ 3451 FN(redirect_map), \ 3452 FN(sk_redirect_map), \ 3453 FN(sock_map_update), \ 3454 FN(xdp_adjust_meta), \ 3455 FN(perf_event_read_value), \ 3456 FN(perf_prog_read_value), \ 3457 FN(getsockopt), \ 3458 FN(override_return), \ 3459 FN(sock_ops_cb_flags_set), \ 3460 FN(msg_redirect_map), \ 3461 FN(msg_apply_bytes), \ 3462 FN(msg_cork_bytes), \ 3463 FN(msg_pull_data), \ 3464 FN(bind), \ 3465 FN(xdp_adjust_tail), \ 3466 FN(skb_get_xfrm_state), \ 3467 FN(get_stack), \ 3468 FN(skb_load_bytes_relative), \ 3469 FN(fib_lookup), \ 3470 FN(sock_hash_update), \ 3471 FN(msg_redirect_hash), \ 3472 FN(sk_redirect_hash), \ 3473 FN(lwt_push_encap), \ 3474 FN(lwt_seg6_store_bytes), \ 3475 FN(lwt_seg6_adjust_srh), \ 3476 FN(lwt_seg6_action), \ 3477 FN(rc_repeat), \ 3478 FN(rc_keydown), \ 3479 FN(skb_cgroup_id), \ 3480 FN(get_current_cgroup_id), \ 3481 FN(get_local_storage), \ 3482 FN(sk_select_reuseport), \ 3483 FN(skb_ancestor_cgroup_id), \ 3484 FN(sk_lookup_tcp), \ 3485 FN(sk_lookup_udp), \ 3486 FN(sk_release), \ 3487 FN(map_push_elem), \ 3488 FN(map_pop_elem), \ 3489 FN(map_peek_elem), \ 3490 FN(msg_push_data), \ 3491 FN(msg_pop_data), \ 3492 FN(rc_pointer_rel), \ 3493 FN(spin_lock), \ 3494 FN(spin_unlock), \ 3495 FN(sk_fullsock), \ 3496 FN(tcp_sock), \ 3497 FN(skb_ecn_set_ce), \ 3498 FN(get_listener_sock), \ 3499 FN(skc_lookup_tcp), \ 3500 FN(tcp_check_syncookie), \ 3501 FN(sysctl_get_name), \ 3502 FN(sysctl_get_current_value), \ 3503 FN(sysctl_get_new_value), \ 3504 FN(sysctl_set_new_value), \ 3505 FN(strtol), \ 3506 FN(strtoul), \ 3507 FN(sk_storage_get), \ 3508 FN(sk_storage_delete), \ 3509 FN(send_signal), \ 3510 FN(tcp_gen_syncookie), \ 3511 FN(skb_output), \ 3512 FN(probe_read_user), \ 3513 FN(probe_read_kernel), \ 3514 FN(probe_read_user_str), \ 3515 FN(probe_read_kernel_str), \ 3516 FN(tcp_send_ack), \ 3517 FN(send_signal_thread), \ 3518 
FN(jiffies64), \ 3519 FN(read_branch_records), \ 3520 FN(get_ns_current_pid_tgid), \ 3521 FN(xdp_output), \ 3522 FN(get_netns_cookie), \ 3523 FN(get_current_ancestor_cgroup_id), \ 3524 FN(sk_assign), \ 3525 FN(ktime_get_boot_ns), \ 3526 FN(seq_printf), \ 3527 FN(seq_write), \ 3528 FN(sk_cgroup_id), \ 3529 FN(sk_ancestor_cgroup_id), \ 3530 FN(ringbuf_output), \ 3531 FN(ringbuf_reserve), \ 3532 FN(ringbuf_submit), \ 3533 FN(ringbuf_discard), \ 3534 FN(ringbuf_query), \ 3535 FN(csum_level), \ 3536 FN(skc_to_tcp6_sock), \ 3537 FN(skc_to_tcp_sock), \ 3538 FN(skc_to_tcp_timewait_sock), \ 3539 FN(skc_to_tcp_request_sock), \ 3540 FN(skc_to_udp6_sock), \ 3541 FN(get_task_stack), \ 3542 /* */ 3543 3544/* integer value in 'imm' field of BPF_CALL instruction selects which helper 3545 * function eBPF program intends to call 3546 */ 3547#define __BPF_ENUM_FN(x) BPF_FUNC_ ## x 3548enum bpf_func_id { 3549 __BPF_FUNC_MAPPER(__BPF_ENUM_FN) 3550 __BPF_FUNC_MAX_ID, 3551}; 3552#undef __BPF_ENUM_FN 3553 3554/* All flags used by eBPF helper functions, placed here. */ 3555 3556/* BPF_FUNC_skb_store_bytes flags. */ 3557enum { 3558 BPF_F_RECOMPUTE_CSUM = (1ULL << 0), 3559 BPF_F_INVALIDATE_HASH = (1ULL << 1), 3560}; 3561 3562/* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags. 3563 * First 4 bits are for passing the header field size. 3564 */ 3565enum { 3566 BPF_F_HDR_FIELD_MASK = 0xfULL, 3567}; 3568 3569/* BPF_FUNC_l4_csum_replace flags. */ 3570enum { 3571 BPF_F_PSEUDO_HDR = (1ULL << 4), 3572 BPF_F_MARK_MANGLED_0 = (1ULL << 5), 3573 BPF_F_MARK_ENFORCE = (1ULL << 6), 3574}; 3575 3576/* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */ 3577enum { 3578 BPF_F_INGRESS = (1ULL << 0), 3579}; 3580 3581/* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */ 3582enum { 3583 BPF_F_TUNINFO_IPV6 = (1ULL << 0), 3584}; 3585 3586/* flags for both BPF_FUNC_get_stackid and BPF_FUNC_get_stack. */ 3587enum { 3588 BPF_F_SKIP_FIELD_MASK = 0xffULL, 3589 BPF_F_USER_STACK = (1ULL << 8), 3590/* flags used by BPF_FUNC_get_stackid only. */ 3591 BPF_F_FAST_STACK_CMP = (1ULL << 9), 3592 BPF_F_REUSE_STACKID = (1ULL << 10), 3593/* flags used by BPF_FUNC_get_stack only. */ 3594 BPF_F_USER_BUILD_ID = (1ULL << 11), 3595}; 3596 3597/* BPF_FUNC_skb_set_tunnel_key flags. */ 3598enum { 3599 BPF_F_ZERO_CSUM_TX = (1ULL << 1), 3600 BPF_F_DONT_FRAGMENT = (1ULL << 2), 3601 BPF_F_SEQ_NUMBER = (1ULL << 3), 3602}; 3603 3604/* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and 3605 * BPF_FUNC_perf_event_read_value flags. 3606 */ 3607enum { 3608 BPF_F_INDEX_MASK = 0xffffffffULL, 3609 BPF_F_CURRENT_CPU = BPF_F_INDEX_MASK, 3610/* BPF_FUNC_perf_event_output for sk_buff input context. */ 3611 BPF_F_CTXLEN_MASK = (0xfffffULL << 32), 3612}; 3613 3614/* Current network namespace */ 3615enum { 3616 BPF_F_CURRENT_NETNS = (-1L), 3617}; 3618 3619/* BPF_FUNC_csum_level level values. */ 3620enum { 3621 BPF_CSUM_LEVEL_QUERY, 3622 BPF_CSUM_LEVEL_INC, 3623 BPF_CSUM_LEVEL_DEC, 3624 BPF_CSUM_LEVEL_RESET, 3625}; 3626 3627/* BPF_FUNC_skb_adjust_room flags. 
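 * The BPF_F_ADJ_ROOM_ENCAP_* flags describe the encapsulation header being
 * reserved; BPF_F_ADJ_ROOM_ENCAP_L2() below can additionally encode the
 * length of an inner MAC header into the upper bits of the same flags
 * argument.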
*/ 3628enum { 3629 BPF_F_ADJ_ROOM_FIXED_GSO = (1ULL << 0), 3630 BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = (1ULL << 1), 3631 BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = (1ULL << 2), 3632 BPF_F_ADJ_ROOM_ENCAP_L4_GRE = (1ULL << 3), 3633 BPF_F_ADJ_ROOM_ENCAP_L4_UDP = (1ULL << 4), 3634 BPF_F_ADJ_ROOM_NO_CSUM_RESET = (1ULL << 5), 3635}; 3636 3637enum { 3638 BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff, 3639 BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 56, 3640}; 3641 3642#define BPF_F_ADJ_ROOM_ENCAP_L2(len) (((__u64)len & \ 3643 BPF_ADJ_ROOM_ENCAP_L2_MASK) \ 3644 << BPF_ADJ_ROOM_ENCAP_L2_SHIFT) 3645 3646/* BPF_FUNC_sysctl_get_name flags. */ 3647enum { 3648 BPF_F_SYSCTL_BASE_NAME = (1ULL << 0), 3649}; 3650 3651/* BPF_FUNC_sk_storage_get flags */ 3652enum { 3653 BPF_SK_STORAGE_GET_F_CREATE = (1ULL << 0), 3654}; 3655 3656/* BPF_FUNC_read_branch_records flags. */ 3657enum { 3658 BPF_F_GET_BRANCH_RECORDS_SIZE = (1ULL << 0), 3659}; 3660 3661/* BPF_FUNC_bpf_ringbuf_commit, BPF_FUNC_bpf_ringbuf_discard, and 3662 * BPF_FUNC_bpf_ringbuf_output flags. 3663 */ 3664enum { 3665 BPF_RB_NO_WAKEUP = (1ULL << 0), 3666 BPF_RB_FORCE_WAKEUP = (1ULL << 1), 3667}; 3668 3669/* BPF_FUNC_bpf_ringbuf_query flags */ 3670enum { 3671 BPF_RB_AVAIL_DATA = 0, 3672 BPF_RB_RING_SIZE = 1, 3673 BPF_RB_CONS_POS = 2, 3674 BPF_RB_PROD_POS = 3, 3675}; 3676 3677/* BPF ring buffer constants */ 3678enum { 3679 BPF_RINGBUF_BUSY_BIT = (1U << 31), 3680 BPF_RINGBUF_DISCARD_BIT = (1U << 30), 3681 BPF_RINGBUF_HDR_SZ = 8, 3682}; 3683 3684/* BPF_FUNC_sk_assign flags in bpf_sk_lookup context. */ 3685enum { 3686 BPF_SK_LOOKUP_F_REPLACE = (1ULL << 0), 3687 BPF_SK_LOOKUP_F_NO_REUSEPORT = (1ULL << 1), 3688}; 3689 3690/* Mode for BPF_FUNC_skb_adjust_room helper. */ 3691enum bpf_adj_room_mode { 3692 BPF_ADJ_ROOM_NET, 3693 BPF_ADJ_ROOM_MAC, 3694}; 3695 3696/* Mode for BPF_FUNC_skb_load_bytes_relative helper. */ 3697enum bpf_hdr_start_off { 3698 BPF_HDR_START_MAC, 3699 BPF_HDR_START_NET, 3700}; 3701 3702/* Encapsulation type for BPF_FUNC_lwt_push_encap helper. */ 3703enum bpf_lwt_encap_mode { 3704 BPF_LWT_ENCAP_SEG6, 3705 BPF_LWT_ENCAP_SEG6_INLINE, 3706 BPF_LWT_ENCAP_IP, 3707}; 3708 3709#define __bpf_md_ptr(type, name) \ 3710union { \ 3711 type name; \ 3712 __u64 :64; \ 3713} __attribute__((aligned(8))) 3714 3715/* user accessible mirror of in-kernel sk_buff. 3716 * new fields can only be added to the end of this structure 3717 */ 3718struct __sk_buff { 3719 __u32 len; 3720 __u32 pkt_type; 3721 __u32 mark; 3722 __u32 queue_mapping; 3723 __u32 protocol; 3724 __u32 vlan_present; 3725 __u32 vlan_tci; 3726 __u32 vlan_proto; 3727 __u32 priority; 3728 __u32 ingress_ifindex; 3729 __u32 ifindex; 3730 __u32 tc_index; 3731 __u32 cb[5]; 3732 __u32 hash; 3733 __u32 tc_classid; 3734 __u32 data; 3735 __u32 data_end; 3736 __u32 napi_id; 3737 3738 /* Accessed by BPF_PROG_TYPE_sk_skb types from here to ... */ 3739 __u32 family; 3740 __u32 remote_ip4; /* Stored in network byte order */ 3741 __u32 local_ip4; /* Stored in network byte order */ 3742 __u32 remote_ip6[4]; /* Stored in network byte order */ 3743 __u32 local_ip6[4]; /* Stored in network byte order */ 3744 __u32 remote_port; /* Stored in network byte order */ 3745 __u32 local_port; /* stored in host byte order */ 3746 /* ... here. 
*/ 3747 3748 __u32 data_meta; 3749 __bpf_md_ptr(struct bpf_flow_keys *, flow_keys); 3750 __u64 tstamp; 3751 __u32 wire_len; 3752 __u32 gso_segs; 3753 __bpf_md_ptr(struct bpf_sock *, sk); 3754 __u32 gso_size; 3755}; 3756 3757struct bpf_tunnel_key { 3758 __u32 tunnel_id; 3759 union { 3760 __u32 remote_ipv4; 3761 __u32 remote_ipv6[4]; 3762 }; 3763 __u8 tunnel_tos; 3764 __u8 tunnel_ttl; 3765 __u16 tunnel_ext; /* Padding, future use. */ 3766 __u32 tunnel_label; 3767}; 3768 3769/* user accessible mirror of in-kernel xfrm_state. 3770 * new fields can only be added to the end of this structure 3771 */ 3772struct bpf_xfrm_state { 3773 __u32 reqid; 3774 __u32 spi; /* Stored in network byte order */ 3775 __u16 family; 3776 __u16 ext; /* Padding, future use. */ 3777 union { 3778 __u32 remote_ipv4; /* Stored in network byte order */ 3779 __u32 remote_ipv6[4]; /* Stored in network byte order */ 3780 }; 3781}; 3782 3783/* Generic BPF return codes which all BPF program types may support. 3784 * The values are binary compatible with their TC_ACT_* counter-part to 3785 * provide backwards compatibility with existing SCHED_CLS and SCHED_ACT 3786 * programs. 3787 * 3788 * XDP is handled seprately, see XDP_*. 3789 */ 3790enum bpf_ret_code { 3791 BPF_OK = 0, 3792 /* 1 reserved */ 3793 BPF_DROP = 2, 3794 /* 3-6 reserved */ 3795 BPF_REDIRECT = 7, 3796 /* >127 are reserved for prog type specific return codes. 3797 * 3798 * BPF_LWT_REROUTE: used by BPF_PROG_TYPE_LWT_IN and 3799 * BPF_PROG_TYPE_LWT_XMIT to indicate that skb had been 3800 * changed and should be routed based on its new L3 header. 3801 * (This is an L3 redirect, as opposed to L2 redirect 3802 * represented by BPF_REDIRECT above). 3803 */ 3804 BPF_LWT_REROUTE = 128, 3805}; 3806 3807struct bpf_sock { 3808 __u32 bound_dev_if; 3809 __u32 family; 3810 __u32 type; 3811 __u32 protocol; 3812 __u32 mark; 3813 __u32 priority; 3814 /* IP address also allows 1 and 2 bytes access */ 3815 __u32 src_ip4; 3816 __u32 src_ip6[4]; 3817 __u32 src_port; /* host byte order */ 3818 __u32 dst_port; /* network byte order */ 3819 __u32 dst_ip4; 3820 __u32 dst_ip6[4]; 3821 __u32 state; 3822 __s32 rx_queue_mapping; 3823}; 3824 3825struct bpf_tcp_sock { 3826 __u32 snd_cwnd; /* Sending congestion window */ 3827 __u32 srtt_us; /* smoothed round trip time << 3 in usecs */ 3828 __u32 rtt_min; 3829 __u32 snd_ssthresh; /* Slow start size threshold */ 3830 __u32 rcv_nxt; /* What we want to receive next */ 3831 __u32 snd_nxt; /* Next sequence we send */ 3832 __u32 snd_una; /* First byte we want an ack for */ 3833 __u32 mss_cache; /* Cached effective mss, not including SACKS */ 3834 __u32 ecn_flags; /* ECN status bits. */ 3835 __u32 rate_delivered; /* saved rate sample: packets delivered */ 3836 __u32 rate_interval_us; /* saved rate sample: time elapsed */ 3837 __u32 packets_out; /* Packets which are "in flight" */ 3838 __u32 retrans_out; /* Retransmitted packets out */ 3839 __u32 total_retrans; /* Total retransmits for entire connection */ 3840 __u32 segs_in; /* RFC4898 tcpEStatsPerfSegsIn 3841 * total number of segments in. 3842 */ 3843 __u32 data_segs_in; /* RFC4898 tcpEStatsPerfDataSegsIn 3844 * total number of data segments in. 3845 */ 3846 __u32 segs_out; /* RFC4898 tcpEStatsPerfSegsOut 3847 * The total number of segments sent. 3848 */ 3849 __u32 data_segs_out; /* RFC4898 tcpEStatsPerfDataSegsOut 3850 * total number of data segments sent. 
3851 */ 3852 __u32 lost_out; /* Lost packets */ 3853 __u32 sacked_out; /* SACK'd packets */ 3854 __u64 bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived 3855 * sum(delta(rcv_nxt)), or how many bytes 3856 * were acked. 3857 */ 3858 __u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked 3859 * sum(delta(snd_una)), or how many bytes 3860 * were acked. 3861 */ 3862 __u32 dsack_dups; /* RFC4898 tcpEStatsStackDSACKDups 3863 * total number of DSACK blocks received 3864 */ 3865 __u32 delivered; /* Total data packets delivered incl. rexmits */ 3866 __u32 delivered_ce; /* Like the above but only ECE marked packets */ 3867 __u32 icsk_retransmits; /* Number of unrecovered [RTO] timeouts */ 3868}; 3869 3870struct bpf_sock_tuple { 3871 union { 3872 struct { 3873 __be32 saddr; 3874 __be32 daddr; 3875 __be16 sport; 3876 __be16 dport; 3877 } ipv4; 3878 struct { 3879 __be32 saddr[4]; 3880 __be32 daddr[4]; 3881 __be16 sport; 3882 __be16 dport; 3883 } ipv6; 3884 }; 3885}; 3886 3887struct bpf_xdp_sock { 3888 __u32 queue_id; 3889}; 3890 3891#define XDP_PACKET_HEADROOM 256 3892 3893/* User return codes for XDP prog type. 3894 * A valid XDP program must return one of these defined values. All other 3895 * return codes are reserved for future use. Unknown return codes will 3896 * result in packet drops and a warning via bpf_warn_invalid_xdp_action(). 3897 */ 3898enum xdp_action { 3899 XDP_ABORTED = 0, 3900 XDP_DROP, 3901 XDP_PASS, 3902 XDP_TX, 3903 XDP_REDIRECT, 3904}; 3905 3906/* user accessible metadata for XDP packet hook 3907 * new fields must be added to the end of this structure 3908 */ 3909struct xdp_md { 3910 __u32 data; 3911 __u32 data_end; 3912 __u32 data_meta; 3913 /* Below access go through struct xdp_rxq_info */ 3914 __u32 ingress_ifindex; /* rxq->dev->ifindex */ 3915 __u32 rx_queue_index; /* rxq->queue_index */ 3916 3917 __u32 egress_ifindex; /* txq->dev->ifindex */ 3918}; 3919 3920/* DEVMAP map-value layout 3921 * 3922 * The struct data-layout of map-value is a configuration interface. 3923 * New members can only be added to the end of this structure. 3924 */ 3925struct bpf_devmap_val { 3926 __u32 ifindex; /* device index */ 3927 union { 3928 int fd; /* prog fd on map write */ 3929 __u32 id; /* prog id on map read */ 3930 } bpf_prog; 3931}; 3932 3933/* CPUMAP map-value layout 3934 * 3935 * The struct data-layout of map-value is a configuration interface. 3936 * New members can only be added to the end of this structure. 3937 */ 3938struct bpf_cpumap_val { 3939 __u32 qsize; /* queue size to remote target CPU */ 3940 union { 3941 int fd; /* prog fd on map write */ 3942 __u32 id; /* prog id on map read */ 3943 } bpf_prog; 3944}; 3945 3946enum sk_action { 3947 SK_DROP = 0, 3948 SK_PASS, 3949}; 3950 3951/* user accessible metadata for SK_MSG packet hook, new fields must 3952 * be added to the end of this structure 3953 */ 3954struct sk_msg_md { 3955 __bpf_md_ptr(void *, data); 3956 __bpf_md_ptr(void *, data_end); 3957 3958 __u32 family; 3959 __u32 remote_ip4; /* Stored in network byte order */ 3960 __u32 local_ip4; /* Stored in network byte order */ 3961 __u32 remote_ip6[4]; /* Stored in network byte order */ 3962 __u32 local_ip6[4]; /* Stored in network byte order */ 3963 __u32 remote_port; /* Stored in network byte order */ 3964 __u32 local_port; /* stored in host byte order */ 3965 __u32 size; /* Total size of sk_msg */ 3966 3967 __bpf_md_ptr(struct bpf_sock *, sk); /* current socket */ 3968}; 3969 3970struct sk_reuseport_md { 3971 /* 3972 * Start of directly accessible data. 
It begins from 3973 * the tcp/udp header. 3974 */ 3975 __bpf_md_ptr(void *, data); 3976 /* End of directly accessible data */ 3977 __bpf_md_ptr(void *, data_end); 3978 /* 3979 * Total length of packet (starting from the tcp/udp header). 3980 * Note that the directly accessible bytes (data_end - data) 3981 * could be less than this "len". Those bytes could be 3982 * indirectly read by a helper "bpf_skb_load_bytes()". 3983 */ 3984 __u32 len; 3985 /* 3986 * Eth protocol in the mac header (network byte order). e.g. 3987 * ETH_P_IP(0x0800) and ETH_P_IPV6(0x86DD) 3988 */ 3989 __u32 eth_protocol; 3990 __u32 ip_protocol; /* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */ 3991 __u32 bind_inany; /* Is sock bound to an INANY address? */ 3992 __u32 hash; /* A hash of the packet 4 tuples */ 3993}; 3994 3995#define BPF_TAG_SIZE 8 3996 3997struct bpf_prog_info { 3998 __u32 type; 3999 __u32 id; 4000 __u8 tag[BPF_TAG_SIZE];
4001 __u32 jited_prog_len; 4002 __u32 xlated_prog_len; 4003 __aligned_u64 jited_prog_insns; 4004 __aligned_u64 xlated_prog_insns; 4005 __u64 load_time; /* ns since boottime */ 4006 __u32 created_by_uid; 4007 __u32 nr_map_ids; 4008 __aligned_u64 map_ids; 4009 char name[BPF_OBJ_NAME_LEN]; 4010 __u32 ifindex; 4011 __u32 gpl_compatible:1; 4012 __u32 :31; /* alignment pad */ 4013 __u64 netns_dev; 4014 __u64 netns_ino; 4015 __u32 nr_jited_ksyms; 4016 __u32 nr_jited_func_lens; 4017 __aligned_u64 jited_ksyms; 4018 __aligned_u64 jited_func_lens; 4019 __u32 btf_id; 4020 __u32 func_info_rec_size; 4021 __aligned_u64 func_info; 4022 __u32 nr_func_info; 4023 __u32 nr_line_info; 4024 __aligned_u64 line_info; 4025 __aligned_u64 jited_line_info; 4026 __u32 nr_jited_line_info; 4027 __u32 line_info_rec_size; 4028 __u32 jited_line_info_rec_size; 4029 __u32 nr_prog_tags; 4030 __aligned_u64 prog_tags; 4031 __u64 run_time_ns; 4032 __u64 run_cnt; 4033} __attribute__((aligned(8))); 4034 4035struct bpf_map_info { 4036 __u32 type; 4037 __u32 id; 4038 __u32 key_size; 4039 __u32 value_size; 4040 __u32 max_entries; 4041 __u32 map_flags; 4042 char name[BPF_OBJ_NAME_LEN]; 4043 __u32 ifindex; 4044 __u32 btf_vmlinux_value_type_id; 4045 __u64 netns_dev; 4046 __u64 netns_ino; 4047 __u32 btf_id; 4048 __u32 btf_key_type_id; 4049 __u32 btf_value_type_id; 4050} __attribute__((aligned(8))); 4051 4052struct bpf_btf_info { 4053 __aligned_u64 btf; 4054 __u32 btf_size; 4055 __u32 id; 4056} __attribute__((aligned(8))); 4057 4058struct bpf_link_info { 4059 __u32 type; 4060 __u32 id; 4061 __u32 prog_id; 4062 union { 4063 struct { 4064 __aligned_u64 tp_name; /* in/out: tp_name buffer ptr */ 4065 __u32 tp_name_len; /* in/out: tp_name buffer len */ 4066 } raw_tracepoint; 4067 struct { 4068 __u32 attach_type; 4069 } tracing; 4070 struct { 4071 __u64 cgroup_id; 4072 __u32 attach_type; 4073 } cgroup; 4074 struct { 4075 __u32 netns_ino; 4076 __u32 attach_type; 4077 } netns; 4078 struct { 4079 __u32 ifindex; 4080 } xdp; 4081 }; 4082} __attribute__((aligned(8))); 4083 4084/* User bpf_sock_addr struct to access socket fields and sockaddr struct passed 4085 * by user and intended to be used by socket (e.g. to bind to, depends on 4086 * attach type). 4087 */ 4088struct bpf_sock_addr { 4089 __u32 user_family; /* Allows 4-byte read, but no write. */ 4090 __u32 user_ip4; /* Allows 1,2,4-byte read and 4-byte write. 4091 * Stored in network byte order. 4092 */ 4093 __u32 user_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write. 4094 * Stored in network byte order. 4095 */ 4096 __u32 user_port; /* Allows 1,2,4-byte read and 4-byte write. 4097 * Stored in network byte order 4098 */ 4099 __u32 family; /* Allows 4-byte read, but no write */ 4100 __u32 type; /* Allows 4-byte read, but no write */ 4101 __u32 protocol; /* Allows 4-byte read, but no write */ 4102 __u32 msg_src_ip4; /* Allows 1,2,4-byte read and 4-byte write. 4103 * Stored in network byte order. 4104 */ 4105 __u32 msg_src_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write. 4106 * Stored in network byte order. 4107 */ 4108 __bpf_md_ptr(struct bpf_sock *, sk); 4109}; 4110 4111/* User bpf_sock_ops struct to access socket values and specify request ops 4112 * and their replies. 4113 * Some of this fields are in network (bigendian) byte order and may need 4114 * to be converted before use (bpf_ntohl() defined in samples/bpf/bpf_endian.h). 
4115 * New fields can only be added at the end of this structure 4116 */ 4117struct bpf_sock_ops { 4118 __u32 op; 4119 union { 4120 __u32 args[4]; /* Optionally passed to bpf program */ 4121 __u32 reply; /* Returned by bpf program */ 4122 __u32 replylong[4]; /* Optionally returned by bpf prog */ 4123 }; 4124 __u32 family; 4125 __u32 remote_ip4; /* Stored in network byte order */ 4126 __u32 local_ip4; /* Stored in network byte order */ 4127 __u32 remote_ip6[4]; /* Stored in network byte order */ 4128 __u32 local_ip6[4]; /* Stored in network byte order */ 4129 __u32 remote_port; /* Stored in network byte order */ 4130 __u32 local_port; /* stored in host byte order */ 4131 __u32 is_fullsock; /* Some TCP fields are only valid if 4132 * there is a full socket. If not, the 4133 * fields read as zero. 4134 */ 4135 __u32 snd_cwnd; 4136 __u32 srtt_us; /* Averaged RTT << 3 in usecs */ 4137 __u32 bpf_sock_ops_cb_flags; /* flags defined in uapi/linux/tcp.h */ 4138 __u32 state; 4139 __u32 rtt_min; 4140 __u32 snd_ssthresh; 4141 __u32 rcv_nxt; 4142 __u32 snd_nxt; 4143 __u32 snd_una; 4144 __u32 mss_cache; 4145 __u32 ecn_flags; 4146 __u32 rate_delivered; 4147 __u32 rate_interval_us; 4148 __u32 packets_out; 4149 __u32 retrans_out; 4150 __u32 total_retrans; 4151 __u32 segs_in; 4152 __u32 data_segs_in; 4153 __u32 segs_out; 4154 __u32 data_segs_out; 4155 __u32 lost_out; 4156 __u32 sacked_out; 4157 __u32 sk_txhash; 4158 __u64 bytes_received; 4159 __u64 bytes_acked; 4160 __bpf_md_ptr(struct bpf_sock *, sk); 4161}; 4162 4163/* Definitions for bpf_sock_ops_cb_flags */ 4164enum { 4165 BPF_SOCK_OPS_RTO_CB_FLAG = (1<<0), 4166 BPF_SOCK_OPS_RETRANS_CB_FLAG = (1<<1), 4167 BPF_SOCK_OPS_STATE_CB_FLAG = (1<<2), 4168 BPF_SOCK_OPS_RTT_CB_FLAG = (1<<3), 4169/* Mask of all currently supported cb flags */ 4170 BPF_SOCK_OPS_ALL_CB_FLAGS = 0xF, 4171}; 4172 4173/* List of known BPF sock_ops operators. 4174 * New entries can only be added at the end 4175 */ 4176enum { 4177 BPF_SOCK_OPS_VOID, 4178 BPF_SOCK_OPS_TIMEOUT_INIT, /* Should return SYN-RTO value to use or 4179 * -1 if default value should be used 4180 */ 4181 BPF_SOCK_OPS_RWND_INIT, /* Should return initial advertized 4182 * window (in packets) or -1 if default 4183 * value should be used 4184 */ 4185 BPF_SOCK_OPS_TCP_CONNECT_CB, /* Calls BPF program right before an 4186 * active connection is initialized 4187 */ 4188 BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB, /* Calls BPF program when an 4189 * active connection is 4190 * established 4191 */ 4192 BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, /* Calls BPF program when a 4193 * passive connection is 4194 * established 4195 */ 4196 BPF_SOCK_OPS_NEEDS_ECN, /* If connection's congestion control 4197 * needs ECN 4198 */ 4199 BPF_SOCK_OPS_BASE_RTT, /* Get base RTT. The correct value is 4200 * based on the path and may be 4201 * dependent on the congestion control 4202 * algorithm. In general it indicates 4203 * a congestion threshold. RTTs above 4204 * this indicate congestion 4205 */ 4206 BPF_SOCK_OPS_RTO_CB, /* Called when an RTO has triggered. 4207 * Arg1: value of icsk_retransmits 4208 * Arg2: value of icsk_rto 4209 * Arg3: whether RTO has expired 4210 */ 4211 BPF_SOCK_OPS_RETRANS_CB, /* Called when skb is retransmitted. 4212 * Arg1: sequence number of 1st byte 4213 * Arg2: # segments 4214 * Arg3: return value of 4215 * tcp_transmit_skb (0 => success) 4216 */ 4217 BPF_SOCK_OPS_STATE_CB, /* Called when TCP changes state. 
4218 * Arg1: old_state 4219 * Arg2: new_state 4220 */ 4221 BPF_SOCK_OPS_TCP_LISTEN_CB, /* Called on listen(2), right after 4222 * socket transition to LISTEN state. 4223 */ 4224 BPF_SOCK_OPS_RTT_CB, /* Called on every RTT. 4225 */ 4226}; 4227 4228/* List of TCP states. There is a build check in net/ipv4/tcp.c to detect 4229 * changes between the TCP and BPF versions. Ideally this should never happen. 4230 * If it does, we need to add code to convert them before calling 4231 * the BPF sock_ops function. 4232 */ 4233enum { 4234 BPF_TCP_ESTABLISHED = 1, 4235 BPF_TCP_SYN_SENT, 4236 BPF_TCP_SYN_RECV, 4237 BPF_TCP_FIN_WAIT1, 4238 BPF_TCP_FIN_WAIT2, 4239 BPF_TCP_TIME_WAIT, 4240 BPF_TCP_CLOSE, 4241 BPF_TCP_CLOSE_WAIT, 4242 BPF_TCP_LAST_ACK, 4243 BPF_TCP_LISTEN, 4244 BPF_TCP_CLOSING, /* Now a valid state */ 4245 BPF_TCP_NEW_SYN_RECV, 4246 4247 BPF_TCP_MAX_STATES /* Leave at the end! */ 4248}; 4249 4250enum { 4251 TCP_BPF_IW = 1001, /* Set TCP initial congestion window */ 4252 TCP_BPF_SNDCWND_CLAMP = 1002, /* Set sndcwnd_clamp */ 4253}; 4254 4255struct bpf_perf_event_value { 4256 __u64 counter; 4257 __u64 enabled; 4258 __u64 running; 4259}; 4260 4261enum { 4262 BPF_DEVCG_ACC_MKNOD = (1ULL << 0), 4263 BPF_DEVCG_ACC_READ = (1ULL << 1), 4264 BPF_DEVCG_ACC_WRITE = (1ULL << 2), 4265}; 4266 4267enum { 4268 BPF_DEVCG_DEV_BLOCK = (1ULL << 0), 4269 BPF_DEVCG_DEV_CHAR = (1ULL << 1), 4270}; 4271 4272struct bpf_cgroup_dev_ctx { 4273 /* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */ 4274 __u32 access_type; 4275 __u32 major; 4276 __u32 minor; 4277}; 4278 4279struct bpf_raw_tracepoint_args { 4280 __u64 args[0]; 4281}; 4282 4283/* DIRECT: Skip the FIB rules and go to FIB table associated with device 4284 * OUTPUT: Do lookup from egress perspective; default is ingress 4285 */ 4286enum { 4287 BPF_FIB_LOOKUP_DIRECT = (1U << 0), 4288 BPF_FIB_LOOKUP_OUTPUT = (1U << 1), 4289}; 4290 4291enum { 4292 BPF_FIB_LKUP_RET_SUCCESS, /* lookup successful */ 4293 BPF_FIB_LKUP_RET_BLACKHOLE, /* dest is blackholed; can be dropped */ 4294 BPF_FIB_LKUP_RET_UNREACHABLE, /* dest is unreachable; can be dropped */ 4295 BPF_FIB_LKUP_RET_PROHIBIT, /* dest not allowed; can be dropped */ 4296 BPF_FIB_LKUP_RET_NOT_FWDED, /* packet is not forwarded */ 4297 BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */ 4298 BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */ 4299 BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */ 4300 BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */ 4301}; 4302 4303struct bpf_fib_lookup { 4304 /* input: network family for lookup (AF_INET, AF_INET6) 4305 * output: network family of egress nexthop 4306 */ 4307 __u8 family; 4308 4309 /* set if lookup is to consider L4 data - e.g., FIB rules */ 4310 __u8 l4_protocol; 4311 __be16 sport; 4312 __be16 dport; 4313 4314 /* total length of packet from network header - used for MTU check */ 4315 __u16 tot_len; 4316 4317 /* input: L3 device index for lookup 4318 * output: device index from FIB lookup 4319 */ 4320 __u32 ifindex; 4321 4322 union { 4323 /* inputs to lookup */ 4324 __u8 tos; /* AF_INET */ 4325 __be32 flowinfo; /* AF_INET6, flow_label + priority */ 4326 4327 /* output: metric of fib result (IPv4/IPv6 only) */ 4328 __u32 rt_metric; 4329 }; 4330 4331 union { 4332 __be32 ipv4_src; 4333 __u32 ipv6_src[4]; /* in6_addr; network order */ 4334 }; 4335 4336 /* input to bpf_fib_lookup, ipv{4,6}_dst is destination address in 4337 * network header. 
output: bpf_fib_lookup sets to gateway address 4338 * if FIB lookup returns gateway route 4339 */ 4340 union { 4341 __be32 ipv4_dst; 4342 __u32 ipv6_dst[4]; /* in6_addr; network order */ 4343 }; 4344 4345 /* output */ 4346 __be16 h_vlan_proto; 4347 __be16 h_vlan_TCI; 4348 __u8 smac[6]; /* ETH_ALEN */ 4349 __u8 dmac[6]; /* ETH_ALEN */ 4350}; 4351 4352enum bpf_task_fd_type { 4353 BPF_FD_TYPE_RAW_TRACEPOINT, /* tp name */ 4354 BPF_FD_TYPE_TRACEPOINT, /* tp name */ 4355 BPF_FD_TYPE_KPROBE, /* (symbol + offset) or addr */ 4356 BPF_FD_TYPE_KRETPROBE, /* (symbol + offset) or addr */ 4357 BPF_FD_TYPE_UPROBE, /* filename + offset */ 4358 BPF_FD_TYPE_URETPROBE, /* filename + offset */ 4359}; 4360 4361enum { 4362 BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = (1U << 0), 4363 BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = (1U << 1), 4364 BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = (1U << 2), 4365}; 4366 4367struct bpf_flow_keys { 4368 __u16 nhoff; 4369 __u16 thoff; 4370 __u16 addr_proto; /* ETH_P_* of valid addrs */ 4371 __u8 is_frag; 4372 __u8 is_first_frag; 4373 __u8 is_encap; 4374 __u8 ip_proto; 4375 __be16 n_proto; 4376 __be16 sport; 4377 __be16 dport; 4378 union { 4379 struct { 4380 __be32 ipv4_src; 4381 __be32 ipv4_dst; 4382 }; 4383 struct { 4384 __u32 ipv6_src[4]; /* in6_addr; network order */ 4385 __u32 ipv6_dst[4]; /* in6_addr; network order */ 4386 }; 4387 }; 4388 __u32 flags; 4389 __be32 flow_label; 4390}; 4391 4392struct bpf_func_info { 4393 __u32 insn_off; 4394 __u32 type_id; 4395}; 4396 4397#define BPF_LINE_INFO_LINE_NUM(line_col) ((line_col) >> 10) 4398#define BPF_LINE_INFO_LINE_COL(line_col) ((line_col) & 0x3ff) 4399 4400struct bpf_line_info { 4401 __u32 insn_off; 4402 __u32 file_name_off; 4403 __u32 line_off; 4404 __u32 line_col; 4405}; 4406 4407struct bpf_spin_lock { 4408 __u32 val; 4409}; 4410 4411struct bpf_sysctl { 4412 __u32 write; /* Sysctl is being read (= 0) or written (= 1). 4413 * Allows 1,2,4-byte read, but no write. 4414 */ 4415 __u32 file_pos; /* Sysctl file position to read from, write to. 4416 * Allows 1,2,4-byte read an 4-byte write. 4417 */ 4418}; 4419 4420struct bpf_sockopt { 4421 __bpf_md_ptr(struct bpf_sock *, sk); 4422 __bpf_md_ptr(void *, optval); 4423 __bpf_md_ptr(void *, optval_end); 4424 4425 __s32 level; 4426 __s32 optname; 4427 __s32 optlen; 4428 __s32 retval; 4429}; 4430 4431struct bpf_pidns_info { 4432 __u32 pid; 4433 __u32 tgid; 4434}; 4435 4436/* User accessible data for SK_LOOKUP programs. Add new fields at the end. */ 4437struct bpf_sk_lookup { 4438 __bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */ 4439 4440 __u32 family; /* Protocol family (AF_INET, AF_INET6) */ 4441 __u32 protocol; /* IP protocol (IPPROTO_TCP, IPPROTO_UDP) */ 4442 __u32 remote_ip4; /* Network byte order */ 4443 __u32 remote_ip6[4]; /* Network byte order */ 4444 __u32 remote_port; /* Network byte order */ 4445 __u32 local_ip4; /* Network byte order */ 4446 __u32 local_ip6[4]; /* Network byte order */ 4447 __u32 local_port; /* Host byte order */ 4448}; 4449 4450#endif /* _UAPI__LINUX_BPF_H__ */ 4451
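
/* Illustrative only, not part of the UAPI: a minimal BPF_PROG_TYPE_SK_LOOKUP
 * program that steers new connections to a socket with bpf_sk_assign(). The
 * SEC() annotation, the bpf_map_lookup_elem() call and the
 * BPF_MAP_TYPE_SOCKMAP map named sock_map are libbpf-style assumptions of
 * this sketch, declared elsewhere:
 *
 *	SEC("sk_lookup")
 *	int select_sock(struct bpf_sk_lookup *ctx)
 *	{
 *		__u32 key = 0;
 *		struct bpf_sock *sk;
 *		long err;
 *
 *		sk = bpf_map_lookup_elem(&sock_map, &key);
 *		if (!sk)
 *			return SK_PASS;
 *		err = bpf_sk_assign(ctx, sk, 0);
 *		bpf_sk_release(sk);
 *		return err ? SK_DROP : SK_PASS;
 *	}
 */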