1/* SPDX-License-Identifier: GPL-2.0-only */ 2/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com 3 */ 4#ifndef _LINUX_BPF_VERIFIER_H 5#define _LINUX_BPF_VERIFIER_H 1 6 7#include <linux/bpf.h> /* for enum bpf_reg_type */ 8#include <linux/btf.h> /* for struct btf and btf_id() */ 9#include <linux/filter.h> /* for MAX_BPF_STACK */ 10#include <linux/tnum.h> 11 12/* Maximum variable offset umax_value permitted when resolving memory accesses. 13 * In practice this is far bigger than any realistic pointer offset; this limit 14 * ensures that umax_value + (int)off + (int)size cannot overflow a u64. 15 */ 16#define BPF_MAX_VAR_OFF (1 << 29) 17/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]. This ensures 18 * that converting umax_value to int cannot overflow. 19 */ 20#define BPF_MAX_VAR_SIZ (1 << 29) 21 22/* Liveness marks, used for registers and spilled-regs (in stack slots). 23 * Read marks propagate upwards until they find a write mark; they record that 24 * "one of this state's descendants read this reg" (and therefore the reg is 25 * relevant for states_equal() checks). 26 * Write marks collect downwards and do not propagate; they record that "the 27 * straight-line code that reached this state (from its parent) wrote this reg" 28 * (and therefore that reads propagated from this state or its descendants 29 * should not propagate to its parent). 30 * A state with a write mark can receive read marks; it just won't propagate 31 * them to its parent, since the write mark is a property, not of the state, 32 * but of the link between it and its parent. See mark_reg_read() and 33 * mark_stack_slot_read() in kernel/bpf/verifier.c. 34 */ 35enum bpf_reg_liveness { 36 REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */ 37 REG_LIVE_READ32 = 0x1, /* reg was read, so we're sensitive to initial value */ 38 REG_LIVE_READ64 = 0x2, /* likewise, but full 64-bit content matters */ 39 REG_LIVE_READ = REG_LIVE_READ32 | REG_LIVE_READ64, 40 REG_LIVE_WRITTEN = 0x4, /* reg was written first, screening off later reads */ 41 REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */ 42}; 43 44struct bpf_reg_state { 45 /* Ordering of fields matters. See states_equal() */ 46 enum bpf_reg_type type; 47 /* Fixed part of pointer offset, pointer types only */ 48 s32 off; 49 union { 50 /* valid when type == PTR_TO_PACKET */ 51 int range; 52 53 /* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE | 54 * PTR_TO_MAP_VALUE_OR_NULL 55 */ 56 struct { 57 struct bpf_map *map_ptr; 58 /* To distinguish map lookups from outer map 59 * the map_uid is non-zero for registers 60 * pointing to inner maps. 61 */ 62 u32 map_uid; 63 }; 64 65 /* for PTR_TO_BTF_ID */ 66 struct { 67 struct btf *btf; 68 u32 btf_id; 69 }; 70 71 u32 mem_size; /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */ 72 73 /* Max size from any of the above. */ 74 struct { 75 unsigned long raw1; 76 unsigned long raw2; 77 } raw; 78 79 u32 subprogno; /* for PTR_TO_FUNC */ 80 }; 81 /* For PTR_TO_PACKET, used to find other pointers with the same variable 82 * offset, so they can share range knowledge. 83 * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we 84 * came from, when one is tested for != NULL. 85 * For PTR_TO_MEM_OR_NULL this is used to identify memory allocation 86 * for the purpose of tracking that it's freed. 87 * For PTR_TO_SOCKET this is used to share which pointers retain the 88 * same reference to the socket, to determine proper reference freeing. 89 */ 90 u32 id; 91 /* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned 92 * from a pointer-cast helper, bpf_sk_fullsock() and 93 * bpf_tcp_sock(). 94 * 95 * Consider the following where "sk" is a reference counted 96 * pointer returned from "sk = bpf_sk_lookup_tcp();": 97 * 98 * 1: sk = bpf_sk_lookup_tcp(); 99 * 2: if (!sk) { return 0; } 100 * 3: fullsock = bpf_sk_fullsock(sk); 101 * 4: if (!fullsock) { bpf_sk_release(sk); return 0; } 102 * 5: tp = bpf_tcp_sock(fullsock); 103 * 6: if (!tp) { bpf_sk_release(sk); return 0; } 104 * 7: bpf_sk_release(sk); 105 * 8: snd_cwnd = tp->snd_cwnd; // verifier will complain 106 * 107 * After bpf_sk_release(sk) at line 7, both "fullsock" ptr and 108 * "tp" ptr should be invalidated also. In order to do that, 109 * the reg holding "fullsock" and "sk" need to remember 110 * the original refcounted ptr id (i.e. sk_reg->id) in ref_obj_id 111 * such that the verifier can reset all regs which have 112 * ref_obj_id matching the sk_reg->id. 113 * 114 * sk_reg->ref_obj_id is set to sk_reg->id at line 1. 115 * sk_reg->id will stay as NULL-marking purpose only. 116 * After NULL-marking is done, sk_reg->id can be reset to 0. 117 * 118 * After "fullsock = bpf_sk_fullsock(sk);" at line 3, 119 * fullsock_reg->ref_obj_id is set to sk_reg->ref_obj_id. 120 * 121 * After "tp = bpf_tcp_sock(fullsock);" at line 5, 122 * tp_reg->ref_obj_id is set to fullsock_reg->ref_obj_id 123 * which is the same as sk_reg->ref_obj_id. 124 * 125 * From the verifier perspective, if sk, fullsock and tp 126 * are not NULL, they are the same ptr with different 127 * reg->type. In particular, bpf_sk_release(tp) is also 128 * allowed and has the same effect as bpf_sk_release(sk). 129 */ 130 u32 ref_obj_id; 131 /* For scalar types (SCALAR_VALUE), this represents our knowledge of 132 * the actual value. 133 * For pointer types, this represents the variable part of the offset 134 * from the pointed-to object, and is shared with all bpf_reg_states 135 * with the same id as us. 136 */ 137 struct tnum var_off; 138 /* Used to determine if any memory access using this register will 139 * result in a bad access. 140 * These refer to the same value as var_off, not necessarily the actual 141 * contents of the register. 142 */ 143 s64 smin_value; /* minimum possible (s64)value */ 144 s64 smax_value; /* maximum possible (s64)value */ 145 u64 umin_value; /* minimum possible (u64)value */ 146 u64 umax_value; /* maximum possible (u64)value */ 147 s32 s32_min_value; /* minimum possible (s32)value */ 148 s32 s32_max_value; /* maximum possible (s32)value */ 149 u32 u32_min_value; /* minimum possible (u32)value */ 150 u32 u32_max_value; /* maximum possible (u32)value */ 151 /* parentage chain for liveness checking */ 152 struct bpf_reg_state *parent; 153 /* Inside the callee two registers can be both PTR_TO_STACK like 154 * R1=fp-8 and R2=fp-8, but one of them points to this function stack 155 * while another to the caller's stack. To differentiate them 'frameno' 156 * is used which is an index in bpf_verifier_state->frame[] array 157 * pointing to bpf_func_state. 158 */ 159 u32 frameno; 160 /* Tracks subreg definition. The stored value is the insn_idx of the 161 * writing insn. This is safe because subreg_def is used before any insn 162 * patching which only happens after main verification finished. 163 */ 164 s32 subreg_def; 165 enum bpf_reg_liveness live; 166 /* if (!precise && SCALAR_VALUE) min/max/tnum don't affect safety */ 167 bool precise; 168}; 169 170enum bpf_stack_slot_type { 171 STACK_INVALID, /* nothing was stored in this stack slot */ 172 STACK_SPILL, /* register spilled into stack */ 173 STACK_MISC, /* BPF program wrote some data into this slot */ 174 STACK_ZERO, /* BPF program wrote constant zero */ 175}; 176 177#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */ 178 179struct bpf_stack_state { 180 struct bpf_reg_state spilled_ptr; 181 u8 slot_type[BPF_REG_SIZE]; 182}; 183 184struct bpf_reference_state { 185 /* Track each reference created with a unique id, even if the same 186 * instruction creates the reference multiple times (eg, via CALL). 187 */ 188 int id; 189 /* Instruction where the allocation of this reference occurred. This 190 * is used purely to inform the user of a reference leak. 191 */ 192 int insn_idx; 193}; 194 195/* state of the program: 196 * type of all registers and stack info 197 */ 198struct bpf_func_state { 199 struct bpf_reg_state regs[MAX_BPF_REG]; 200 /* index of call instruction that called into this func */ 201 int callsite; 202 /* stack frame number of this function state from pov of 203 * enclosing bpf_verifier_state. 204 * 0 = main function, 1 = first callee. 205 */ 206 u32 frameno; 207 /* subprog number == index within subprog_info 208 * zero == main subprog 209 */ 210 u32 subprogno; 211 /* Every bpf_timer_start will increment async_entry_cnt. 212 * It's used to distinguish: 213 * void foo(void) { for(;;); } 214 * void foo(void) { bpf_timer_set_callback(,foo); } 215 */ 216 u32 async_entry_cnt; 217 bool in_callback_fn; 218 bool in_async_callback_fn; 219 220 /* The following fields should be last. See copy_func_state() */ 221 int acquired_refs; 222 struct bpf_reference_state *refs; 223 int allocated_stack; 224 struct bpf_stack_state *stack; 225}; 226 227struct bpf_idx_pair { 228 u32 prev_idx; 229 u32 idx; 230}; 231 232struct bpf_id_pair { 233 u32 old; 234 u32 cur; 235}; 236 237/* Maximum number of register states that can exist at once */ 238#define BPF_ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) 239#define MAX_CALL_FRAMES 8 240struct bpf_verifier_state { 241 /* call stack tracking */ 242 struct bpf_func_state *frame[MAX_CALL_FRAMES]; 243 struct bpf_verifier_state *parent; 244 /* 245 * 'branches' field is the number of branches left to explore: 246 * 0 - all possible paths from this state reached bpf_exit or 247 * were safely pruned 248 * 1 - at least one path is being explored. 249 * This state hasn't reached bpf_exit 250 * 2 - at least two paths are being explored. 251 * This state is an immediate parent of two children. 252 * One is fallthrough branch with branches==1 and another 253 * state is pushed into stack (to be explored later) also with 254 * branches==1. The parent of this state has branches==1. 255 * The verifier state tree connected via 'parent' pointer looks like: 256 * 1 257 * 1 258 * 2 -> 1 (first 'if' pushed into stack) 259 * 1 260 * 2 -> 1 (second 'if' pushed into stack) 261 * 1 262 * 1 263 * 1 bpf_exit. 264 * 265 * Once do_check() reaches bpf_exit, it calls update_branch_counts() 266 * and the verifier state tree will look: 267 * 1 268 * 1 269 * 2 -> 1 (first 'if' pushed into stack) 270 * 1 271 * 1 -> 1 (second 'if' pushed into stack) 272 * 0 273 * 0 274 * 0 bpf_exit. 275 * After pop_stack() the do_check() will resume at second 'if'. 276 * 277 * If is_state_visited() sees a state with branches > 0 it means 278 * there is a loop. If such state is exactly equal to the current state 279 * it's an infinite loop. Note states_equal() checks for states 280 * equvalency, so two states being 'states_equal' does not mean 281 * infinite loop. The exact comparison is provided by 282 * states_maybe_looping() function. It's a stronger pre-check and 283 * much faster than states_equal(). 284 * 285 * This algorithm may not find all possible infinite loops or 286 * loop iteration count may be too high. 287 * In such cases BPF_COMPLEXITY_LIMIT_INSNS limit kicks in. 288 */ 289 u32 branches; 290 u32 insn_idx; 291 u32 curframe; 292 u32 active_spin_lock; 293 bool speculative; 294 295 /* first and last insn idx of this verifier state */ 296 u32 first_insn_idx; 297 u32 last_insn_idx; 298 /* jmp history recorded from first to last. 299 * backtracking is using it to go from last to first. 300 * For most states jmp_history_cnt is [0-3]. 301 * For loops can go up to ~40. 302 */ 303 struct bpf_idx_pair *jmp_history; 304 u32 jmp_history_cnt; 305}; 306 307#define bpf_get_spilled_reg(slot, frame) \ 308 (((slot < frame->allocated_stack / BPF_REG_SIZE) && \ 309 (frame->stack[slot].slot_type[0] == STACK_SPILL)) \ 310 ? &frame->stack[slot].spilled_ptr : NULL) 311 312/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */ 313#define bpf_for_each_spilled_reg(iter, frame, reg) \ 314 for (iter = 0, reg = bpf_get_spilled_reg(iter, frame); \ 315 iter < frame->allocated_stack / BPF_REG_SIZE; \ 316 iter++, reg = bpf_get_spilled_reg(iter, frame)) 317 318/* linked list of verifier states used to prune search */ 319struct bpf_verifier_state_list { 320 struct bpf_verifier_state state; 321 struct bpf_verifier_state_list *next; 322 int miss_cnt, hit_cnt; 323}; 324 325/* Possible states for alu_state member. */ 326#define BPF_ALU_SANITIZE_SRC (1U << 0) 327#define BPF_ALU_SANITIZE_DST (1U << 1) 328#define BPF_ALU_NEG_VALUE (1U << 2) 329#define BPF_ALU_NON_POINTER (1U << 3) 330#define BPF_ALU_IMMEDIATE (1U << 4) 331#define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \ 332 BPF_ALU_SANITIZE_DST) 333 334struct bpf_insn_aux_data { 335 union { 336 enum bpf_reg_type ptr_type; /* pointer type for load/store insns */ 337 unsigned long map_ptr_state; /* pointer/poison value for maps */ 338 s32 call_imm; /* saved imm field of call insn */ 339 u32 alu_limit; /* limit for add/sub register with pointer */ 340 struct { 341 u32 map_index; /* index into used_maps[] */ 342 u32 map_off; /* offset from value base address */ 343 }; 344 struct { 345 enum bpf_reg_type reg_type; /* type of pseudo_btf_id */ 346 union { 347 struct { 348 struct btf *btf; 349 u32 btf_id; /* btf_id for struct typed var */ 350 }; 351 u32 mem_size; /* mem_size for non-struct typed var */ 352 }; 353 } btf_var; 354 }; 355 u64 map_key_state; /* constant (32 bit) key tracking for maps */ 356 int ctx_field_size; /* the ctx field size for load insn, maybe 0 */ 357 u32 seen; /* this insn was processed by the verifier at env->pass_cnt */ 358 bool sanitize_stack_spill; /* subject to Spectre v4 sanitation */ 359 bool zext_dst; /* this insn zero extends dst reg */ 360 u8 alu_state; /* used in combination with alu_limit */ 361 362 /* below fields are initialized once */ 363 unsigned int orig_idx; /* original instruction index */ 364 bool prune_point; 365}; 366 367#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */ 368#define MAX_USED_BTFS 64 /* max number of BTFs accessed by one BPF program */ 369 370#define BPF_VERIFIER_TMP_LOG_SIZE 1024 371 372struct bpf_verifier_log { 373 u32 level; 374 char kbuf[BPF_VERIFIER_TMP_LOG_SIZE]; 375 char __user *ubuf; 376 u32 len_used; 377 u32 len_total; 378}; 379 380static inline bool bpf_verifier_log_full(const struct bpf_verifier_log *log) 381{ 382 return log->len_used >= log->len_total - 1; 383} 384 385#define BPF_LOG_LEVEL1 1 386#define BPF_LOG_LEVEL2 2 387#define BPF_LOG_STATS 4 388#define BPF_LOG_LEVEL (BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2) 389#define BPF_LOG_MASK (BPF_LOG_LEVEL | BPF_LOG_STATS) 390#define BPF_LOG_KERNEL (BPF_LOG_MASK + 1) /* kernel internal flag */ 391 392static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log) 393{ 394 return log && 395 ((log->level && log->ubuf && !bpf_verifier_log_full(log)) || 396 log->level == BPF_LOG_KERNEL); 397} 398 399#define BPF_MAX_SUBPROGS 256 400 401struct bpf_subprog_info { 402 /* 'start' has to be the first field otherwise find_subprog() won't work */ 403 u32 start; /* insn idx of function entry point */ 404 u32 linfo_idx; /* The idx to the main_prog->aux->linfo */ 405 u16 stack_depth; /* max. stack depth used by this function */ 406 bool has_tail_call; 407 bool tail_call_reachable; 408 bool has_ld_abs; 409 bool is_async_cb; 410}; 411 412/* single container for all structs 413 * one verifier_env per bpf_check() call 414 */ 415struct bpf_verifier_env { 416 u32 insn_idx; 417 u32 prev_insn_idx; 418 struct bpf_prog *prog; /* eBPF program being verified */ 419 const struct bpf_verifier_ops *ops; 420 struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */ 421 int stack_size; /* number of states to be processed */ 422 bool strict_alignment; /* perform strict pointer alignment checks */ 423 bool test_state_freq; /* test verifier with different pruning frequency */ 424 struct bpf_verifier_state *cur_state; /* current verifier state */ 425 struct bpf_verifier_state_list **explored_states; /* search pruning optimization */ 426 struct bpf_verifier_state_list *free_list; 427 struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */ 428 struct btf_mod_pair used_btfs[MAX_USED_BTFS]; /* array of BTF's used by BPF program */ 429 u32 used_map_cnt; /* number of used maps */ 430 u32 used_btf_cnt; /* number of used BTF objects */ 431 u32 id_gen; /* used to generate unique reg IDs */ 432 bool explore_alu_limits; 433 bool allow_ptr_leaks; 434 bool allow_uninit_stack; 435 bool allow_ptr_to_map_access; 436 bool bpf_capable; 437 bool bypass_spec_v1; 438 bool bypass_spec_v4; 439 bool seen_direct_write; 440 struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */ 441 const struct bpf_line_info *prev_linfo; 442 struct bpf_verifier_log log; 443 struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1]; 444 struct bpf_id_pair idmap_scratch[BPF_ID_MAP_SIZE]; 445 struct { 446 int *insn_state; 447 int *insn_stack; 448 int cur_stack; 449 } cfg; 450 u32 pass_cnt; /* number of times do_check() was called */ 451 u32 subprog_cnt; 452 /* number of instructions analyzed by the verifier */ 453 u32 prev_insn_processed, insn_processed; 454 /* number of jmps, calls, exits analyzed so far */ 455 u32 prev_jmps_processed, jmps_processed; 456 /* total verification time */ 457 u64 verification_time; 458 /* maximum number of verifier states kept in 'branching' instructions */ 459 u32 max_states_per_insn; 460 /* total number of allocated verifier states */ 461 u32 total_states; 462 /* some states are freed during program analysis. 463 * this is peak number of states. this number dominates kernel 464 * memory consumption during verification 465 */ 466 u32 peak_states; 467 /* longest register parentage chain walked for liveness marking */ 468 u32 longest_mark_read_walk; 469 bpfptr_t fd_array; 470}; 471 472__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log, 473 const char *fmt, va_list args); 474__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env, 475 const char *fmt, ...); 476__printf(2, 3) void bpf_log(struct bpf_verifier_log *log, 477 const char *fmt, ...); 478 479static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env) 480{ 481 struct bpf_verifier_state *cur = env->cur_state; 482 483 return cur->frame[cur->curframe]; 484} 485 486static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env) 487{ 488 return cur_func(env)->regs; 489} 490 491int bpf_prog_offload_verifier_prep(struct bpf_prog *prog); 492int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env, 493 int insn_idx, int prev_insn_idx); 494int bpf_prog_offload_finalize(struct bpf_verifier_env *env); 495void 496bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off, 497 struct bpf_insn *insn); 498void 499bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt); 500 501int check_ctx_reg(struct bpf_verifier_env *env, 502 const struct bpf_reg_state *reg, int regno); 503int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg, 504 u32 regno, u32 mem_size); 505 506/* this lives here instead of in bpf.h because it needs to dereference tgt_prog */ 507static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog, 508 struct btf *btf, u32 btf_id) 509{ 510 if (tgt_prog) 511 return ((u64)tgt_prog->aux->id << 32) | btf_id; 512 else 513 return ((u64)btf_obj_id(btf) << 32) | 0x80000000 | btf_id; 514} 515 516/* unpack the IDs from the key as constructed above */ 517static inline void bpf_trampoline_unpack_key(u64 key, u32 *obj_id, u32 *btf_id) 518{ 519 if (obj_id) 520 *obj_id = key >> 32; 521 if (btf_id) 522 *btf_id = key & 0x7FFFFFFF; 523} 524 525int bpf_check_attach_target(struct bpf_verifier_log *log, 526 const struct bpf_prog *prog, 527 const struct bpf_prog *tgt_prog, 528 u32 btf_id, 529 struct bpf_attach_target_info *tgt_info); 530 531#endif /* _LINUX_BPF_VERIFIER_H */ 532