linux/include/linux/bpf_verifier.h
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_VERIFIER_H
#define _LINUX_BPF_VERIFIER_H 1

#include <linux/bpf.h> /* for enum bpf_reg_type */
#include <linux/filter.h> /* for MAX_BPF_STACK */
#include <linux/tnum.h>

/* Maximum variable offset umax_value permitted when resolving memory accesses.
 * In practice this is far bigger than any realistic pointer offset; this limit
 * ensures that umax_value + (int)off + (int)size cannot overflow a u64.
 */
#define BPF_MAX_VAR_OFF (1 << 29)
/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO].  This ensures
 * that converting umax_value to int cannot overflow.
 */
#define BPF_MAX_VAR_SIZ (1 << 29)
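/* For illustration (a rough bound, not from the original comment): with
 * umax_value capped at 2^29 by BPF_MAX_VAR_OFF, and (int)off and (int)size
 * each below INT_MAX < 2^31, the sum
 * umax_value + (int)off + (int)size < 2^29 + 2^31 + 2^31 < 2^33,
 * which is far below U64_MAX, so the u64 addition cannot wrap.
 */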

/* Liveness marks, used for registers and spilled-regs (in stack slots).
 * Read marks propagate upwards until they find a write mark; they record that
 * "one of this state's descendants read this reg" (and therefore the reg is
 * relevant for states_equal() checks).
 * Write marks collect downwards and do not propagate; they record that "the
 * straight-line code that reached this state (from its parent) wrote this reg"
 * (and therefore that reads propagated from this state or its descendants
 * should not propagate to its parent).
 * A state with a write mark can receive read marks; it just won't propagate
 * them to its parent, since the write mark is a property, not of the state,
 * but of the link between it and its parent.  See mark_reg_read() and
 * mark_stack_slot_read() in kernel/bpf/verifier.c.
 */
enum bpf_reg_liveness {
        REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */
        REG_LIVE_READ32 = 0x1, /* reg was read, so we're sensitive to initial value */
        REG_LIVE_READ64 = 0x2, /* likewise, but full 64-bit content matters */
        REG_LIVE_READ = REG_LIVE_READ32 | REG_LIVE_READ64,
        REG_LIVE_WRITTEN = 0x4, /* reg was written first, screening off later reads */
        REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */
};
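
/* Illustrative sketch of the marks above; for the straight-line sequence
 *
 *   r1 = r2;      // r2 picks up a read mark (REG_LIVE_READ64),
 *                 // r1 picks up REG_LIVE_WRITTEN in this state
 *   if (r1 > 5)   // this read of r1 stops at r1's write mark and is
 *       goto out; // not propagated to the parent state
 *
 * the parent state only learns that r2 was read: r1's initial value is
 * screened off by the write mark and cannot matter for states_equal().
 */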

struct bpf_reg_state {
        /* Ordering of fields matters.  See states_equal() */
        enum bpf_reg_type type;
        union {
                /* valid when type == PTR_TO_PACKET */
                u16 range;

                /* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
                 *   PTR_TO_MAP_VALUE_OR_NULL
                 */
                struct bpf_map *map_ptr;

                /* Max size from any of the above. */
                unsigned long raw;
        };
        /* Fixed part of pointer offset, pointer types only */
        s32 off;
        /* For PTR_TO_PACKET, used to find other pointers with the same variable
         * offset, so they can share range knowledge.
         * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
         * came from, when one is tested for != NULL.
         * For PTR_TO_SOCKET this is used to share which pointers retain the
         * same reference to the socket, to determine proper reference freeing.
         */
        u32 id;
        /* PTR_TO_SOCKET and PTR_TO_TCP_SOCK can be pointers returned
         * from the pointer-cast helpers bpf_sk_fullsock() and
         * bpf_tcp_sock().
         *
         * Consider the following, where "sk" is a reference-counted
         * pointer returned from "sk = bpf_sk_lookup_tcp();":
         *
         * 1: sk = bpf_sk_lookup_tcp();
         * 2: if (!sk) { return 0; }
         * 3: fullsock = bpf_sk_fullsock(sk);
         * 4: if (!fullsock) { bpf_sk_release(sk); return 0; }
         * 5: tp = bpf_tcp_sock(fullsock);
         * 6: if (!tp) { bpf_sk_release(sk); return 0; }
         * 7: bpf_sk_release(sk);
         * 8: snd_cwnd = tp->snd_cwnd;  // verifier will complain
         *
         * After bpf_sk_release(sk) at line 7, the "fullsock" and "tp"
         * pointers should be invalidated as well.  To do that, the regs
         * holding "fullsock" and "sk" need to remember the original
         * refcounted ptr id (i.e. sk_reg->id) in ref_obj_id, so that
         * the verifier can reset all regs whose ref_obj_id matches
         * sk_reg->id.
         *
         * sk_reg->ref_obj_id is set to sk_reg->id at line 1.
         * sk_reg->id is kept for NULL-marking purposes only; once
         * NULL-marking is done, sk_reg->id can be reset to 0.
         *
         * After "fullsock = bpf_sk_fullsock(sk);" at line 3,
         * fullsock_reg->ref_obj_id is set to sk_reg->ref_obj_id.
         *
         * After "tp = bpf_tcp_sock(fullsock);" at line 5,
         * tp_reg->ref_obj_id is set to fullsock_reg->ref_obj_id
         * which is the same as sk_reg->ref_obj_id.
         *
         * From the verifier perspective, if sk, fullsock and tp
         * are not NULL, they are the same ptr with different
         * reg->type.  In particular, bpf_sk_release(tp) is also
         * allowed and has the same effect as bpf_sk_release(sk).
         */
        u32 ref_obj_id;
        /* For scalar types (SCALAR_VALUE), this represents our knowledge of
         * the actual value.
         * For pointer types, this represents the variable part of the offset
         * from the pointed-to object, and is shared with all bpf_reg_states
         * with the same id as us.
         */
        struct tnum var_off;
        /* Used to determine if any memory access using this register will
         * result in a bad access.
         * These refer to the same value as var_off, not necessarily the actual
         * contents of the register.
         */
        s64 smin_value; /* minimum possible (s64)value */
        s64 smax_value; /* maximum possible (s64)value */
        u64 umin_value; /* minimum possible (u64)value */
        u64 umax_value; /* maximum possible (u64)value */
        /* parentage chain for liveness checking */
        struct bpf_reg_state *parent;
        /* Inside the callee two registers can be both PTR_TO_STACK like
         * R1=fp-8 and R2=fp-8, but one of them points to this function stack
         * while another to the caller's stack. To differentiate them 'frameno'
         * is used which is an index in bpf_verifier_state->frame[] array
         * pointing to bpf_func_state.
         */
        u32 frameno;
        /* Tracks subreg definition. The stored value is the insn_idx of the
         * writing insn. This is safe because subreg_def is used before any insn
         * patching which only happens after main verification finished.
         */
        s32 subreg_def;
        enum bpf_reg_liveness live;
        /* if (!precise && SCALAR_VALUE) min/max/tnum don't affect safety */
        bool precise;
};
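
/* Illustrative example of the value-tracking fields above: after
 *
 *   r1 = <unknown scalar>;
 *   r1 &= 0xff;
 *
 * the verifier tracks var_off = (value 0x0, mask 0xff), i.e. the upper
 * 56 bits are known to be zero, and the bounds tighten to
 * umin_value = 0, umax_value = 255, smin_value = 0, smax_value = 255.
 */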

enum bpf_stack_slot_type {
        STACK_INVALID,    /* nothing was stored in this stack slot */
        STACK_SPILL,      /* register spilled into stack */
        STACK_MISC,       /* BPF program wrote some data into this slot */
        STACK_ZERO,       /* BPF program wrote constant zero */
};

#define BPF_REG_SIZE 8  /* size of eBPF register in bytes */

struct bpf_stack_state {
        struct bpf_reg_state spilled_ptr;
        u8 slot_type[BPF_REG_SIZE];
};
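
/* Illustrative example: spilling a pointer register to the stack, e.g.
 *
 *   *(u64 *)(r10 - 8) = r1;
 *
 * marks all BPF_REG_SIZE slot_type bytes of that slot as STACK_SPILL and
 * copies r1's bpf_reg_state into spilled_ptr, so the pointer's type and
 * bounds survive a later fill back into a register.
 */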

struct bpf_reference_state {
        /* Track each reference created with a unique id, even if the same
         * instruction creates the reference multiple times (eg, via CALL).
         */
        int id;
        /* Instruction where the allocation of this reference occurred. This
         * is used purely to inform the user of a reference leak.
         */
        int insn_idx;
};
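
/* Illustrative lifetime of a reference entry (see acquire_reference_state()
 * and release_reference_state() in kernel/bpf/verifier.c):
 *
 *   r0 = bpf_sk_lookup_tcp(...);  // entry { id, insn_idx } is recorded
 *   ...
 *   bpf_sk_release(r0);           // entry with the matching id is dropped
 *
 * Any entry still present at bpf_exit is reported as a reference leak,
 * with insn_idx pointing at the acquiring instruction.
 */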

/* state of the program:
 * type of all registers and stack info
 */
struct bpf_func_state {
        struct bpf_reg_state regs[MAX_BPF_REG];
        /* index of call instruction that called into this func */
        int callsite;
        /* stack frame number of this function state from pov of
         * enclosing bpf_verifier_state.
         * 0 = main function, 1 = first callee.
         */
        u32 frameno;
        /* subprog number == index within subprog_stack_depth
         * zero == main subprog
         */
        u32 subprogno;

        /* The following fields should be last. See copy_func_state() */
        int acquired_refs;
        struct bpf_reference_state *refs;
        int allocated_stack;
        struct bpf_stack_state *stack;
};

struct bpf_idx_pair {
        u32 prev_idx;
        u32 idx;
};

#define MAX_CALL_FRAMES 8
struct bpf_verifier_state {
        /* call stack tracking */
        struct bpf_func_state *frame[MAX_CALL_FRAMES];
        struct bpf_verifier_state *parent;
        /*
         * The 'branches' field is the number of branches left to explore:
         * 0 - all possible paths from this state reached bpf_exit or
         *     were safely pruned
         * 1 - at least one path is being explored; this state hasn't
         *     reached bpf_exit yet
         * 2 - at least two paths are being explored; this state is an
         *     immediate parent of two children: one is the fallthrough
         *     branch with branches==1, and the other state is pushed
         *     onto the stack (to be explored later), also with
         *     branches==1. The parent of this state has branches==1.
         * The verifier state tree connected via the 'parent' pointer looks like:
         * 1
         * 1
         * 2 -> 1 (first 'if' pushed into stack)
         * 1
         * 2 -> 1 (second 'if' pushed into stack)
         * 1
         * 1
         * 1 bpf_exit.
         *
         * Once do_check() reaches bpf_exit, it calls update_branch_counts()
         * and the verifier state tree will look like:
         * 1
         * 1
         * 2 -> 1 (first 'if' pushed into stack)
         * 1
         * 1 -> 1 (second 'if' pushed into stack)
         * 0
         * 0
         * 0 bpf_exit.
         * After pop_stack(), do_check() will resume at the second 'if'.
         *
         * If is_state_visited() sees a state with branches > 0 it means
         * there is a loop. If such a state is exactly equal to the current
         * state it's an infinite loop. Note that states_equal() checks for
         * state equivalence, so two states being 'states_equal' does not
         * mean an infinite loop. The exact comparison is provided by the
         * states_maybe_looping() function. It's a stronger pre-check and
         * much faster than states_equal().
         *
         * This algorithm may not find all possible infinite loops, or the
         * loop iteration count may be too high. In such cases the
         * BPF_COMPLEXITY_LIMIT_INSNS limit kicks in.
         */
        u32 branches;
        u32 insn_idx;
        u32 curframe;
        u32 active_spin_lock;
        bool speculative;

        /* first and last insn idx of this verifier state */
        u32 first_insn_idx;
        u32 last_insn_idx;
        /* jmp history recorded from first to last.
         * Backtracking uses it to go from last to first.
         * For most states jmp_history_cnt is [0-3];
         * for loops it can go up to ~40.
         */
        struct bpf_idx_pair *jmp_history;
        u32 jmp_history_cnt;
};

#define bpf_get_spilled_reg(slot, frame)                                \
        (((slot < frame->allocated_stack / BPF_REG_SIZE) &&             \
          (frame->stack[slot].slot_type[0] == STACK_SPILL))             \
         ? &frame->stack[slot].spilled_ptr : NULL)

/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */
#define bpf_for_each_spilled_reg(iter, frame, reg)                      \
        for (iter = 0, reg = bpf_get_spilled_reg(iter, frame);          \
             iter < frame->allocated_stack / BPF_REG_SIZE;              \
             iter++, reg = bpf_get_spilled_reg(iter, frame))
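
/* Illustrative use of the iterator above (frame_has_spilled_ptr() is a
 * made-up example, not a helper defined elsewhere):
 *
 *   static bool frame_has_spilled_ptr(struct bpf_func_state *frame)
 *   {
 *           struct bpf_reg_state *reg;
 *           int i;
 *
 *           bpf_for_each_spilled_reg(i, frame, reg) {
 *                   if (reg)       // NULL for slots that are not STACK_SPILL
 *                           return true;
 *           }
 *           return false;
 *   }
 */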

/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
        struct bpf_verifier_state state;
        struct bpf_verifier_state_list *next;
        int miss_cnt, hit_cnt;
};

/* Possible states for alu_state member. */
#define BPF_ALU_SANITIZE_SRC            1U
#define BPF_ALU_SANITIZE_DST            2U
#define BPF_ALU_NEG_VALUE               (1U << 2)
#define BPF_ALU_NON_POINTER             (1U << 3)
#define BPF_ALU_SANITIZE                (BPF_ALU_SANITIZE_SRC | \
                                         BPF_ALU_SANITIZE_DST)

struct bpf_insn_aux_data {
        union {
                enum bpf_reg_type ptr_type;     /* pointer type for load/store insns */
                unsigned long map_state;        /* pointer/poison value for maps */
                s32 call_imm;                   /* saved imm field of call insn */
                u32 alu_limit;                  /* limit for add/sub register with pointer */
                struct {
                        u32 map_index;          /* index into used_maps[] */
                        u32 map_off;            /* offset from value base address */
                };
        };
        int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
        int sanitize_stack_off; /* stack slot to be cleared */
        bool seen; /* this insn was processed by the verifier */
        bool zext_dst; /* this insn zero extends dst reg */
        u8 alu_state; /* used in combination with alu_limit */
        bool prune_point;
        unsigned int orig_idx; /* original instruction index */
};

#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */

#define BPF_VERIFIER_TMP_LOG_SIZE       1024

struct bpf_verifier_log {
        u32 level;
        char kbuf[BPF_VERIFIER_TMP_LOG_SIZE];
        char __user *ubuf;
        u32 len_used;
        u32 len_total;
};
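
/* Note: the "- 1" below keeps one byte of ubuf in reserve for the
 * terminating NUL that bpf_verifier_vlog() appends, so the log counts as
 * full one byte early.
 */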
static inline bool bpf_verifier_log_full(const struct bpf_verifier_log *log)
{
        return log->len_used >= log->len_total - 1;
}

#define BPF_LOG_LEVEL1  1
#define BPF_LOG_LEVEL2  2
#define BPF_LOG_STATS   4
#define BPF_LOG_LEVEL   (BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2)
#define BPF_LOG_MASK    (BPF_LOG_LEVEL | BPF_LOG_STATS)

static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
{
        return log->level && log->ubuf && !bpf_verifier_log_full(log);
}

#define BPF_MAX_SUBPROGS 256

struct bpf_subprog_info {
        u32 start; /* insn idx of function entry point */
        u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
        u16 stack_depth; /* max. stack depth used by this function */
};

/* single container for all structs
 * one verifier_env per bpf_check() call
 */
struct bpf_verifier_env {
        u32 insn_idx;
        u32 prev_insn_idx;
        struct bpf_prog *prog;          /* eBPF program being verified */
        const struct bpf_verifier_ops *ops;
        struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
        int stack_size;                 /* number of states to be processed */
        bool strict_alignment;          /* perform strict pointer alignment checks */
        struct bpf_verifier_state *cur_state; /* current verifier state */
        struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
        struct bpf_verifier_state_list *free_list;
        struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of maps used by eBPF program */
        u32 used_map_cnt;               /* number of used maps */
        u32 id_gen;                     /* used to generate unique reg IDs */
        bool allow_ptr_leaks;
        bool seen_direct_write;
        struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
        const struct bpf_line_info *prev_linfo;
        struct bpf_verifier_log log;
        struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
        struct {
                int *insn_state;
                int *insn_stack;
                int cur_stack;
        } cfg;
        u32 subprog_cnt;
        /* number of instructions analyzed by the verifier */
        u32 prev_insn_processed, insn_processed;
        /* number of jmps, calls, exits analyzed so far */
        u32 prev_jmps_processed, jmps_processed;
        /* total verification time */
        u64 verification_time;
        /* maximum number of verifier states kept in 'branching' instructions */
        u32 max_states_per_insn;
        /* total number of allocated verifier states */
        u32 total_states;
        /* Some states are freed during program analysis; this is the peak
         * number of states, which dominates kernel memory consumption
         * during verification.
         */
        u32 peak_states;
        /* longest register parentage chain walked for liveness marking */
        u32 longest_mark_read_walk;
};

__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
                                      const char *fmt, va_list args);
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
                                           const char *fmt, ...);

static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
{
        struct bpf_verifier_state *cur = env->cur_state;

        return cur->frame[cur->curframe];
}

static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
{
        return cur_func(env)->regs;
}
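
/* Illustrative use: the state of R1 in the frame currently being verified
 * can be reached via
 *
 *   struct bpf_reg_state *r1 = &cur_regs(env)[BPF_REG_1];
 *
 * cur_func() resolves the active call frame (env->cur_state->curframe)
 * and cur_regs() exposes that frame's regs[] array.
 */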

int bpf_prog_offload_verifier_prep(struct bpf_prog *prog);
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
                                 int insn_idx, int prev_insn_idx);
int bpf_prog_offload_finalize(struct bpf_verifier_env *env);
void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
                              struct bpf_insn *insn);
void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);

#endif /* _LINUX_BPF_VERIFIER_H */