linux/include/linux/filter.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Linux Socket Filter Data Structures
 */
#ifndef __LINUX_FILTER_H__
#define __LINUX_FILTER_H__

#include <stdarg.h>

#include <linux/atomic.h>
#include <linux/refcount.h>
#include <linux/compat.h>
#include <linux/skbuff.h>
#include <linux/linkage.h>
#include <linux/printk.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/cryptohash.h>
#include <linux/set_memory.h>
#include <linux/kallsyms.h>

#include <net/sch_generic.h>

#include <uapi/linux/filter.h>
#include <uapi/linux/bpf.h>

struct sk_buff;
struct sock;
struct seccomp_data;
struct bpf_prog_aux;
struct xdp_rxq_info;

/* ArgX, context and stack frame pointer register positions. Note,
 * Arg1, Arg2, Arg3, etc. are used as argument mappings of function
 * calls in the BPF_CALL instruction.
 */
#define BPF_REG_ARG1    BPF_REG_1
#define BPF_REG_ARG2    BPF_REG_2
#define BPF_REG_ARG3    BPF_REG_3
#define BPF_REG_ARG4    BPF_REG_4
#define BPF_REG_ARG5    BPF_REG_5
#define BPF_REG_CTX     BPF_REG_6
#define BPF_REG_FP      BPF_REG_10

/* Additional register mappings for converted user programs. */
#define BPF_REG_A       BPF_REG_0
#define BPF_REG_X       BPF_REG_7
#define BPF_REG_TMP     BPF_REG_8

/* Kernel hidden auxiliary/helper register for the hardening step.
 * Only used by eBPF JITs. It's nothing more than a temporary
 * register that JITs use internally; the only difference is that
 * here it appears in eBPF instructions that have been rewritten
 * for constant blinding. See the JIT pre-step in
 * bpf_jit_blind_constants().
 */
#define BPF_REG_AX              MAX_BPF_REG
#define MAX_BPF_JIT_REG         (MAX_BPF_REG + 1)

/* unused opcode to mark special call to bpf_tail_call() helper */
#define BPF_TAIL_CALL   0xf0

/* unused opcode to mark call to interpreter with arguments */
#define BPF_CALL_ARGS   0xe0

/* As per nm(1), we expose JITed images as a text (code) section for
 * kallsyms. That way, tools like perf can find them to match
 * addresses.
 */
#define BPF_SYM_ELF_TYPE        't'

/* BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK   512

/* Helper macros for filter block array initializers. */

/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */

#define BPF_ALU64_REG(OP, DST, SRC)                             \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ALU64 | BPF_OP(OP) | BPF_X,        \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = 0,                                     \
                .imm   = 0 })

#define BPF_ALU32_REG(OP, DST, SRC)                             \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ALU | BPF_OP(OP) | BPF_X,          \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = 0,                                     \
                .imm   = 0 })

/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */

#define BPF_ALU64_IMM(OP, DST, IMM)                             \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ALU64 | BPF_OP(OP) | BPF_K,        \
                .dst_reg = DST,                                 \
                .src_reg = 0,                                   \
                .off   = 0,                                     \
                .imm   = IMM })

#define BPF_ALU32_IMM(OP, DST, IMM)                             \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ALU | BPF_OP(OP) | BPF_K,          \
                .dst_reg = DST,                                 \
                .src_reg = 0,                                   \
                .off   = 0,                                     \
                .imm   = IMM })

/* Endianness conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */

#define BPF_ENDIAN(TYPE, DST, LEN)                              \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ALU | BPF_END | BPF_SRC(TYPE),     \
                .dst_reg = DST,                                 \
                .src_reg = 0,                                   \
                .off   = 0,                                     \
                .imm   = LEN })

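/* Illustrative sketch (not an in-tree user): byte-swapping the low
 * 16 bits of R0 to big endian, e.g. for a fetched port number:
 *
 *	BPF_ENDIAN(BPF_TO_BE, BPF_REG_0, 16)
 *
 * LEN selects how many low-order bits take part in the swap (16, 32
 * or 64).
 */
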
 123/* Short form of mov, dst_reg = src_reg */
 124
 125#define BPF_MOV64_REG(DST, SRC)                                 \
 126        ((struct bpf_insn) {                                    \
 127                .code  = BPF_ALU64 | BPF_MOV | BPF_X,           \
 128                .dst_reg = DST,                                 \
 129                .src_reg = SRC,                                 \
 130                .off   = 0,                                     \
 131                .imm   = 0 })
 132
 133#define BPF_MOV32_REG(DST, SRC)                                 \
 134        ((struct bpf_insn) {                                    \
 135                .code  = BPF_ALU | BPF_MOV | BPF_X,             \
 136                .dst_reg = DST,                                 \
 137                .src_reg = SRC,                                 \
 138                .off   = 0,                                     \
 139                .imm   = 0 })
 140
 141/* Short form of mov, dst_reg = imm32 */
 142
 143#define BPF_MOV64_IMM(DST, IMM)                                 \
 144        ((struct bpf_insn) {                                    \
 145                .code  = BPF_ALU64 | BPF_MOV | BPF_K,           \
 146                .dst_reg = DST,                                 \
 147                .src_reg = 0,                                   \
 148                .off   = 0,                                     \
 149                .imm   = IMM })
 150
 151#define BPF_MOV32_IMM(DST, IMM)                                 \
 152        ((struct bpf_insn) {                                    \
 153                .code  = BPF_ALU | BPF_MOV | BPF_K,             \
 154                .dst_reg = DST,                                 \
 155                .src_reg = 0,                                   \
 156                .off   = 0,                                     \
 157                .imm   = IMM })
 158
 159/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
 160#define BPF_LD_IMM64(DST, IMM)                                  \
 161        BPF_LD_IMM64_RAW(DST, 0, IMM)
 162
 163#define BPF_LD_IMM64_RAW(DST, SRC, IMM)                         \
 164        ((struct bpf_insn) {                                    \
 165                .code  = BPF_LD | BPF_DW | BPF_IMM,             \
 166                .dst_reg = DST,                                 \
 167                .src_reg = SRC,                                 \
 168                .off   = 0,                                     \
 169                .imm   = (__u32) (IMM) }),                      \
 170        ((struct bpf_insn) {                                    \
 171                .code  = 0, /* zero is reserved opcode */       \
 172                .dst_reg = 0,                                   \
 173                .src_reg = 0,                                   \
 174                .off   = 0,                                     \
 175                .imm   = ((__u64) (IMM)) >> 32 })
 176
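/* Note that BPF_LD_IMM64() expands to *two* struct bpf_insn entries
 * and therefore occupies two slots in an instruction array. A minimal,
 * purely illustrative sketch:
 *
 *	BPF_LD_IMM64(BPF_REG_1, 0xdeadbeefcafebabeULL)
 *
 * emits the BPF_LD | BPF_DW | BPF_IMM insn holding the lower 32 bits,
 * followed by the zero-opcode continuation insn holding the upper 32.
 */
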
/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD)                              \
        BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)

/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */

#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM)                      \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE),   \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = 0,                                     \
                .imm   = IMM })

#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM)                      \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ALU | BPF_MOV | BPF_SRC(TYPE),     \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = 0,                                     \
                .imm   = IMM })

/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */

#define BPF_LD_ABS(SIZE, IMM)                                   \
        ((struct bpf_insn) {                                    \
                .code  = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS,     \
                .dst_reg = 0,                                   \
                .src_reg = 0,                                   \
                .off   = 0,                                     \
                .imm   = IMM })

/* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */

#define BPF_LD_IND(SIZE, SRC, IMM)                              \
        ((struct bpf_insn) {                                    \
                .code  = BPF_LD | BPF_SIZE(SIZE) | BPF_IND,     \
                .dst_reg = 0,                                   \
                .src_reg = SRC,                                 \
                .off   = 0,                                     \
                .imm   = IMM })

/* Memory load, dst_reg = *(uint *) (src_reg + off16) */

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF)                        \
        ((struct bpf_insn) {                                    \
                .code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM,    \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = OFF,                                   \
                .imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */

#define BPF_STX_MEM(SIZE, DST, SRC, OFF)                        \
        ((struct bpf_insn) {                                    \
                .code  = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM,    \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = OFF,                                   \
                .imm   = 0 })

/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */

#define BPF_STX_XADD(SIZE, DST, SRC, OFF)                       \
        ((struct bpf_insn) {                                    \
                .code  = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD,   \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = OFF,                                   \
                .imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = imm32 */

#define BPF_ST_MEM(SIZE, DST, OFF, IMM)                         \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM,     \
                .dst_reg = DST,                                 \
                .src_reg = 0,                                   \
                .off   = OFF,                                   \
                .imm   = IMM })

/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */

#define BPF_JMP_REG(OP, DST, SRC, OFF)                          \
        ((struct bpf_insn) {                                    \
                .code  = BPF_JMP | BPF_OP(OP) | BPF_X,          \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = OFF,                                   \
                .imm   = 0 })

/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */

#define BPF_JMP_IMM(OP, DST, IMM, OFF)                          \
        ((struct bpf_insn) {                                    \
                .code  = BPF_JMP | BPF_OP(OP) | BPF_K,          \
                .dst_reg = DST,                                 \
                .src_reg = 0,                                   \
                .off   = OFF,                                   \
                .imm   = IMM })

/* Unconditional jumps, goto pc + off16 */

#define BPF_JMP_A(OFF)                                          \
        ((struct bpf_insn) {                                    \
                .code  = BPF_JMP | BPF_JA,                      \
                .dst_reg = 0,                                   \
                .src_reg = 0,                                   \
                .off   = OFF,                                   \
                .imm   = 0 })

/* Function call */

#define BPF_EMIT_CALL(FUNC)                                     \
        ((struct bpf_insn) {                                    \
                .code  = BPF_JMP | BPF_CALL,                    \
                .dst_reg = 0,                                   \
                .src_reg = 0,                                   \
                .off   = 0,                                     \
                .imm   = ((FUNC) - __bpf_call_base) })

/* Raw code statement block */

#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM)                  \
        ((struct bpf_insn) {                                    \
                .code  = CODE,                                  \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = OFF,                                   \
                .imm   = IMM })

/* Program exit */

#define BPF_EXIT_INSN()                                         \
        ((struct bpf_insn) {                                    \
                .code  = BPF_JMP | BPF_EXIT,                    \
                .dst_reg = 0,                                   \
                .src_reg = 0,                                   \
                .off   = 0,                                     \
                .imm   = 0 })

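/* Putting the above initializers together, a minimal sketch (purely
 * illustrative, not an in-tree program) of an eBPF filter that simply
 * returns 1, i.e. "let the packet pass":
 *
 *	struct bpf_insn prog[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 1),	// R0 = 1 (return value)
 *		BPF_EXIT_INSN(),		// return R0
 *	};
 */
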
/* Internal classic blocks for direct assignment */

#define __BPF_STMT(CODE, K)                                     \
        ((struct sock_filter) BPF_STMT(CODE, K))

#define __BPF_JUMP(CODE, K, JT, JF)                             \
        ((struct sock_filter) BPF_JUMP(CODE, K, JT, JF))

#define bytes_to_bpf_size(bytes)                                \
({                                                              \
        int bpf_size = -EINVAL;                                 \
                                                                \
        if (bytes == sizeof(u8))                                \
                bpf_size = BPF_B;                               \
        else if (bytes == sizeof(u16))                          \
                bpf_size = BPF_H;                               \
        else if (bytes == sizeof(u32))                          \
                bpf_size = BPF_W;                               \
        else if (bytes == sizeof(u64))                          \
                bpf_size = BPF_DW;                              \
                                                                \
        bpf_size;                                               \
})

#define bpf_size_to_bytes(bpf_size)                             \
({                                                              \
        int bytes = -EINVAL;                                    \
                                                                \
        if (bpf_size == BPF_B)                                  \
                bytes = sizeof(u8);                             \
        else if (bpf_size == BPF_H)                             \
                bytes = sizeof(u16);                            \
        else if (bpf_size == BPF_W)                             \
                bytes = sizeof(u32);                            \
        else if (bpf_size == BPF_DW)                            \
                bytes = sizeof(u64);                            \
                                                                \
        bytes;                                                  \
})

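/* For instance, bytes_to_bpf_size(sizeof(u32)) evaluates to BPF_W and
 * bpf_size_to_bytes(BPF_W) back to 4; any other width yields -EINVAL.
 */
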
#define BPF_SIZEOF(type)                                        \
        ({                                                      \
                const int __size = bytes_to_bpf_size(sizeof(type)); \
                BUILD_BUG_ON(__size < 0);                       \
                __size;                                         \
        })

#define BPF_FIELD_SIZEOF(type, field)                           \
        ({                                                      \
                const int __size = bytes_to_bpf_size(FIELD_SIZEOF(type, field)); \
                BUILD_BUG_ON(__size < 0);                       \
                __size;                                         \
        })

#define BPF_LDST_BYTES(insn)                                    \
        ({                                                      \
                const int __size = bpf_size_to_bytes(BPF_SIZE((insn)->code)); \
                WARN_ON(__size < 0);                            \
                __size;                                         \
        })

#define __BPF_MAP_0(m, v, ...) v
#define __BPF_MAP_1(m, v, t, a, ...) m(t, a)
#define __BPF_MAP_2(m, v, t, a, ...) m(t, a), __BPF_MAP_1(m, v, __VA_ARGS__)
#define __BPF_MAP_3(m, v, t, a, ...) m(t, a), __BPF_MAP_2(m, v, __VA_ARGS__)
#define __BPF_MAP_4(m, v, t, a, ...) m(t, a), __BPF_MAP_3(m, v, __VA_ARGS__)
#define __BPF_MAP_5(m, v, t, a, ...) m(t, a), __BPF_MAP_4(m, v, __VA_ARGS__)

#define __BPF_REG_0(...) __BPF_PAD(5)
#define __BPF_REG_1(...) __BPF_MAP(1, __VA_ARGS__), __BPF_PAD(4)
#define __BPF_REG_2(...) __BPF_MAP(2, __VA_ARGS__), __BPF_PAD(3)
#define __BPF_REG_3(...) __BPF_MAP(3, __VA_ARGS__), __BPF_PAD(2)
#define __BPF_REG_4(...) __BPF_MAP(4, __VA_ARGS__), __BPF_PAD(1)
#define __BPF_REG_5(...) __BPF_MAP(5, __VA_ARGS__)

#define __BPF_MAP(n, ...) __BPF_MAP_##n(__VA_ARGS__)
#define __BPF_REG(n, ...) __BPF_REG_##n(__VA_ARGS__)

#define __BPF_CAST(t, a)                                                       \
        (__force t)                                                            \
        (__force                                                               \
         typeof(__builtin_choose_expr(sizeof(t) == sizeof(unsigned long),      \
                                      (unsigned long)0, (t)0))) a
#define __BPF_V void
#define __BPF_N

#define __BPF_DECL_ARGS(t, a) t   a
#define __BPF_DECL_REGS(t, a) u64 a

#define __BPF_PAD(n)                                                           \
        __BPF_MAP(n, __BPF_DECL_ARGS, __BPF_N, u64, __ur_1, u64, __ur_2,       \
                  u64, __ur_3, u64, __ur_4, u64, __ur_5)

#define BPF_CALL_x(x, name, ...)                                               \
        static __always_inline                                                 \
        u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__));   \
        u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__));         \
        u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__))          \
        {                                                                      \
                return ____##name(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
        }                                                                      \
        static __always_inline                                                 \
        u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))

#define BPF_CALL_0(name, ...)   BPF_CALL_x(0, name, __VA_ARGS__)
#define BPF_CALL_1(name, ...)   BPF_CALL_x(1, name, __VA_ARGS__)
#define BPF_CALL_2(name, ...)   BPF_CALL_x(2, name, __VA_ARGS__)
#define BPF_CALL_3(name, ...)   BPF_CALL_x(3, name, __VA_ARGS__)
#define BPF_CALL_4(name, ...)   BPF_CALL_x(4, name, __VA_ARGS__)
#define BPF_CALL_5(name, ...)   BPF_CALL_x(5, name, __VA_ARGS__)

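/* A minimal sketch of how these wrappers are used to define a helper;
 * the helper name below is made up for illustration:
 *
 *	BPF_CALL_2(bpf_example_add, u64, a, u64, b)
 *	{
 *		return a + b;
 *	}
 *
 * This expands to a u64 bpf_example_add(u64, ..., u64) entry point
 * taking five u64 register arguments, which are cast back to the
 * declared parameter types before the body runs.
 */
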
#define bpf_ctx_range(TYPE, MEMBER)                                             \
        offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
#define bpf_ctx_range_till(TYPE, MEMBER1, MEMBER2)                              \
        offsetof(TYPE, MEMBER1) ... offsetofend(TYPE, MEMBER2) - 1

#define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE)                            \
        ({                                                                      \
                BUILD_BUG_ON(FIELD_SIZEOF(TYPE, MEMBER) != (SIZE));             \
                *(PTR_SIZE) = (SIZE);                                           \
                offsetof(TYPE, MEMBER);                                         \
        })

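/* These are typically used in is_valid_access() and
 * convert_ctx_access() callbacks. A hedged sketch: a case label like
 *
 *	case bpf_ctx_range(struct __sk_buff, data):
 *
 * matches any offset falling inside the data member, while
 * bpf_target_off() yields the real kernel struct offset, records the
 * access size through PTR_SIZE and checks at build time that the
 * target field has the expected size.
 */
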
#ifdef CONFIG_COMPAT
/* A struct sock_filter is architecture independent. */
struct compat_sock_fprog {
        u16             len;
        compat_uptr_t   filter; /* struct sock_filter * */
};
#endif

struct sock_fprog_kern {
        u16                     len;
        struct sock_filter      *filter;
};

struct bpf_binary_header {
        unsigned int pages;
        u8 image[];
};

struct bpf_prog {
        u16                     pages;          /* Number of allocated pages */
        u16                     jited:1,        /* Is our filter JITed? */
                                jit_requested:1,/* archs need to JIT the prog */
                                locked:1,       /* Program image locked? */
                                gpl_compatible:1, /* Is filter GPL compatible? */
                                cb_access:1,    /* Is control block accessed? */
                                dst_needed:1,   /* Do we need dst entry? */
                                blinded:1,      /* Was blinded */
                                is_func:1,      /* program is a bpf function */
                                kprobe_override:1; /* Do we override a kprobe? */
        enum bpf_prog_type      type;           /* Type of BPF program */
        enum bpf_attach_type    expected_attach_type; /* For some prog types */
        u32                     len;            /* Number of filter blocks */
        u32                     jited_len;      /* Size of jited insns in bytes */
        u8                      tag[BPF_TAG_SIZE];
        struct bpf_prog_aux     *aux;           /* Auxiliary fields */
        struct sock_fprog_kern  *orig_prog;     /* Original BPF program */
        unsigned int            (*bpf_func)(const void *ctx,
                                            const struct bpf_insn *insn);
        /* Instructions for interpreter */
        union {
                struct sock_filter      insns[0];
                struct bpf_insn         insnsi[0];
        };
};

struct sk_filter {
        refcount_t      refcnt;
        struct rcu_head rcu;
        struct bpf_prog *prog;
};

#define BPF_PROG_RUN(filter, ctx)  (*(filter)->bpf_func)(ctx, (filter)->insnsi)

#define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN

struct bpf_skb_data_end {
        struct qdisc_skb_cb qdisc_cb;
        void *data_meta;
        void *data_end;
};

struct xdp_buff {
        void *data;
        void *data_end;
        void *data_meta;
        void *data_hard_start;
        struct xdp_rxq_info *rxq;
};

struct sk_msg_buff {
        void *data;
        void *data_end;
        __u32 apply_bytes;
        __u32 cork_bytes;
        int sg_copybreak;
        int sg_start;
        int sg_curr;
        int sg_end;
        struct scatterlist sg_data[MAX_SKB_FRAGS];
        bool sg_copy[MAX_SKB_FRAGS];
        __u32 key;
        __u32 flags;
        struct bpf_map *map;
        struct sk_buff *skb;
        struct list_head list;
};

/* Compute the linear packet data range [data, data_end) which
 * will be accessed by various program types (cls_bpf, act_bpf,
 * lwt, ...). Subsystems allowing direct data access must (!)
 * ensure that the cb[] area can be written to when the BPF program
 * is invoked (otherwise cb[] save/restore is necessary).
 */
static inline void bpf_compute_data_pointers(struct sk_buff *skb)
{
        struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;

        BUILD_BUG_ON(sizeof(*cb) > FIELD_SIZEOF(struct sk_buff, cb));
        cb->data_meta = skb->data - skb_metadata_len(skb);
        cb->data_end  = skb->data + skb_headlen(skb);
}

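/* A minimal sketch of the usual invocation sequence for skb-based
 * programs (as done by cls_bpf and friends), assuming the caller is
 * inside an RCU read-side critical section:
 *
 *	bpf_compute_data_pointers(skb);
 *	ret = BPF_PROG_RUN(prog, skb);
 */
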
static inline u8 *bpf_skb_cb(struct sk_buff *skb)
{
        /* eBPF programs may read/write the skb->cb[] area to transfer
         * metadata between tail calls. Since this also needs to work
         * with tc, that scratch memory is mapped to qdisc_skb_cb's data area.
         *
         * In some socket filter cases, the cb unfortunately needs to be
         * saved/restored so that protocol specific skb->cb[] data won't
         * be lost. In any case, due to unprivileged eBPF programs
         * attached to sockets, we need to clear the bpf_skb_cb() area
         * to not leak previous contents to user space.
         */
        BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) != BPF_SKB_CB_LEN);
        BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) !=
                     FIELD_SIZEOF(struct qdisc_skb_cb, data));

        return qdisc_skb_cb(skb)->data;
}

static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
                                       struct sk_buff *skb)
{
        u8 *cb_data = bpf_skb_cb(skb);
        u8 cb_saved[BPF_SKB_CB_LEN];
        u32 res;

        if (unlikely(prog->cb_access)) {
                memcpy(cb_saved, cb_data, sizeof(cb_saved));
                memset(cb_data, 0, sizeof(cb_saved));
        }

        res = BPF_PROG_RUN(prog, skb);

        if (unlikely(prog->cb_access))
                memcpy(cb_data, cb_saved, sizeof(cb_saved));

        return res;
}

static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
                                        struct sk_buff *skb)
{
        u8 *cb_data = bpf_skb_cb(skb);

        if (unlikely(prog->cb_access))
                memset(cb_data, 0, BPF_SKB_CB_LEN);

        return BPF_PROG_RUN(prog, skb);
}

static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
                                            struct xdp_buff *xdp)
{
        /* Caller needs to hold rcu_read_lock() (!), otherwise the program
         * can be released while still running, or map elements could be
         * freed early while still having concurrent users. The XDP fastpath
         * already takes rcu_read_lock() when fetching the program, so
         * it's not necessary here anymore.
         */
        return BPF_PROG_RUN(prog, xdp);
}

static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
{
        return prog->len * sizeof(struct bpf_insn);
}

static inline u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog)
{
        return round_up(bpf_prog_insn_size(prog) +
                        sizeof(__be64) + 1, SHA_MESSAGE_BYTES);
}

static inline unsigned int bpf_prog_size(unsigned int proglen)
{
        return max(sizeof(struct bpf_prog),
                   offsetof(struct bpf_prog, insns[proglen]));
}

static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
{
        /* When classic BPF programs have been loaded and the arch
         * does not have a classic BPF JIT (anymore), they have been
         * converted via bpf_migrate_filter() to eBPF and thus always
         * have an unspec program type.
         */
        return prog->type == BPF_PROG_TYPE_UNSPEC;
}

static inline bool
bpf_ctx_narrow_access_ok(u32 off, u32 size, const u32 size_default)
{
        bool off_ok;
#ifdef __LITTLE_ENDIAN
        off_ok = (off & (size_default - 1)) == 0;
#else
        off_ok = (off & (size_default - 1)) + size == size_default;
#endif
        return off_ok && size <= size_default && (size & (size - 1)) == 0;
}

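/* Worked example: with size_default = 4, a narrow 2 byte load is
 * accepted at off = 0 on little endian ((off & 3) == 0) and at
 * off = 2 on big endian ((off & 3) + size == 4), i.e. only at the
 * offset covering the low-order bytes of the full-width field; size
 * must additionally be a power of two no larger than size_default.
 */
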
#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))

#ifdef CONFIG_ARCH_HAS_SET_MEMORY
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
        fp->locked = 1;
        WARN_ON_ONCE(set_memory_ro((unsigned long)fp, fp->pages));
}

static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
{
        if (fp->locked) {
                WARN_ON_ONCE(set_memory_rw((unsigned long)fp, fp->pages));
                /* In case set_memory_rw() fails, we want to be the first
                 * to crash here instead of some random place later on.
                 */
                fp->locked = 0;
        }
}

static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
{
        WARN_ON_ONCE(set_memory_ro((unsigned long)hdr, hdr->pages));
}

static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
{
        WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages));
}
#else
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
}

static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
{
}

static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
{
}

static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
{
}
#endif /* CONFIG_ARCH_HAS_SET_MEMORY */

static inline struct bpf_binary_header *
bpf_jit_binary_hdr(const struct bpf_prog *fp)
{
        unsigned long real_start = (unsigned long)fp->bpf_func;
        unsigned long addr = real_start & PAGE_MASK;

        return (void *)addr;
}

int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
{
        return sk_filter_trim_cap(sk, skb, 1);
}

struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
void bpf_prog_free(struct bpf_prog *fp);

bool bpf_opcode_in_insntable(u8 code);

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
                                  gfp_t gfp_extra_flags);
void __bpf_prog_free(struct bpf_prog *fp);

static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
{
        bpf_prog_unlock_ro(fp);
        __bpf_prog_free(fp);
}

typedef int (*bpf_aux_classic_check_t)(struct sock_filter *filter,
                                       unsigned int flen);

int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
                              bpf_aux_classic_check_t trans, bool save_orig);
void bpf_prog_destroy(struct bpf_prog *fp);

int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_attach_bpf(u32 ufd, struct sock *sk);
int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
int sk_detach_filter(struct sock *sk);
int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
                  unsigned int len);

bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);

u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
#define __bpf_call_base_args \
        ((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \
         __bpf_call_base)

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
void bpf_jit_compile(struct bpf_prog *prog);
bool bpf_helper_changes_pkt_data(void *func);

static inline bool bpf_dump_raw_ok(void)
{
        /* Reconstruction of call-sites depends on kallsyms, thus
         * apply the same restriction to dumps.
         */
        return kallsyms_show_value() == 1;
}

struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
                                       const struct bpf_insn *patch, u32 len);

/* The pair of xdp_do_redirect and xdp_do_flush_map MUST be called in the
 * same cpu context. Furthermore, for best results, no more than a single
 * map should be used for the do_redirect/do_flush pair. This limitation
 * exists because we only track one map and force a flush when the map
 * changes. This does not appear to be a real limitation for existing
 * software.
 */
int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
                            struct bpf_prog *prog);
int xdp_do_redirect(struct net_device *dev,
                    struct xdp_buff *xdp,
                    struct bpf_prog *prog);
void xdp_do_flush_map(void);

/* Drivers not supporting XDP metadata can use this helper, which
 * rejects any room expansion for metadata as a result.
 */
static __always_inline void
xdp_set_data_meta_invalid(struct xdp_buff *xdp)
{
        xdp->data_meta = xdp->data + 1;
}

static __always_inline bool
xdp_data_meta_unsupported(const struct xdp_buff *xdp)
{
        return unlikely(xdp->data_meta > xdp->data);
}

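/* A hedged sketch of driver usage: a driver without metadata support
 * marks the buffer right after filling it in, before running the
 * program (variable names are illustrative only):
 *
 *	xdp.data_hard_start = hard_start;
 *	xdp.data = hard_start + headroom;
 *	xdp.data_end = xdp.data + len;
 *	xdp_set_data_meta_invalid(&xdp);
 *
 * Helpers can then use xdp_data_meta_unsupported() to reject attempts
 * to grow metadata room on such buffers.
 */
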
void bpf_warn_invalid_xdp_action(u32 act);

struct sock *do_sk_redirect_map(struct sk_buff *skb);
struct sock *do_msg_redirect_map(struct sk_msg_buff *md);

#ifdef CONFIG_BPF_JIT
extern int bpf_jit_enable;
extern int bpf_jit_harden;
extern int bpf_jit_kallsyms;

typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
                     unsigned int alignment,
                     bpf_jit_fill_hole_t bpf_fill_ill_insns);
void bpf_jit_binary_free(struct bpf_binary_header *hdr);

void bpf_jit_free(struct bpf_prog *fp);

struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other);

static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
                                u32 pass, void *image)
{
        pr_err("flen=%u proglen=%u pass=%u image=%pK from=%s pid=%d\n", flen,
               proglen, pass, image, current->comm, task_pid_nr(current));

        if (image)
                print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
                               16, 1, image, proglen, false);
}

static inline bool bpf_jit_is_ebpf(void)
{
# ifdef CONFIG_HAVE_EBPF_JIT
        return true;
# else
        return false;
# endif
}

static inline bool ebpf_jit_enabled(void)
{
        return bpf_jit_enable && bpf_jit_is_ebpf();
}

static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
{
        return fp->jited && bpf_jit_is_ebpf();
}

static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
{
        /* These are the prerequisites; should someone ever have the
         * idea to call blinding outside of them, we make sure to
         * bail out.
         */
        if (!bpf_jit_is_ebpf())
                return false;
        if (!prog->jit_requested)
                return false;
        if (!bpf_jit_harden)
                return false;
        if (bpf_jit_harden == 1 && capable(CAP_SYS_ADMIN))
                return false;

        return true;
}

static inline bool bpf_jit_kallsyms_enabled(void)
{
        /* There are a couple of corner cases where kallsyms should
         * not be enabled, e.g. when hardening is active.
         */
        if (bpf_jit_harden)
                return false;
        if (!bpf_jit_kallsyms)
                return false;
        if (bpf_jit_kallsyms == 1)
                return true;

        return false;
}

const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
                                 unsigned long *off, char *sym);
bool is_bpf_text_address(unsigned long addr);
int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
                    char *sym);

static inline const char *
bpf_address_lookup(unsigned long addr, unsigned long *size,
                   unsigned long *off, char **modname, char *sym)
{
        const char *ret = __bpf_address_lookup(addr, size, off, sym);

        if (ret && modname)
                *modname = NULL;
        return ret;
}

void bpf_prog_kallsyms_add(struct bpf_prog *fp);
void bpf_prog_kallsyms_del(struct bpf_prog *fp);

#else /* CONFIG_BPF_JIT */

static inline bool ebpf_jit_enabled(void)
{
        return false;
}

static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
{
        return false;
}

static inline void bpf_jit_free(struct bpf_prog *fp)
{
        bpf_prog_unlock_free(fp);
}

static inline bool bpf_jit_kallsyms_enabled(void)
{
        return false;
}

static inline const char *
__bpf_address_lookup(unsigned long addr, unsigned long *size,
                     unsigned long *off, char *sym)
{
        return NULL;
}

static inline bool is_bpf_text_address(unsigned long addr)
{
        return false;
}

static inline int bpf_get_kallsym(unsigned int symnum, unsigned long *value,
                                  char *type, char *sym)
{
        return -ERANGE;
}

static inline const char *
bpf_address_lookup(unsigned long addr, unsigned long *size,
                   unsigned long *off, char **modname, char *sym)
{
        return NULL;
}

static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{
}

static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
}
#endif /* CONFIG_BPF_JIT */

#define BPF_ANC         BIT(15)

static inline bool bpf_needs_clear_a(const struct sock_filter *first)
{
        switch (first->code) {
        case BPF_RET | BPF_K:
        case BPF_LD | BPF_W | BPF_LEN:
                return false;

        case BPF_LD | BPF_W | BPF_ABS:
        case BPF_LD | BPF_H | BPF_ABS:
        case BPF_LD | BPF_B | BPF_ABS:
                if (first->k == SKF_AD_OFF + SKF_AD_ALU_XOR_X)
                        return true;
                return false;

        default:
                return true;
        }
}

static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
{
        BUG_ON(ftest->code & BPF_ANC);

        switch (ftest->code) {
        case BPF_LD | BPF_W | BPF_ABS:
        case BPF_LD | BPF_H | BPF_ABS:
        case BPF_LD | BPF_B | BPF_ABS:
#define BPF_ANCILLARY(CODE)     case SKF_AD_OFF + SKF_AD_##CODE:        \
                                return BPF_ANC | SKF_AD_##CODE
                switch (ftest->k) {
                BPF_ANCILLARY(PROTOCOL);
                BPF_ANCILLARY(PKTTYPE);
                BPF_ANCILLARY(IFINDEX);
                BPF_ANCILLARY(NLATTR);
                BPF_ANCILLARY(NLATTR_NEST);
                BPF_ANCILLARY(MARK);
                BPF_ANCILLARY(QUEUE);
                BPF_ANCILLARY(HATYPE);
                BPF_ANCILLARY(RXHASH);
                BPF_ANCILLARY(CPU);
                BPF_ANCILLARY(ALU_XOR_X);
                BPF_ANCILLARY(VLAN_TAG);
                BPF_ANCILLARY(VLAN_TAG_PRESENT);
                BPF_ANCILLARY(PAY_OFFSET);
                BPF_ANCILLARY(RANDOM);
                BPF_ANCILLARY(VLAN_TPID);
                }
                /* Fallthrough. */
        default:
                return ftest->code;
        }
}

void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
                                           int k, unsigned int size);

static inline void *bpf_load_pointer(const struct sk_buff *skb, int k,
                                     unsigned int size, void *buffer)
{
        if (k >= 0)
                return skb_header_pointer(skb, k, size, buffer);

        return bpf_internal_load_pointer_neg_helper(skb, k, size);
}

static inline int bpf_tell_extensions(void)
{
        return SKF_AD_MAX;
}

struct bpf_sock_addr_kern {
        struct sock *sk;
        struct sockaddr *uaddr;
        /* Temporary "register" to make indirect stores to nested structures
         * defined above. We need three registers to make such a store, but
         * only two (src and dst) are available at convert_ctx_access time.
         */
        u64 tmp_reg;
};

struct bpf_sock_ops_kern {
        struct  sock *sk;
        u32     op;
        union {
                u32 args[4];
                u32 reply;
                u32 replylong[4];
        };
        u32     is_fullsock;
        u64     temp;                   /* temp and everything after it is not
                                         * initialized to 0 before calling
                                         * the BPF program. New fields that
                                         * should be initialized to 0 should
                                         * be inserted before temp.
                                         * temp is scratch storage used by
                                         * sock_ops_convert_ctx_access
                                         * as temporary storage of a register.
                                         */
};

#endif /* __LINUX_FILTER_H__ */