linux/include/linux/filter.h
   1/* SPDX-License-Identifier: GPL-2.0 */
   2/*
   3 * Linux Socket Filter Data Structures
   4 */
   5#ifndef __LINUX_FILTER_H__
   6#define __LINUX_FILTER_H__
   7
   8#include <linux/atomic.h>
   9#include <linux/bpf.h>
  10#include <linux/refcount.h>
  11#include <linux/compat.h>
  12#include <linux/skbuff.h>
  13#include <linux/linkage.h>
  14#include <linux/printk.h>
  15#include <linux/workqueue.h>
  16#include <linux/sched.h>
  17#include <linux/capability.h>
  18#include <linux/set_memory.h>
  19#include <linux/kallsyms.h>
  20#include <linux/if_vlan.h>
  21#include <linux/vmalloc.h>
  22#include <linux/sockptr.h>
  23#include <crypto/sha1.h>
  24#include <linux/u64_stats_sync.h>
  25
  26#include <net/sch_generic.h>
  27
  28#include <asm/byteorder.h>
  29#include <uapi/linux/filter.h>
  30
  31struct sk_buff;
  32struct sock;
  33struct seccomp_data;
  34struct bpf_prog_aux;
  35struct xdp_rxq_info;
  36struct xdp_buff;
  37struct sock_reuseport;
  38struct ctl_table;
  39struct ctl_table_header;
  40
  41/* ArgX, context and stack frame pointer register positions. Note,
   42 * Arg1, Arg2, Arg3, etc. are used as argument mappings for function
   43 * calls in the BPF_CALL instruction.
  44 */
  45#define BPF_REG_ARG1    BPF_REG_1
  46#define BPF_REG_ARG2    BPF_REG_2
  47#define BPF_REG_ARG3    BPF_REG_3
  48#define BPF_REG_ARG4    BPF_REG_4
  49#define BPF_REG_ARG5    BPF_REG_5
  50#define BPF_REG_CTX     BPF_REG_6
  51#define BPF_REG_FP      BPF_REG_10
  52
  53/* Additional register mappings for converted user programs. */
  54#define BPF_REG_A       BPF_REG_0
  55#define BPF_REG_X       BPF_REG_7
  56#define BPF_REG_TMP     BPF_REG_2       /* scratch reg */
  57#define BPF_REG_D       BPF_REG_8       /* data, callee-saved */
  58#define BPF_REG_H       BPF_REG_9       /* hlen, callee-saved */
  59
  60/* Kernel hidden auxiliary/helper register. */
  61#define BPF_REG_AX              MAX_BPF_REG
  62#define MAX_BPF_EXT_REG         (MAX_BPF_REG + 1)
  63#define MAX_BPF_JIT_REG         MAX_BPF_EXT_REG
  64
  65/* unused opcode to mark special call to bpf_tail_call() helper */
  66#define BPF_TAIL_CALL   0xf0
  67
  68/* unused opcode to mark special load instruction. Same as BPF_ABS */
  69#define BPF_PROBE_MEM   0x20
  70
  71/* unused opcode to mark call to interpreter with arguments */
  72#define BPF_CALL_ARGS   0xe0
  73
  74/* unused opcode to mark speculation barrier for mitigating
  75 * Speculative Store Bypass
  76 */
  77#define BPF_NOSPEC      0xc0
  78
  79/* As per nm, we expose JITed images as text (code) section for
  80 * kallsyms. That way, tools like perf can find it to match
  81 * addresses.
  82 */
  83#define BPF_SYM_ELF_TYPE        't'
  84
  85/* BPF program can access up to 512 bytes of stack space. */
  86#define MAX_BPF_STACK   512
  87
  88/* Helper macros for filter block array initializers. */
  89
  90/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
  91
  92#define BPF_ALU64_REG(OP, DST, SRC)                             \
  93        ((struct bpf_insn) {                                    \
  94                .code  = BPF_ALU64 | BPF_OP(OP) | BPF_X,        \
  95                .dst_reg = DST,                                 \
  96                .src_reg = SRC,                                 \
  97                .off   = 0,                                     \
  98                .imm   = 0 })
  99
 100#define BPF_ALU32_REG(OP, DST, SRC)                             \
 101        ((struct bpf_insn) {                                    \
 102                .code  = BPF_ALU | BPF_OP(OP) | BPF_X,          \
 103                .dst_reg = DST,                                 \
 104                .src_reg = SRC,                                 \
 105                .off   = 0,                                     \
 106                .imm   = 0 })
 107
 108/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */
 109
 110#define BPF_ALU64_IMM(OP, DST, IMM)                             \
 111        ((struct bpf_insn) {                                    \
 112                .code  = BPF_ALU64 | BPF_OP(OP) | BPF_K,        \
 113                .dst_reg = DST,                                 \
 114                .src_reg = 0,                                   \
 115                .off   = 0,                                     \
 116                .imm   = IMM })
 117
 118#define BPF_ALU32_IMM(OP, DST, IMM)                             \
 119        ((struct bpf_insn) {                                    \
 120                .code  = BPF_ALU | BPF_OP(OP) | BPF_K,          \
 121                .dst_reg = DST,                                 \
 122                .src_reg = 0,                                   \
 123                .off   = 0,                                     \
 124                .imm   = IMM })
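
/* Illustrative sketch (not part of this header): the block initializers
 * above are meant for hand-building instruction arrays, e.g. in test code
 * or in the converters further below. Names here are examples only:
 *
 *	const struct bpf_insn alu_example[] = {
 *		BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 42),		// r0 += 42
 *		BPF_ALU32_REG(BPF_XOR, BPF_REG_1, BPF_REG_1),	// w1 ^= w1
 *	};
 */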
 125
  126/* Endianness conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */
 127
 128#define BPF_ENDIAN(TYPE, DST, LEN)                              \
 129        ((struct bpf_insn) {                                    \
 130                .code  = BPF_ALU | BPF_END | BPF_SRC(TYPE),     \
 131                .dst_reg = DST,                                 \
 132                .src_reg = 0,                                   \
 133                .off   = 0,                                     \
 134                .imm   = LEN })
 135
 136/* Short form of mov, dst_reg = src_reg */
 137
 138#define BPF_MOV64_REG(DST, SRC)                                 \
 139        ((struct bpf_insn) {                                    \
 140                .code  = BPF_ALU64 | BPF_MOV | BPF_X,           \
 141                .dst_reg = DST,                                 \
 142                .src_reg = SRC,                                 \
 143                .off   = 0,                                     \
 144                .imm   = 0 })
 145
 146#define BPF_MOV32_REG(DST, SRC)                                 \
 147        ((struct bpf_insn) {                                    \
 148                .code  = BPF_ALU | BPF_MOV | BPF_X,             \
 149                .dst_reg = DST,                                 \
 150                .src_reg = SRC,                                 \
 151                .off   = 0,                                     \
 152                .imm   = 0 })
 153
 154/* Short form of mov, dst_reg = imm32 */
 155
 156#define BPF_MOV64_IMM(DST, IMM)                                 \
 157        ((struct bpf_insn) {                                    \
 158                .code  = BPF_ALU64 | BPF_MOV | BPF_K,           \
 159                .dst_reg = DST,                                 \
 160                .src_reg = 0,                                   \
 161                .off   = 0,                                     \
 162                .imm   = IMM })
 163
 164#define BPF_MOV32_IMM(DST, IMM)                                 \
 165        ((struct bpf_insn) {                                    \
 166                .code  = BPF_ALU | BPF_MOV | BPF_K,             \
 167                .dst_reg = DST,                                 \
 168                .src_reg = 0,                                   \
 169                .off   = 0,                                     \
 170                .imm   = IMM })
 171
 172/* Special form of mov32, used for doing explicit zero extension on dst. */
 173#define BPF_ZEXT_REG(DST)                                       \
 174        ((struct bpf_insn) {                                    \
 175                .code  = BPF_ALU | BPF_MOV | BPF_X,             \
 176                .dst_reg = DST,                                 \
 177                .src_reg = DST,                                 \
 178                .off   = 0,                                     \
 179                .imm   = 1 })
 180
 181static inline bool insn_is_zext(const struct bpf_insn *insn)
 182{
 183        return insn->code == (BPF_ALU | BPF_MOV | BPF_X) && insn->imm == 1;
 184}
 185
  186/* BPF_LD_IMM64 macro encodes a single 'load 64-bit immediate' insn */
 187#define BPF_LD_IMM64(DST, IMM)                                  \
 188        BPF_LD_IMM64_RAW(DST, 0, IMM)
 189
 190#define BPF_LD_IMM64_RAW(DST, SRC, IMM)                         \
 191        ((struct bpf_insn) {                                    \
 192                .code  = BPF_LD | BPF_DW | BPF_IMM,             \
 193                .dst_reg = DST,                                 \
 194                .src_reg = SRC,                                 \
 195                .off   = 0,                                     \
 196                .imm   = (__u32) (IMM) }),                      \
 197        ((struct bpf_insn) {                                    \
 198                .code  = 0, /* zero is reserved opcode */       \
 199                .dst_reg = 0,                                   \
 200                .src_reg = 0,                                   \
 201                .off   = 0,                                     \
 202                .imm   = ((__u64) (IMM)) >> 32 })
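
/* Illustrative sketch (not part of this header): BPF_LD_IMM64_RAW() expands
 * to two comma-separated initializers, so it fills two consecutive slots of
 * an instruction array:
 *
 *	const struct bpf_insn ld_example[] = {
 *		BPF_LD_IMM64(BPF_REG_1, 0x1234567890abcdefULL),	// two insns
 *	};
 */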
 203
 204/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
 205#define BPF_LD_MAP_FD(DST, MAP_FD)                              \
 206        BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)
 207
 208/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */
 209
 210#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM)                      \
 211        ((struct bpf_insn) {                                    \
 212                .code  = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE),   \
 213                .dst_reg = DST,                                 \
 214                .src_reg = SRC,                                 \
 215                .off   = 0,                                     \
 216                .imm   = IMM })
 217
 218#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM)                      \
 219        ((struct bpf_insn) {                                    \
 220                .code  = BPF_ALU | BPF_MOV | BPF_SRC(TYPE),     \
 221                .dst_reg = DST,                                 \
 222                .src_reg = SRC,                                 \
 223                .off   = 0,                                     \
 224                .imm   = IMM })
 225
 226/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */
 227
 228#define BPF_LD_ABS(SIZE, IMM)                                   \
 229        ((struct bpf_insn) {                                    \
 230                .code  = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS,     \
 231                .dst_reg = 0,                                   \
 232                .src_reg = 0,                                   \
 233                .off   = 0,                                     \
 234                .imm   = IMM })
 235
 236/* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */
 237
 238#define BPF_LD_IND(SIZE, SRC, IMM)                              \
 239        ((struct bpf_insn) {                                    \
 240                .code  = BPF_LD | BPF_SIZE(SIZE) | BPF_IND,     \
 241                .dst_reg = 0,                                   \
 242                .src_reg = SRC,                                 \
 243                .off   = 0,                                     \
 244                .imm   = IMM })
 245
 246/* Memory load, dst_reg = *(uint *) (src_reg + off16) */
 247
 248#define BPF_LDX_MEM(SIZE, DST, SRC, OFF)                        \
 249        ((struct bpf_insn) {                                    \
 250                .code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM,    \
 251                .dst_reg = DST,                                 \
 252                .src_reg = SRC,                                 \
 253                .off   = OFF,                                   \
 254                .imm   = 0 })
 255
 256/* Memory store, *(uint *) (dst_reg + off16) = src_reg */
 257
 258#define BPF_STX_MEM(SIZE, DST, SRC, OFF)                        \
 259        ((struct bpf_insn) {                                    \
 260                .code  = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM,    \
 261                .dst_reg = DST,                                 \
 262                .src_reg = SRC,                                 \
 263                .off   = OFF,                                   \
 264                .imm   = 0 })
 265
 266
 267/*
 268 * Atomic operations:
 269 *
 270 *   BPF_ADD                  *(uint *) (dst_reg + off16) += src_reg
 271 *   BPF_AND                  *(uint *) (dst_reg + off16) &= src_reg
 272 *   BPF_OR                   *(uint *) (dst_reg + off16) |= src_reg
 273 *   BPF_XOR                  *(uint *) (dst_reg + off16) ^= src_reg
 274 *   BPF_ADD | BPF_FETCH      src_reg = atomic_fetch_add(dst_reg + off16, src_reg);
 275 *   BPF_AND | BPF_FETCH      src_reg = atomic_fetch_and(dst_reg + off16, src_reg);
 276 *   BPF_OR | BPF_FETCH       src_reg = atomic_fetch_or(dst_reg + off16, src_reg);
 277 *   BPF_XOR | BPF_FETCH      src_reg = atomic_fetch_xor(dst_reg + off16, src_reg);
 278 *   BPF_XCHG                 src_reg = atomic_xchg(dst_reg + off16, src_reg)
 279 *   BPF_CMPXCHG              r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg)
 280 */
 281
 282#define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF)                  \
 283        ((struct bpf_insn) {                                    \
 284                .code  = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC, \
 285                .dst_reg = DST,                                 \
 286                .src_reg = SRC,                                 \
 287                .off   = OFF,                                   \
 288                .imm   = OP })
 289
 290/* Legacy alias */
 291#define BPF_STX_XADD(SIZE, DST, SRC, OFF) BPF_ATOMIC_OP(SIZE, BPF_ADD, DST, SRC, OFF)
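
/* Illustrative sketch (not part of this header): an atomic 64-bit fetch-add
 * at dst_reg + off, with the old value returned in src_reg, would be encoded
 * as:
 *
 *	BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_2, BPF_REG_3, 0)
 *
 * The legacy BPF_STX_XADD(SIZE, DST, SRC, OFF) is the non-fetching BPF_ADD
 * form of the same instruction.
 */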
 292
 293/* Memory store, *(uint *) (dst_reg + off16) = imm32 */
 294
 295#define BPF_ST_MEM(SIZE, DST, OFF, IMM)                         \
 296        ((struct bpf_insn) {                                    \
 297                .code  = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM,     \
 298                .dst_reg = DST,                                 \
 299                .src_reg = 0,                                   \
 300                .off   = OFF,                                   \
 301                .imm   = IMM })
 302
 303/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */
 304
 305#define BPF_JMP_REG(OP, DST, SRC, OFF)                          \
 306        ((struct bpf_insn) {                                    \
 307                .code  = BPF_JMP | BPF_OP(OP) | BPF_X,          \
 308                .dst_reg = DST,                                 \
 309                .src_reg = SRC,                                 \
 310                .off   = OFF,                                   \
 311                .imm   = 0 })
 312
 313/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */
 314
 315#define BPF_JMP_IMM(OP, DST, IMM, OFF)                          \
 316        ((struct bpf_insn) {                                    \
 317                .code  = BPF_JMP | BPF_OP(OP) | BPF_K,          \
 318                .dst_reg = DST,                                 \
 319                .src_reg = 0,                                   \
 320                .off   = OFF,                                   \
 321                .imm   = IMM })
 322
 323/* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */
 324
 325#define BPF_JMP32_REG(OP, DST, SRC, OFF)                        \
 326        ((struct bpf_insn) {                                    \
 327                .code  = BPF_JMP32 | BPF_OP(OP) | BPF_X,        \
 328                .dst_reg = DST,                                 \
 329                .src_reg = SRC,                                 \
 330                .off   = OFF,                                   \
 331                .imm   = 0 })
 332
 333/* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. */
 334
 335#define BPF_JMP32_IMM(OP, DST, IMM, OFF)                        \
 336        ((struct bpf_insn) {                                    \
 337                .code  = BPF_JMP32 | BPF_OP(OP) | BPF_K,        \
 338                .dst_reg = DST,                                 \
 339                .src_reg = 0,                                   \
 340                .off   = OFF,                                   \
 341                .imm   = IMM })
 342
 343/* Unconditional jumps, goto pc + off16 */
 344
 345#define BPF_JMP_A(OFF)                                          \
 346        ((struct bpf_insn) {                                    \
 347                .code  = BPF_JMP | BPF_JA,                      \
 348                .dst_reg = 0,                                   \
 349                .src_reg = 0,                                   \
 350                .off   = OFF,                                   \
 351                .imm   = 0 })
 352
 353/* Relative call */
 354
 355#define BPF_CALL_REL(TGT)                                       \
 356        ((struct bpf_insn) {                                    \
 357                .code  = BPF_JMP | BPF_CALL,                    \
 358                .dst_reg = 0,                                   \
 359                .src_reg = BPF_PSEUDO_CALL,                     \
 360                .off   = 0,                                     \
 361                .imm   = TGT })
 362
 363/* Convert function address to BPF immediate */
 364
 365#define BPF_CALL_IMM(x) ((void *)(x) - (void *)__bpf_call_base)
 366
 367#define BPF_EMIT_CALL(FUNC)                                     \
 368        ((struct bpf_insn) {                                    \
 369                .code  = BPF_JMP | BPF_CALL,                    \
 370                .dst_reg = 0,                                   \
 371                .src_reg = 0,                                   \
 372                .off   = 0,                                     \
 373                .imm   = BPF_CALL_IMM(FUNC) })
 374
 375/* Raw code statement block */
 376
 377#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM)                  \
 378        ((struct bpf_insn) {                                    \
 379                .code  = CODE,                                  \
 380                .dst_reg = DST,                                 \
 381                .src_reg = SRC,                                 \
 382                .off   = OFF,                                   \
 383                .imm   = IMM })
 384
 385/* Program exit */
 386
 387#define BPF_EXIT_INSN()                                         \
 388        ((struct bpf_insn) {                                    \
 389                .code  = BPF_JMP | BPF_EXIT,                    \
 390                .dst_reg = 0,                                   \
 391                .src_reg = 0,                                   \
 392                .off   = 0,                                     \
 393                .imm   = 0 })
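
/* Illustrative sketch (not part of this header): a complete, minimal program
 * built from the macros above, which simply returns 1:
 *
 *	const struct bpf_insn ret_one[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 1),	// r0 = 1
 *		BPF_EXIT_INSN(),		// return r0
 *	};
 */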
 394
 395/* Speculation barrier */
 396
 397#define BPF_ST_NOSPEC()                                         \
 398        ((struct bpf_insn) {                                    \
 399                .code  = BPF_ST | BPF_NOSPEC,                   \
 400                .dst_reg = 0,                                   \
 401                .src_reg = 0,                                   \
 402                .off   = 0,                                     \
 403                .imm   = 0 })
 404
 405/* Internal classic blocks for direct assignment */
 406
 407#define __BPF_STMT(CODE, K)                                     \
 408        ((struct sock_filter) BPF_STMT(CODE, K))
 409
 410#define __BPF_JUMP(CODE, K, JT, JF)                             \
 411        ((struct sock_filter) BPF_JUMP(CODE, K, JT, JF))
 412
 413#define bytes_to_bpf_size(bytes)                                \
 414({                                                              \
 415        int bpf_size = -EINVAL;                                 \
 416                                                                \
 417        if (bytes == sizeof(u8))                                \
 418                bpf_size = BPF_B;                               \
 419        else if (bytes == sizeof(u16))                          \
 420                bpf_size = BPF_H;                               \
 421        else if (bytes == sizeof(u32))                          \
 422                bpf_size = BPF_W;                               \
 423        else if (bytes == sizeof(u64))                          \
 424                bpf_size = BPF_DW;                              \
 425                                                                \
 426        bpf_size;                                               \
 427})
 428
 429#define bpf_size_to_bytes(bpf_size)                             \
 430({                                                              \
 431        int bytes = -EINVAL;                                    \
 432                                                                \
 433        if (bpf_size == BPF_B)                                  \
 434                bytes = sizeof(u8);                             \
 435        else if (bpf_size == BPF_H)                             \
 436                bytes = sizeof(u16);                            \
 437        else if (bpf_size == BPF_W)                             \
 438                bytes = sizeof(u32);                            \
 439        else if (bpf_size == BPF_DW)                            \
 440                bytes = sizeof(u64);                            \
 441                                                                \
 442        bytes;                                                  \
 443})
 444
 445#define BPF_SIZEOF(type)                                        \
 446        ({                                                      \
 447                const int __size = bytes_to_bpf_size(sizeof(type)); \
 448                BUILD_BUG_ON(__size < 0);                       \
 449                __size;                                         \
 450        })
 451
 452#define BPF_FIELD_SIZEOF(type, field)                           \
 453        ({                                                      \
 454                const int __size = bytes_to_bpf_size(sizeof_field(type, field)); \
 455                BUILD_BUG_ON(__size < 0);                       \
 456                __size;                                         \
 457        })
 458
 459#define BPF_LDST_BYTES(insn)                                    \
 460        ({                                                      \
 461                const int __size = bpf_size_to_bytes(BPF_SIZE((insn)->code)); \
 462                WARN_ON(__size < 0);                            \
 463                __size;                                         \
 464        })
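
/* Illustrative sketch (not part of this header): BPF_SIZEOF()/BPF_FIELD_SIZEOF()
 * are typically used when emitting loads/stores from ctx rewrite callbacks.
 * A hypothetical rewrite emitting a load of skb->len, assuming R1 holds a
 * struct sk_buff pointer:
 *
 *	struct bpf_insn insn = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, len),
 *					   BPF_REG_0, BPF_REG_1,
 *					   offsetof(struct sk_buff, len));
 */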
 465
 466#define __BPF_MAP_0(m, v, ...) v
 467#define __BPF_MAP_1(m, v, t, a, ...) m(t, a)
 468#define __BPF_MAP_2(m, v, t, a, ...) m(t, a), __BPF_MAP_1(m, v, __VA_ARGS__)
 469#define __BPF_MAP_3(m, v, t, a, ...) m(t, a), __BPF_MAP_2(m, v, __VA_ARGS__)
 470#define __BPF_MAP_4(m, v, t, a, ...) m(t, a), __BPF_MAP_3(m, v, __VA_ARGS__)
 471#define __BPF_MAP_5(m, v, t, a, ...) m(t, a), __BPF_MAP_4(m, v, __VA_ARGS__)
 472
 473#define __BPF_REG_0(...) __BPF_PAD(5)
 474#define __BPF_REG_1(...) __BPF_MAP(1, __VA_ARGS__), __BPF_PAD(4)
 475#define __BPF_REG_2(...) __BPF_MAP(2, __VA_ARGS__), __BPF_PAD(3)
 476#define __BPF_REG_3(...) __BPF_MAP(3, __VA_ARGS__), __BPF_PAD(2)
 477#define __BPF_REG_4(...) __BPF_MAP(4, __VA_ARGS__), __BPF_PAD(1)
 478#define __BPF_REG_5(...) __BPF_MAP(5, __VA_ARGS__)
 479
 480#define __BPF_MAP(n, ...) __BPF_MAP_##n(__VA_ARGS__)
 481#define __BPF_REG(n, ...) __BPF_REG_##n(__VA_ARGS__)
 482
 483#define __BPF_CAST(t, a)                                                       \
 484        (__force t)                                                            \
 485        (__force                                                               \
 486         typeof(__builtin_choose_expr(sizeof(t) == sizeof(unsigned long),      \
 487                                      (unsigned long)0, (t)0))) a
 488#define __BPF_V void
 489#define __BPF_N
 490
 491#define __BPF_DECL_ARGS(t, a) t   a
 492#define __BPF_DECL_REGS(t, a) u64 a
 493
 494#define __BPF_PAD(n)                                                           \
 495        __BPF_MAP(n, __BPF_DECL_ARGS, __BPF_N, u64, __ur_1, u64, __ur_2,       \
 496                  u64, __ur_3, u64, __ur_4, u64, __ur_5)
 497
 498#define BPF_CALL_x(x, name, ...)                                               \
 499        static __always_inline                                                 \
 500        u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__));   \
 501        typedef u64 (*btf_##name)(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
 502        u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__));         \
 503        u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__))          \
 504        {                                                                      \
 505                return ((btf_##name)____##name)(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
 506        }                                                                      \
 507        static __always_inline                                                 \
 508        u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))
 509
 510#define BPF_CALL_0(name, ...)   BPF_CALL_x(0, name, __VA_ARGS__)
 511#define BPF_CALL_1(name, ...)   BPF_CALL_x(1, name, __VA_ARGS__)
 512#define BPF_CALL_2(name, ...)   BPF_CALL_x(2, name, __VA_ARGS__)
 513#define BPF_CALL_3(name, ...)   BPF_CALL_x(3, name, __VA_ARGS__)
 514#define BPF_CALL_4(name, ...)   BPF_CALL_x(4, name, __VA_ARGS__)
 515#define BPF_CALL_5(name, ...)   BPF_CALL_x(5, name, __VA_ARGS__)
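
/* Illustrative sketch (not part of this header): BPF_CALL_x() is how helpers
 * are defined; it emits the u64-register wrapper called by the interpreter
 * and JITs plus the typed inline body. A hypothetical two-argument helper:
 *
 *	BPF_CALL_2(bpf_example_add, u32, a, u32, b)
 *	{
 *		return a + b;
 *	}
 */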
 516
 517#define bpf_ctx_range(TYPE, MEMBER)                                             \
 518        offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
 519#define bpf_ctx_range_till(TYPE, MEMBER1, MEMBER2)                              \
 520        offsetof(TYPE, MEMBER1) ... offsetofend(TYPE, MEMBER2) - 1
 521#if BITS_PER_LONG == 64
 522# define bpf_ctx_range_ptr(TYPE, MEMBER)                                        \
 523        offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
 524#else
 525# define bpf_ctx_range_ptr(TYPE, MEMBER)                                        \
 526        offsetof(TYPE, MEMBER) ... offsetof(TYPE, MEMBER) + 8 - 1
 527#endif /* BITS_PER_LONG == 64 */
 528
 529#define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE)                            \
 530        ({                                                                      \
 531                BUILD_BUG_ON(sizeof_field(TYPE, MEMBER) != (SIZE));             \
 532                *(PTR_SIZE) = (SIZE);                                           \
 533                offsetof(TYPE, MEMBER);                                         \
 534        })
 535
 536/* A struct sock_filter is architecture independent. */
 537struct compat_sock_fprog {
 538        u16             len;
 539        compat_uptr_t   filter; /* struct sock_filter * */
 540};
 541
 542struct sock_fprog_kern {
 543        u16                     len;
 544        struct sock_filter      *filter;
 545};
 546
 547/* Some arches need doubleword alignment for their instructions and/or data */
 548#define BPF_IMAGE_ALIGNMENT 8
 549
 550struct bpf_binary_header {
 551        u32 size;
 552        u8 image[] __aligned(BPF_IMAGE_ALIGNMENT);
 553};
 554
 555struct bpf_prog_stats {
 556        u64_stats_t cnt;
 557        u64_stats_t nsecs;
 558        u64_stats_t misses;
 559        struct u64_stats_sync syncp;
 560} __aligned(2 * sizeof(u64));
 561
 562struct bpf_prog {
 563        u16                     pages;          /* Number of allocated pages */
 564        u16                     jited:1,        /* Is our filter JIT'ed? */
 565                                jit_requested:1,/* archs need to JIT the prog */
 566                                gpl_compatible:1, /* Is filter GPL compatible? */
 567                                cb_access:1,    /* Is control block accessed? */
 568                                dst_needed:1,   /* Do we need dst entry? */
 569                                blinding_requested:1, /* needs constant blinding */
 570                                blinded:1,      /* Was blinded */
 571                                is_func:1,      /* program is a bpf function */
 572                                kprobe_override:1, /* Do we override a kprobe? */
 573                                has_callchain_buf:1, /* callchain buffer allocated? */
 574                                enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */
 575                                call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */
 576                                call_get_func_ip:1, /* Do we call get_func_ip() */
 577                                tstamp_type_access:1; /* Accessed __sk_buff->tstamp_type */
 578        enum bpf_prog_type      type;           /* Type of BPF program */
 579        enum bpf_attach_type    expected_attach_type; /* For some prog types */
 580        u32                     len;            /* Number of filter blocks */
 581        u32                     jited_len;      /* Size of jited insns in bytes */
 582        u8                      tag[BPF_TAG_SIZE];
 583        struct bpf_prog_stats __percpu *stats;
 584        int __percpu            *active;
 585        unsigned int            (*bpf_func)(const void *ctx,
 586                                            const struct bpf_insn *insn);
 587        struct bpf_prog_aux     *aux;           /* Auxiliary fields */
 588        struct sock_fprog_kern  *orig_prog;     /* Original BPF program */
 589        /* Instructions for interpreter */
 590        union {
 591                DECLARE_FLEX_ARRAY(struct sock_filter, insns);
 592                DECLARE_FLEX_ARRAY(struct bpf_insn, insnsi);
 593        };
 594};
 595
 596struct sk_filter {
 597        refcount_t      refcnt;
 598        struct rcu_head rcu;
 599        struct bpf_prog *prog;
 600};
 601
 602DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
 603
 604typedef unsigned int (*bpf_dispatcher_fn)(const void *ctx,
 605                                          const struct bpf_insn *insnsi,
 606                                          unsigned int (*bpf_func)(const void *,
 607                                                                   const struct bpf_insn *));
 608
 609static __always_inline u32 __bpf_prog_run(const struct bpf_prog *prog,
 610                                          const void *ctx,
 611                                          bpf_dispatcher_fn dfunc)
 612{
 613        u32 ret;
 614
 615        cant_migrate();
 616        if (static_branch_unlikely(&bpf_stats_enabled_key)) {
 617                struct bpf_prog_stats *stats;
 618                u64 start = sched_clock();
 619                unsigned long flags;
 620
 621                ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
 622                stats = this_cpu_ptr(prog->stats);
 623                flags = u64_stats_update_begin_irqsave(&stats->syncp);
 624                u64_stats_inc(&stats->cnt);
 625                u64_stats_add(&stats->nsecs, sched_clock() - start);
 626                u64_stats_update_end_irqrestore(&stats->syncp, flags);
 627        } else {
 628                ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
 629        }
 630        return ret;
 631}
 632
 633static __always_inline u32 bpf_prog_run(const struct bpf_prog *prog, const void *ctx)
 634{
 635        return __bpf_prog_run(prog, ctx, bpf_dispatcher_nop_func);
 636}
 637
 638/*
 639 * Use in preemptible and therefore migratable context to make sure that
 640 * the execution of the BPF program runs on one CPU.
 641 *
 642 * This uses migrate_disable/enable() explicitly to document that the
 643 * invocation of a BPF program does not require reentrancy protection
 644 * against a BPF program which is invoked from a preempting task.
 645 */
 646static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog,
 647                                          const void *ctx)
 648{
 649        u32 ret;
 650
 651        migrate_disable();
 652        ret = bpf_prog_run(prog, ctx);
 653        migrate_enable();
 654        return ret;
 655}
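
/* Illustrative sketch (not part of this header): callers in preemptible
 * (e.g. syscall) context use the pinned variant above, while softirq/NAPI
 * callers, which already cannot migrate, call bpf_prog_run() directly:
 *
 *	verdict = bpf_prog_run_pin_on_cpu(prog, ctx);	// preemptible context
 *	verdict = bpf_prog_run(prog, ctx);		// BH/NAPI context
 */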
 656
 657#define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN
 658
 659struct bpf_skb_data_end {
 660        struct qdisc_skb_cb qdisc_cb;
 661        void *data_meta;
 662        void *data_end;
 663};
 664
 665struct bpf_nh_params {
 666        u32 nh_family;
 667        union {
 668                u32 ipv4_nh;
 669                struct in6_addr ipv6_nh;
 670        };
 671};
 672
 673struct bpf_redirect_info {
 674        u32 flags;
 675        u32 tgt_index;
 676        void *tgt_value;
 677        struct bpf_map *map;
 678        u32 map_id;
 679        enum bpf_map_type map_type;
 680        u32 kern_flags;
 681        struct bpf_nh_params nh;
 682};
 683
 684DECLARE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);
 685
 686/* flags for bpf_redirect_info kern_flags */
 687#define BPF_RI_F_RF_NO_DIRECT   BIT(0)  /* no napi_direct on return_frame */
 688
 689/* Compute the linear packet data range [data, data_end) which
 690 * will be accessed by various program types (cls_bpf, act_bpf,
 691 * lwt, ...). Subsystems allowing direct data access must (!)
  692 * ensure that the cb[] area can be written to when the BPF program is
  693 * invoked (otherwise a cb[] save/restore is necessary).
 694 */
 695static inline void bpf_compute_data_pointers(struct sk_buff *skb)
 696{
 697        struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
 698
 699        BUILD_BUG_ON(sizeof(*cb) > sizeof_field(struct sk_buff, cb));
 700        cb->data_meta = skb->data - skb_metadata_len(skb);
 701        cb->data_end  = skb->data + skb_headlen(skb);
 702}
 703
  704/* Similar to bpf_compute_data_pointers(), except that the original
  705 * cb->data_end is saved in *saved_data_end for a later restore.
 706 */
 707static inline void bpf_compute_and_save_data_end(
 708        struct sk_buff *skb, void **saved_data_end)
 709{
 710        struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
 711
 712        *saved_data_end = cb->data_end;
 713        cb->data_end  = skb->data + skb_headlen(skb);
 714}
 715
  716/* Restore the data_end saved by bpf_compute_and_save_data_end(). */
 717static inline void bpf_restore_data_end(
 718        struct sk_buff *skb, void *saved_data_end)
 719{
 720        struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
 721
 722        cb->data_end = saved_data_end;
 723}
 724
 725static inline u8 *bpf_skb_cb(const struct sk_buff *skb)
 726{
 727        /* eBPF programs may read/write skb->cb[] area to transfer meta
 728         * data between tail calls. Since this also needs to work with
 729         * tc, that scratch memory is mapped to qdisc_skb_cb's data area.
 730         *
 731         * In some socket filter cases, the cb unfortunately needs to be
 732         * saved/restored so that protocol specific skb->cb[] data won't
  733 * be lost. In any case, due to unprivileged eBPF programs
 734         * attached to sockets, we need to clear the bpf_skb_cb() area
 735         * to not leak previous contents to user space.
 736         */
 737        BUILD_BUG_ON(sizeof_field(struct __sk_buff, cb) != BPF_SKB_CB_LEN);
 738        BUILD_BUG_ON(sizeof_field(struct __sk_buff, cb) !=
 739                     sizeof_field(struct qdisc_skb_cb, data));
 740
 741        return qdisc_skb_cb(skb)->data;
 742}
 743
 744/* Must be invoked with migration disabled */
 745static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
 746                                         const void *ctx)
 747{
 748        const struct sk_buff *skb = ctx;
 749        u8 *cb_data = bpf_skb_cb(skb);
 750        u8 cb_saved[BPF_SKB_CB_LEN];
 751        u32 res;
 752
 753        if (unlikely(prog->cb_access)) {
 754                memcpy(cb_saved, cb_data, sizeof(cb_saved));
 755                memset(cb_data, 0, sizeof(cb_saved));
 756        }
 757
 758        res = bpf_prog_run(prog, skb);
 759
 760        if (unlikely(prog->cb_access))
 761                memcpy(cb_data, cb_saved, sizeof(cb_saved));
 762
 763        return res;
 764}
 765
 766static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
 767                                       struct sk_buff *skb)
 768{
 769        u32 res;
 770
 771        migrate_disable();
 772        res = __bpf_prog_run_save_cb(prog, skb);
 773        migrate_enable();
 774        return res;
 775}
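
/* Illustrative sketch (not part of this header): socket-filter style callers
 * whose skb->cb[] may still carry protocol data use the _save_cb variant, so
 * the original cb[] contents are saved, zeroed for the run, and restored
 * afterwards:
 *
 *	res = bpf_prog_run_save_cb(filter->prog, skb);	// preemptible context
 */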
 776
 777static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
 778                                        struct sk_buff *skb)
 779{
 780        u8 *cb_data = bpf_skb_cb(skb);
 781        u32 res;
 782
 783        if (unlikely(prog->cb_access))
 784                memset(cb_data, 0, BPF_SKB_CB_LEN);
 785
 786        res = bpf_prog_run_pin_on_cpu(prog, skb);
 787        return res;
 788}
 789
 790DECLARE_BPF_DISPATCHER(xdp)
 791
 792DECLARE_STATIC_KEY_FALSE(bpf_master_redirect_enabled_key);
 793
 794u32 xdp_master_redirect(struct xdp_buff *xdp);
 795
 796static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
 797                                            struct xdp_buff *xdp)
 798{
 799        /* Driver XDP hooks are invoked within a single NAPI poll cycle and thus
 800         * under local_bh_disable(), which provides the needed RCU protection
 801         * for accessing map entries.
 802         */
 803        u32 act = __bpf_prog_run(prog, xdp, BPF_DISPATCHER_FUNC(xdp));
 804
 805        if (static_branch_unlikely(&bpf_master_redirect_enabled_key)) {
 806                if (act == XDP_TX && netif_is_bond_slave(xdp->rxq->dev))
 807                        act = xdp_master_redirect(xdp);
 808        }
 809
 810        return act;
 811}
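
/* Illustrative sketch (not part of this header): a driver's NAPI poll loop
 * typically runs the attached program once per frame and acts on the verdict:
 *
 *	act = bpf_prog_run_xdp(xdp_prog, &xdp);
 *	switch (act) {
 *	case XDP_PASS:
 *		// build an skb and hand the frame to the stack
 *		break;
 *	case XDP_TX:
 *		// transmit back out of the same device
 *		break;
 *	case XDP_REDIRECT:
 *		// xdp_do_redirect(dev, &xdp, xdp_prog), see below
 *		break;
 *	default:
 *		bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
 *		fallthrough;
 *	case XDP_ABORTED:
 *	case XDP_DROP:
 *		// recycle the buffer
 *		break;
 *	}
 */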
 812
 813void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog);
 814
 815static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
 816{
 817        return prog->len * sizeof(struct bpf_insn);
 818}
 819
 820static inline u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog)
 821{
 822        return round_up(bpf_prog_insn_size(prog) +
 823                        sizeof(__be64) + 1, SHA1_BLOCK_SIZE);
 824}
 825
 826static inline unsigned int bpf_prog_size(unsigned int proglen)
 827{
 828        return max(sizeof(struct bpf_prog),
 829                   offsetof(struct bpf_prog, insns[proglen]));
 830}
 831
 832static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
 833{
 834        /* When classic BPF programs have been loaded and the arch
 835         * does not have a classic BPF JIT (anymore), they have been
 836         * converted via bpf_migrate_filter() to eBPF and thus always
 837         * have an unspec program type.
 838         */
 839        return prog->type == BPF_PROG_TYPE_UNSPEC;
 840}
 841
 842static inline u32 bpf_ctx_off_adjust_machine(u32 size)
 843{
 844        const u32 size_machine = sizeof(unsigned long);
 845
 846        if (size > size_machine && size % size_machine == 0)
 847                size = size_machine;
 848
 849        return size;
 850}
 851
 852static inline bool
 853bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
 854{
 855        return size <= size_default && (size & (size - 1)) == 0;
 856}
 857
 858static inline u8
 859bpf_ctx_narrow_access_offset(u32 off, u32 size, u32 size_default)
 860{
 861        u8 access_off = off & (size_default - 1);
 862
 863#ifdef __LITTLE_ENDIAN
 864        return access_off;
 865#else
 866        return size_default - (access_off + size);
 867#endif
 868}
 869
 870#define bpf_ctx_wide_access_ok(off, size, type, field)                  \
 871        (size == sizeof(__u64) &&                                       \
 872        off >= offsetof(type, field) &&                                 \
 873        off + sizeof(__u64) <= offsetofend(type, field) &&              \
 874        off % sizeof(__u64) == 0)
 875
 876#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
 877
 878static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
 879{
 880#ifndef CONFIG_BPF_JIT_ALWAYS_ON
 881        if (!fp->jited) {
 882                set_vm_flush_reset_perms(fp);
 883                set_memory_ro((unsigned long)fp, fp->pages);
 884        }
 885#endif
 886}
 887
 888static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
 889{
 890        set_vm_flush_reset_perms(hdr);
 891        set_memory_ro((unsigned long)hdr, hdr->size >> PAGE_SHIFT);
 892        set_memory_x((unsigned long)hdr, hdr->size >> PAGE_SHIFT);
 893}
 894
 895int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
 896static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
 897{
 898        return sk_filter_trim_cap(sk, skb, 1);
 899}
 900
 901struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
 902void bpf_prog_free(struct bpf_prog *fp);
 903
 904bool bpf_opcode_in_insntable(u8 code);
 905
 906void bpf_prog_free_linfo(struct bpf_prog *prog);
 907void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
 908                               const u32 *insn_to_jit_off);
 909int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog);
 910void bpf_prog_jit_attempt_done(struct bpf_prog *prog);
 911
 912struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
 913struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags);
 914struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
 915                                  gfp_t gfp_extra_flags);
 916void __bpf_prog_free(struct bpf_prog *fp);
 917
 918static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
 919{
 920        __bpf_prog_free(fp);
 921}
 922
 923typedef int (*bpf_aux_classic_check_t)(struct sock_filter *filter,
 924                                       unsigned int flen);
 925
 926int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
 927int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
 928                              bpf_aux_classic_check_t trans, bool save_orig);
 929void bpf_prog_destroy(struct bpf_prog *fp);
 930
 931int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
 932int sk_attach_bpf(u32 ufd, struct sock *sk);
 933int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
 934int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
 935void sk_reuseport_prog_free(struct bpf_prog *prog);
 936int sk_detach_filter(struct sock *sk);
 937int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
 938                  unsigned int len);
 939
 940bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
 941void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
 942
 943u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
 944#define __bpf_call_base_args \
 945        ((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \
 946         (void *)__bpf_call_base)
 947
 948struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
 949void bpf_jit_compile(struct bpf_prog *prog);
 950bool bpf_jit_needs_zext(void);
 951bool bpf_jit_supports_kfunc_call(void);
 952bool bpf_helper_changes_pkt_data(void *func);
 953
 954static inline bool bpf_dump_raw_ok(const struct cred *cred)
 955{
 956        /* Reconstruction of call-sites is dependent on kallsyms,
  957         * thus the dump is subject to the same restriction.
 958         */
 959        return kallsyms_show_value(cred);
 960}
 961
 962struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
 963                                       const struct bpf_insn *patch, u32 len);
 964int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt);
 965
 966void bpf_clear_redirect_map(struct bpf_map *map);
 967
 968static inline bool xdp_return_frame_no_direct(void)
 969{
 970        struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
 971
 972        return ri->kern_flags & BPF_RI_F_RF_NO_DIRECT;
 973}
 974
 975static inline void xdp_set_return_frame_no_direct(void)
 976{
 977        struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
 978
 979        ri->kern_flags |= BPF_RI_F_RF_NO_DIRECT;
 980}
 981
 982static inline void xdp_clear_return_frame_no_direct(void)
 983{
 984        struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
 985
 986        ri->kern_flags &= ~BPF_RI_F_RF_NO_DIRECT;
 987}
 988
 989static inline int xdp_ok_fwd_dev(const struct net_device *fwd,
 990                                 unsigned int pktlen)
 991{
 992        unsigned int len;
 993
 994        if (unlikely(!(fwd->flags & IFF_UP)))
 995                return -ENETDOWN;
 996
 997        len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
 998        if (pktlen > len)
 999                return -EMSGSIZE;
1000
1001        return 0;
1002}
1003
1004/* The pair of xdp_do_redirect and xdp_do_flush MUST be called in the
 1005 * same CPU context. Further, for best results, no more than a single map
 1006 * should be used for the do_redirect/do_flush pair. This limitation is
1007 * because we only track one map and force a flush when the map changes.
1008 * This does not appear to be a real limitation for existing software.
1009 */
1010int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
1011                            struct xdp_buff *xdp, struct bpf_prog *prog);
1012int xdp_do_redirect(struct net_device *dev,
1013                    struct xdp_buff *xdp,
1014                    struct bpf_prog *prog);
1015int xdp_do_redirect_frame(struct net_device *dev,
1016                          struct xdp_buff *xdp,
1017                          struct xdp_frame *xdpf,
1018                          struct bpf_prog *prog);
1019void xdp_do_flush(void);
1020
1021/* The xdp_do_flush_map() helper has been renamed to drop the _map suffix, as
1022 * it is no longer only flushing maps. Keep this define for compatibility
1023 * until all drivers are updated - do not use xdp_do_flush_map() in new code!
1024 */
1025#define xdp_do_flush_map xdp_do_flush
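
/* Illustrative sketch (not part of this header): redirects are batched per
 * NAPI poll. The driver calls xdp_do_redirect() for each redirected frame
 * and then a single xdp_do_flush() at the end of the poll, still on the
 * same CPU and still in softirq context:
 *
 *	while (budget--) {
 *		// ... receive a frame, run the program, verdict is XDP_REDIRECT
 *		err = xdp_do_redirect(dev, &xdp, xdp_prog);
 *	}
 *	xdp_do_flush();
 */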
1026
1027void bpf_warn_invalid_xdp_action(struct net_device *dev, struct bpf_prog *prog, u32 act);
1028
1029#ifdef CONFIG_INET
1030struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
1031                                  struct bpf_prog *prog, struct sk_buff *skb,
1032                                  struct sock *migrating_sk,
1033                                  u32 hash);
1034#else
1035static inline struct sock *
1036bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
1037                     struct bpf_prog *prog, struct sk_buff *skb,
1038                     struct sock *migrating_sk,
1039                     u32 hash)
1040{
1041        return NULL;
1042}
1043#endif
1044
1045#ifdef CONFIG_BPF_JIT
1046extern int bpf_jit_enable;
1047extern int bpf_jit_harden;
1048extern int bpf_jit_kallsyms;
1049extern long bpf_jit_limit;
1050extern long bpf_jit_limit_max;
1051
1052typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
1053
1054struct bpf_binary_header *
1055bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
1056                     unsigned int alignment,
1057                     bpf_jit_fill_hole_t bpf_fill_ill_insns);
1058void bpf_jit_binary_free(struct bpf_binary_header *hdr);
1059u64 bpf_jit_alloc_exec_limit(void);
1060void *bpf_jit_alloc_exec(unsigned long size);
1061void bpf_jit_free_exec(void *addr);
1062void bpf_jit_free(struct bpf_prog *fp);
1063
1064struct bpf_binary_header *
1065bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **ro_image,
1066                          unsigned int alignment,
1067                          struct bpf_binary_header **rw_hdr,
1068                          u8 **rw_image,
1069                          bpf_jit_fill_hole_t bpf_fill_ill_insns);
1070int bpf_jit_binary_pack_finalize(struct bpf_prog *prog,
1071                                 struct bpf_binary_header *ro_header,
1072                                 struct bpf_binary_header *rw_header);
1073void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header,
1074                              struct bpf_binary_header *rw_header);
1075
1076int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
1077                                struct bpf_jit_poke_descriptor *poke);
1078
1079int bpf_jit_get_func_addr(const struct bpf_prog *prog,
1080                          const struct bpf_insn *insn, bool extra_pass,
1081                          u64 *func_addr, bool *func_addr_fixed);
1082
1083struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
1084void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other);
1085
1086static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
1087                                u32 pass, void *image)
1088{
1089        pr_err("flen=%u proglen=%u pass=%u image=%pK from=%s pid=%d\n", flen,
1090               proglen, pass, image, current->comm, task_pid_nr(current));
1091
1092        if (image)
1093                print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
1094                               16, 1, image, proglen, false);
1095}
1096
1097static inline bool bpf_jit_is_ebpf(void)
1098{
1099# ifdef CONFIG_HAVE_EBPF_JIT
1100        return true;
1101# else
1102        return false;
1103# endif
1104}
1105
1106static inline bool ebpf_jit_enabled(void)
1107{
1108        return bpf_jit_enable && bpf_jit_is_ebpf();
1109}
1110
1111static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
1112{
1113        return fp->jited && bpf_jit_is_ebpf();
1114}
1115
1116static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
1117{
 1118        /* These are the prerequisites; should someone ever have the
1119         * idea to call blinding outside of them, we make sure to
1120         * bail out.
1121         */
1122        if (!bpf_jit_is_ebpf())
1123                return false;
1124        if (!prog->jit_requested)
1125                return false;
1126        if (!bpf_jit_harden)
1127                return false;
1128        if (bpf_jit_harden == 1 && capable(CAP_SYS_ADMIN))
1129                return false;
1130
1131        return true;
1132}
1133
1134static inline bool bpf_jit_kallsyms_enabled(void)
1135{
1136        /* There are a couple of corner cases where kallsyms should
 1137         * not be enabled, e.g. when JIT hardening is on.
1138         */
1139        if (bpf_jit_harden)
1140                return false;
1141        if (!bpf_jit_kallsyms)
1142                return false;
1143        if (bpf_jit_kallsyms == 1)
1144                return true;
1145
1146        return false;
1147}
1148
1149const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
1150                                 unsigned long *off, char *sym);
1151bool is_bpf_text_address(unsigned long addr);
1152int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
1153                    char *sym);
1154
1155static inline const char *
1156bpf_address_lookup(unsigned long addr, unsigned long *size,
1157                   unsigned long *off, char **modname, char *sym)
1158{
1159        const char *ret = __bpf_address_lookup(addr, size, off, sym);
1160
1161        if (ret && modname)
1162                *modname = NULL;
1163        return ret;
1164}
1165
1166void bpf_prog_kallsyms_add(struct bpf_prog *fp);
1167void bpf_prog_kallsyms_del(struct bpf_prog *fp);
1168
1169#else /* CONFIG_BPF_JIT */
1170
1171static inline bool ebpf_jit_enabled(void)
1172{
1173        return false;
1174}
1175
1176static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
1177{
1178        return false;
1179}
1180
1181static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
1182{
1183        return false;
1184}
1185
1186static inline int
1187bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
1188                            struct bpf_jit_poke_descriptor *poke)
1189{
1190        return -ENOTSUPP;
1191}
1192
1193static inline void bpf_jit_free(struct bpf_prog *fp)
1194{
1195        bpf_prog_unlock_free(fp);
1196}
1197
1198static inline bool bpf_jit_kallsyms_enabled(void)
1199{
1200        return false;
1201}
1202
1203static inline const char *
1204__bpf_address_lookup(unsigned long addr, unsigned long *size,
1205                     unsigned long *off, char *sym)
1206{
1207        return NULL;
1208}
1209
1210static inline bool is_bpf_text_address(unsigned long addr)
1211{
1212        return false;
1213}
1214
1215static inline int bpf_get_kallsym(unsigned int symnum, unsigned long *value,
1216                                  char *type, char *sym)
1217{
1218        return -ERANGE;
1219}
1220
1221static inline const char *
1222bpf_address_lookup(unsigned long addr, unsigned long *size,
1223                   unsigned long *off, char **modname, char *sym)
1224{
1225        return NULL;
1226}
1227
1228static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp)
1229{
1230}
1231
1232static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp)
1233{
1234}
1235
1236#endif /* CONFIG_BPF_JIT */
1237
1238void bpf_prog_kallsyms_del_all(struct bpf_prog *fp);
1239
1240#define BPF_ANC         BIT(15)
1241
1242static inline bool bpf_needs_clear_a(const struct sock_filter *first)
1243{
1244        switch (first->code) {
1245        case BPF_RET | BPF_K:
1246        case BPF_LD | BPF_W | BPF_LEN:
1247                return false;
1248
1249        case BPF_LD | BPF_W | BPF_ABS:
1250        case BPF_LD | BPF_H | BPF_ABS:
1251        case BPF_LD | BPF_B | BPF_ABS:
1252                if (first->k == SKF_AD_OFF + SKF_AD_ALU_XOR_X)
1253                        return true;
1254                return false;
1255
1256        default:
1257                return true;
1258        }
1259}
1260
1261static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
1262{
1263        BUG_ON(ftest->code & BPF_ANC);
1264
1265        switch (ftest->code) {
1266        case BPF_LD | BPF_W | BPF_ABS:
1267        case BPF_LD | BPF_H | BPF_ABS:
1268        case BPF_LD | BPF_B | BPF_ABS:
1269#define BPF_ANCILLARY(CODE)     case SKF_AD_OFF + SKF_AD_##CODE:        \
1270                                return BPF_ANC | SKF_AD_##CODE
1271                switch (ftest->k) {
1272                BPF_ANCILLARY(PROTOCOL);
1273                BPF_ANCILLARY(PKTTYPE);
1274                BPF_ANCILLARY(IFINDEX);
1275                BPF_ANCILLARY(NLATTR);
1276                BPF_ANCILLARY(NLATTR_NEST);
1277                BPF_ANCILLARY(MARK);
1278                BPF_ANCILLARY(QUEUE);
1279                BPF_ANCILLARY(HATYPE);
1280                BPF_ANCILLARY(RXHASH);
1281                BPF_ANCILLARY(CPU);
1282                BPF_ANCILLARY(ALU_XOR_X);
1283                BPF_ANCILLARY(VLAN_TAG);
1284                BPF_ANCILLARY(VLAN_TAG_PRESENT);
1285                BPF_ANCILLARY(PAY_OFFSET);
1286                BPF_ANCILLARY(RANDOM);
1287                BPF_ANCILLARY(VLAN_TPID);
1288                }
1289                fallthrough;
1290        default:
1291                return ftest->code;
1292        }
1293}
1294
1295void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
1296                                           int k, unsigned int size);
1297
1298static inline int bpf_tell_extensions(void)
1299{
1300        return SKF_AD_MAX;
1301}
1302
1303struct bpf_sock_addr_kern {
1304        struct sock *sk;
1305        struct sockaddr *uaddr;
1306        /* Temporary "register" to make indirect stores to nested structures
1307         * defined above. We need three registers to make such a store, but
 1308         * only two (src and dst) are available at convert_ctx_access time.
1309         */
1310        u64 tmp_reg;
1311        void *t_ctx;    /* Attach type specific context. */
1312};
1313
1314struct bpf_sock_ops_kern {
1315        struct  sock *sk;
1316        union {
1317                u32 args[4];
1318                u32 reply;
1319                u32 replylong[4];
1320        };
1321        struct sk_buff  *syn_skb;
1322        struct sk_buff  *skb;
1323        void    *skb_data_end;
1324        u8      op;
1325        u8      is_fullsock;
1326        u8      remaining_opt_len;
1327        u64     temp;                   /* temp and everything after are not
1328                                         * initialized to 0 before calling
1329                                         * the BPF program. New fields that
1330                                         * should be initialized to 0 should
1331                                         * be inserted before temp.
1332                                         * temp is scratch storage used by
1333                                         * sock_ops_convert_ctx_access
1334                                         * as temporary storage of a register.
1335                                         */
1336};
1337
1338struct bpf_sysctl_kern {
1339        struct ctl_table_header *head;
1340        struct ctl_table *table;
1341        void *cur_val;
1342        size_t cur_len;
1343        void *new_val;
1344        size_t new_len;
1345        int new_updated;
1346        int write;
1347        loff_t *ppos;
1348        /* Temporary "register" for indirect stores to ppos. */
1349        u64 tmp_reg;
1350};
1351
1352#define BPF_SOCKOPT_KERN_BUF_SIZE       32
1353struct bpf_sockopt_buf {
1354        u8              data[BPF_SOCKOPT_KERN_BUF_SIZE];
1355};
1356
1357struct bpf_sockopt_kern {
1358        struct sock     *sk;
1359        u8              *optval;
1360        u8              *optval_end;
1361        s32             level;
1362        s32             optname;
1363        s32             optlen;
1364        /* for retval in struct bpf_cg_run_ctx */
1365        struct task_struct *current_task;
1366        /* Temporary "register" for indirect stores done by convert_ctx_access. */
1367        u64             tmp_reg;
1368};
1369
1370int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len);
1371
1372struct bpf_sk_lookup_kern {
1373        u16             family;
1374        u16             protocol;
1375        __be16          sport;
1376        u16             dport;
1377        struct {
1378                __be32 saddr;
1379                __be32 daddr;
1380        } v4;
1381        struct {
1382                const struct in6_addr *saddr;
1383                const struct in6_addr *daddr;
1384        } v6;
1385        struct sock     *selected_sk;
1386        u32             ingress_ifindex;
1387        bool            no_reuseport;
1388};
1389
1390extern struct static_key_false bpf_sk_lookup_enabled;
1391
1392/* Runners for BPF_SK_LOOKUP programs to invoke on socket lookup.
1393 *
1394 * Allowed return values for a BPF SK_LOOKUP program are SK_PASS and
1395 * SK_DROP. Their meaning is as follows:
1396 *
1397 *  SK_PASS && ctx.selected_sk != NULL: use selected_sk as lookup result
1398 *  SK_PASS && ctx.selected_sk == NULL: continue to htable-based socket lookup
1399 *  SK_DROP                           : terminate lookup with -ECONNREFUSED
1400 *
1401 * This macro aggregates return values and selected sockets from
1402 * multiple BPF programs according to the following rules, in order:
1403 *
1404 *  1. If any program returned SK_PASS and a non-NULL ctx.selected_sk,
1405 *     macro result is SK_PASS and last ctx.selected_sk is used.
1406 *  2. If any program returned SK_DROP, the macro result
1407 *     is SK_DROP.
1408 *  3. Otherwise result is SK_PASS and ctx.selected_sk is NULL.
1409 *
1410 * Caller must ensure that the prog array is non-NULL, and that the
1411 * array as well as the programs it contains remain valid.
1412 */
1413#define BPF_PROG_SK_LOOKUP_RUN_ARRAY(array, ctx, func)                  \
1414        ({                                                              \
1415                struct bpf_sk_lookup_kern *_ctx = &(ctx);               \
1416                struct bpf_prog_array_item *_item;                      \
1417                struct sock *_selected_sk = NULL;                       \
1418                bool _no_reuseport = false;                             \
1419                struct bpf_prog *_prog;                                 \
1420                bool _all_pass = true;                                  \
1421                u32 _ret;                                               \
1422                                                                        \
1423                migrate_disable();                                      \
1424                _item = &(array)->items[0];                             \
1425                while ((_prog = READ_ONCE(_item->prog))) {              \
1426                        /* restore most recent selection */             \
1427                        _ctx->selected_sk = _selected_sk;               \
1428                        _ctx->no_reuseport = _no_reuseport;             \
1429                                                                        \
1430                        _ret = func(_prog, _ctx);                       \
1431                        if (_ret == SK_PASS && _ctx->selected_sk) {     \
1432                                /* remember last non-NULL socket */     \
1433                                _selected_sk = _ctx->selected_sk;       \
1434                                _no_reuseport = _ctx->no_reuseport;     \
1435                        } else if (_ret == SK_DROP && _all_pass) {      \
1436                                _all_pass = false;                      \
1437                        }                                               \
1438                        _item++;                                        \
1439                }                                                       \
1440                _ctx->selected_sk = _selected_sk;                       \
1441                _ctx->no_reuseport = _no_reuseport;                     \
1442                migrate_enable();                                       \
1443                _all_pass || _selected_sk ? SK_PASS : SK_DROP;          \
1444         })
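/* Illustrative sketch, not part of this header: a minimal SK_LOOKUP
 * program showing the return-value contract aggregated above.  Picking a
 * socket out of a sockmap and bpf_sk_assign()-ing it sets
 * ctx->selected_sk; SK_PASS without an assignment lets the regular
 * hashtable lookup proceed, and SK_DROP refuses the packet.  "redir_map"
 * is a hypothetical BPF_MAP_TYPE_SOCKMAP; libbpf-style BPF C, untested.
 *
 *	SEC("sk_lookup")
 *	int select_sock(struct bpf_sk_lookup *ctx)
 *	{
 *		__u32 key = 0;
 *		struct bpf_sock *sk;
 *
 *		sk = bpf_map_lookup_elem(&redir_map, &key);
 *		if (!sk)
 *			return SK_PASS;		(fall back to hashtable lookup)
 *		bpf_sk_assign(ctx, sk, 0);	(sets ctx->selected_sk)
 *		bpf_sk_release(sk);
 *		return SK_PASS;
 *	}
 */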
1445
1446static inline bool bpf_sk_lookup_run_v4(struct net *net, int protocol,
1447                                        const __be32 saddr, const __be16 sport,
1448                                        const __be32 daddr, const u16 dport,
1449                                        const int ifindex, struct sock **psk)
1450{
1451        struct bpf_prog_array *run_array;
1452        struct sock *selected_sk = NULL;
1453        bool no_reuseport = false;
1454
1455        rcu_read_lock();
1456        run_array = rcu_dereference(net->bpf.run_array[NETNS_BPF_SK_LOOKUP]);
1457        if (run_array) {
1458                struct bpf_sk_lookup_kern ctx = {
1459                        .family         = AF_INET,
1460                        .protocol       = protocol,
1461                        .v4.saddr       = saddr,
1462                        .v4.daddr       = daddr,
1463                        .sport          = sport,
1464                        .dport          = dport,
1465                        .ingress_ifindex        = ifindex,
1466                };
1467                u32 act;
1468
1469                act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, bpf_prog_run);
1470                if (act == SK_PASS) {
1471                        selected_sk = ctx.selected_sk;
1472                        no_reuseport = ctx.no_reuseport;
1473                } else {
1474                        selected_sk = ERR_PTR(-ECONNREFUSED);
1475                }
1476        }
1477        rcu_read_unlock();
1478        *psk = selected_sk;
1479        return no_reuseport;
1480}
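/* Illustrative sketch, not part of this header: roughly how a protocol's
 * listener lookup consumes this helper.  An ERR_PTR result means the BPF
 * program returned SK_DROP, a NULL socket means "no selection, fall back
 * to the hashtable lookup", and the returned bool tells the caller to
 * skip reuseport group selection for the chosen socket.  Names are
 * approximate, untested:
 *
 *	struct sock *sk;
 *	bool no_reuseport;
 *
 *	no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_TCP, saddr, sport,
 *					    daddr, hnum, dif, &sk);
 *	if (IS_ERR(sk))
 *		return NULL;		(SK_DROP: refuse the connection)
 *	if (sk)
 *		return sk;		(optionally after reuseport selection)
 *	(sk == NULL: continue with the regular hashtable lookup)
 */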
1481
1482#if IS_ENABLED(CONFIG_IPV6)
1483static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol,
1484                                        const struct in6_addr *saddr,
1485                                        const __be16 sport,
1486                                        const struct in6_addr *daddr,
1487                                        const u16 dport,
1488                                        const int ifindex, struct sock **psk)
1489{
1490        struct bpf_prog_array *run_array;
1491        struct sock *selected_sk = NULL;
1492        bool no_reuseport = false;
1493
1494        rcu_read_lock();
1495        run_array = rcu_dereference(net->bpf.run_array[NETNS_BPF_SK_LOOKUP]);
1496        if (run_array) {
1497                struct bpf_sk_lookup_kern ctx = {
1498                        .family         = AF_INET6,
1499                        .protocol       = protocol,
1500                        .v6.saddr       = saddr,
1501                        .v6.daddr       = daddr,
1502                        .sport          = sport,
1503                        .dport          = dport,
1504                        .ingress_ifindex        = ifindex,
1505                };
1506                u32 act;
1507
1508                act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, bpf_prog_run);
1509                if (act == SK_PASS) {
1510                        selected_sk = ctx.selected_sk;
1511                        no_reuseport = ctx.no_reuseport;
1512                } else {
1513                        selected_sk = ERR_PTR(-ECONNREFUSED);
1514                }
1515        }
1516        rcu_read_unlock();
1517        *psk = selected_sk;
1518        return no_reuseport;
1519}
1520#endif /* IS_ENABLED(CONFIG_IPV6) */
1521
1522static __always_inline int __bpf_xdp_redirect_map(struct bpf_map *map, u32 ifindex,
1523                                                  u64 flags, const u64 flag_mask,
1524                                                  void *lookup_elem(struct bpf_map *map, u32 key))
1525{
1526        struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
1527        const u64 action_mask = XDP_ABORTED | XDP_DROP | XDP_PASS | XDP_TX;
1528
1529        /* Lower bits of the flags are used as return code on lookup failure */
1530        if (unlikely(flags & ~(action_mask | flag_mask)))
1531                return XDP_ABORTED;
1532
1533        ri->tgt_value = lookup_elem(map, ifindex);
1534        if (unlikely(!ri->tgt_value) && !(flags & BPF_F_BROADCAST)) {
1535                /* If the lookup fails, we want to clear out the state in the
1536                 * redirect_info struct completely, so that if an eBPF program
1537                 * performs multiple lookups, the last one always takes
1538                 * precedence.
1539                 */
1540                ri->map_id = INT_MAX; /* Valid map id idr range: [1,INT_MAX[ */
1541                ri->map_type = BPF_MAP_TYPE_UNSPEC;
1542                return flags & action_mask;
1543        }
1544
1545        ri->tgt_index = ifindex;
1546        ri->map_id = map->id;
1547        ri->map_type = map->map_type;
1548
1549        if (flags & BPF_F_BROADCAST) {
1550                WRITE_ONCE(ri->map, map);
1551                ri->flags = flags;
1552        } else {
1553                WRITE_ONCE(ri->map, NULL);
1554                ri->flags = 0;
1555        }
1556
1557        return XDP_REDIRECT;
1558}
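/* Illustrative sketch, not part of this header: each redirectable map
 * type wraps this helper with its own lookup routine.  The devmap
 * redirect helper, for instance, does roughly the following (untested):
 *
 *	BPF_CALL_3(bpf_xdp_redirect_map, struct bpf_map *, map,
 *		   u32, ifindex, u64, flags)
 *	{
 *		return __bpf_xdp_redirect_map(map, ifindex, flags,
 *					      BPF_F_BROADCAST |
 *					      BPF_F_EXCLUDE_INGRESS,
 *					      __dev_map_lookup_elem);
 *	}
 *
 * cpumap and xskmap pass 0 as flag_mask, so setting the broadcast flags
 * on those maps fails the mask check above and the redirect aborts.
 */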
1559
1560#endif /* __LINUX_FILTER_H__ */
1561