linux/include/linux/filter.h
/*
 * Linux Socket Filter Data Structures
 */
#ifndef __LINUX_FILTER_H__
#define __LINUX_FILTER_H__

#include <stdarg.h>

#include <linux/atomic.h>
#include <linux/compat.h>
#include <linux/skbuff.h>
#include <linux/linkage.h>
#include <linux/printk.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <net/sch_generic.h>

#include <asm/cacheflush.h>

#include <uapi/linux/filter.h>
#include <uapi/linux/bpf.h>

struct sk_buff;
struct sock;
struct seccomp_data;
struct bpf_prog_aux;

/* ArgX, context and stack frame pointer register positions. Note,
 * Arg1, Arg2, Arg3, etc. are used as argument mappings of function
 * calls in the BPF_CALL instruction.
 */
#define BPF_REG_ARG1    BPF_REG_1
#define BPF_REG_ARG2    BPF_REG_2
#define BPF_REG_ARG3    BPF_REG_3
#define BPF_REG_ARG4    BPF_REG_4
#define BPF_REG_ARG5    BPF_REG_5
#define BPF_REG_CTX     BPF_REG_6
#define BPF_REG_FP      BPF_REG_10

/* Additional register mappings for converted user programs. */
#define BPF_REG_A       BPF_REG_0
#define BPF_REG_X       BPF_REG_7
#define BPF_REG_TMP     BPF_REG_8

/* A BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK   512

/* Helper macros for filter block array initializers. */

/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */

#define BPF_ALU64_REG(OP, DST, SRC)                             \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ALU64 | BPF_OP(OP) | BPF_X,        \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = 0,                                     \
                .imm   = 0 })

#define BPF_ALU32_REG(OP, DST, SRC)                             \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ALU | BPF_OP(OP) | BPF_X,          \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = 0,                                     \
                .imm   = 0 })

/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */

#define BPF_ALU64_IMM(OP, DST, IMM)                             \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ALU64 | BPF_OP(OP) | BPF_K,        \
                .dst_reg = DST,                                 \
                .src_reg = 0,                                   \
                .off   = 0,                                     \
                .imm   = IMM })

#define BPF_ALU32_IMM(OP, DST, IMM)                             \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ALU | BPF_OP(OP) | BPF_K,          \
                .dst_reg = DST,                                 \
                .src_reg = 0,                                   \
                .off   = 0,                                     \
                .imm   = IMM })
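
/*
 * Illustrative sketch (not part of the original header): r2 += -20 on
 * the 64-bit ALU, and w3 ^= w3 on the 32-bit ALU (which clears the
 * whole register, since 32-bit results are zero-extended):
 *
 *      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -20),
 *      BPF_ALU32_REG(BPF_XOR, BPF_REG_3, BPF_REG_3),
 */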

/* Endianness conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */

#define BPF_ENDIAN(TYPE, DST, LEN)                              \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ALU | BPF_END | BPF_SRC(TYPE),     \
                .dst_reg = DST,                                 \
                .src_reg = 0,                                   \
                .off   = 0,                                     \
                .imm   = LEN })
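
/*
 * Illustrative sketch (TYPE is BPF_TO_LE or BPF_TO_BE from
 * uapi/linux/bpf.h, LEN is 16/32/64): convert the low 16 bits of R0 to
 * big endian, e.g. before comparing against a network-order port:
 *
 *      BPF_ENDIAN(BPF_TO_BE, BPF_REG_0, 16),
 */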

/* Short form of mov, dst_reg = src_reg */

#define BPF_MOV64_REG(DST, SRC)                                 \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ALU64 | BPF_MOV | BPF_X,           \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = 0,                                     \
                .imm   = 0 })

#define BPF_MOV32_REG(DST, SRC)                                 \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ALU | BPF_MOV | BPF_X,             \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = 0,                                     \
                .imm   = 0 })

/* Short form of mov, dst_reg = imm32 */

#define BPF_MOV64_IMM(DST, IMM)                                 \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ALU64 | BPF_MOV | BPF_K,           \
                .dst_reg = DST,                                 \
                .src_reg = 0,                                   \
                .off   = 0,                                     \
                .imm   = IMM })

#define BPF_MOV32_IMM(DST, IMM)                                 \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ALU | BPF_MOV | BPF_K,             \
                .dst_reg = DST,                                 \
                .src_reg = 0,                                   \
                .off   = 0,                                     \
                .imm   = IMM })

/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
#define BPF_LD_IMM64(DST, IMM)                                  \
        BPF_LD_IMM64_RAW(DST, 0, IMM)

#define BPF_LD_IMM64_RAW(DST, SRC, IMM)                         \
        ((struct bpf_insn) {                                    \
                .code  = BPF_LD | BPF_DW | BPF_IMM,             \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = 0,                                     \
                .imm   = (__u32) (IMM) }),                      \
        ((struct bpf_insn) {                                    \
                .code  = 0, /* zero is reserved opcode */       \
                .dst_reg = 0,                                   \
                .src_reg = 0,                                   \
                .off   = 0,                                     \
                .imm   = ((__u64) (IMM)) >> 32 })

/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD)                              \
        BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)
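
/*
 * Illustrative sketch (the fd value 3 is hypothetical): both macros
 * expand to *two* struct bpf_insn initializers, so each use occupies
 * two slots in an instruction array:
 *
 *      BPF_LD_IMM64(BPF_REG_1, 0x123456789abcdefULL),
 *      BPF_LD_MAP_FD(BPF_REG_1, 3),
 */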

/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */

#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM)                      \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE),   \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = 0,                                     \
                .imm   = IMM })

#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM)                      \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ALU | BPF_MOV | BPF_SRC(TYPE),     \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = 0,                                     \
                .imm   = IMM })

/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */

#define BPF_LD_ABS(SIZE, IMM)                                   \
        ((struct bpf_insn) {                                    \
                .code  = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS,     \
                .dst_reg = 0,                                   \
                .src_reg = 0,                                   \
                .off   = 0,                                     \
                .imm   = IMM })

/* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */

#define BPF_LD_IND(SIZE, SRC, IMM)                              \
        ((struct bpf_insn) {                                    \
                .code  = BPF_LD | BPF_SIZE(SIZE) | BPF_IND,     \
                .dst_reg = 0,                                   \
                .src_reg = SRC,                                 \
                .off   = 0,                                     \
                .imm   = IMM })
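
/*
 * Illustrative sketch: load the 16-bit EtherType at offset 12 into R0,
 * then one byte at skb->data + R7 + 14. Both forms implicitly take the
 * skb context from R6 and deposit their result in R0:
 *
 *      BPF_LD_ABS(BPF_H, 12),
 *      BPF_LD_IND(BPF_B, BPF_REG_X, 14),
 */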

/* Memory load, dst_reg = *(uint *) (src_reg + off16) */

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF)                        \
        ((struct bpf_insn) {                                    \
                .code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM,    \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = OFF,                                   \
                .imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */

#define BPF_STX_MEM(SIZE, DST, SRC, OFF)                        \
        ((struct bpf_insn) {                                    \
                .code  = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM,    \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = OFF,                                   \
                .imm   = 0 })

/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */

#define BPF_STX_XADD(SIZE, DST, SRC, OFF)                       \
        ((struct bpf_insn) {                                    \
                .code  = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD,   \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = OFF,                                   \
                .imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = imm32 */

#define BPF_ST_MEM(SIZE, DST, OFF, IMM)                         \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM,     \
                .dst_reg = DST,                                 \
                .src_reg = 0,                                   \
                .off   = OFF,                                   \
                .imm   = IMM })
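
/*
 * Illustrative sketch: spill R1 to the stack, store a 32-bit zero, and
 * load the spilled value back. Stack accesses go through negative
 * offsets off the read-only frame pointer R10:
 *
 *      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_1, -8),
 *      BPF_ST_MEM(BPF_W, BPF_REG_FP, -16, 0),
 *      BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_FP, -8),
 */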

/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */

#define BPF_JMP_REG(OP, DST, SRC, OFF)                          \
        ((struct bpf_insn) {                                    \
                .code  = BPF_JMP | BPF_OP(OP) | BPF_X,          \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = OFF,                                   \
                .imm   = 0 })

/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */

#define BPF_JMP_IMM(OP, DST, IMM, OFF)                          \
        ((struct bpf_insn) {                                    \
                .code  = BPF_JMP | BPF_OP(OP) | BPF_K,          \
                .dst_reg = DST,                                 \
                .src_reg = 0,                                   \
                .off   = OFF,                                   \
                .imm   = IMM })
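
/*
 * Illustrative sketch: if R0 == 0, skip the next two instructions; the
 * 16-bit offset is relative to the instruction following the jump:
 *
 *      BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
 */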

/* Function call */

#define BPF_EMIT_CALL(FUNC)                                     \
        ((struct bpf_insn) {                                    \
                .code  = BPF_JMP | BPF_CALL,                    \
                .dst_reg = 0,                                   \
                .src_reg = 0,                                   \
                .off   = 0,                                     \
                .imm   = ((FUNC) - __bpf_call_base) })

/* Raw code statement block */

#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM)                  \
        ((struct bpf_insn) {                                    \
                .code  = CODE,                                  \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = OFF,                                   \
                .imm   = IMM })

/* Program exit */

#define BPF_EXIT_INSN()                                         \
        ((struct bpf_insn) {                                    \
                .code  = BPF_JMP | BPF_EXIT,                    \
                .dst_reg = 0,                                   \
                .src_reg = 0,                                   \
                .off   = 0,                                     \
                .imm   = 0 })
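
/*
 * Putting the pieces together, an illustrative sketch (not part of the
 * original header): a minimal program that unconditionally returns 0,
 * i.e. "drop" when used as a socket filter:
 *
 *      struct bpf_insn prog[] = {
 *              BPF_MOV64_IMM(BPF_REG_0, 0),
 *              BPF_EXIT_INSN(),
 *      };
 */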

/* Internal classic blocks for direct assignment */

#define __BPF_STMT(CODE, K)                                     \
        ((struct sock_filter) BPF_STMT(CODE, K))

#define __BPF_JUMP(CODE, K, JT, JF)                             \
        ((struct sock_filter) BPF_JUMP(CODE, K, JT, JF))
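
/*
 * Illustrative sketch: the classic BPF equivalent of "accept the whole
 * packet" (load packet length into A, return A), as it would appear in
 * an in-kernel filter array:
 *
 *      __BPF_STMT(BPF_LD | BPF_W | BPF_LEN, 0),
 *      __BPF_STMT(BPF_RET | BPF_A, 0),
 */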

#define bytes_to_bpf_size(bytes)                                \
({                                                              \
        int bpf_size = -EINVAL;                                 \
                                                                \
        if (bytes == sizeof(u8))                                \
                bpf_size = BPF_B;                               \
        else if (bytes == sizeof(u16))                          \
                bpf_size = BPF_H;                               \
        else if (bytes == sizeof(u32))                          \
                bpf_size = BPF_W;                               \
        else if (bytes == sizeof(u64))                          \
                bpf_size = BPF_DW;                              \
                                                                \
        bpf_size;                                               \
})
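
/*
 * Illustrative usage: map a C type width to its BPF size encoding;
 * widths other than 1, 2, 4 or 8 bytes evaluate to -EINVAL:
 *
 *      int size = bytes_to_bpf_size(sizeof(u32));  -- yields BPF_W
 */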

#ifdef CONFIG_COMPAT
/* A struct sock_filter is architecture independent. */
struct compat_sock_fprog {
        u16             len;
        compat_uptr_t   filter; /* struct sock_filter * */
};
#endif

struct sock_fprog_kern {
        u16                     len;
        struct sock_filter      *filter;
};

struct bpf_binary_header {
        unsigned int pages;
        u8 image[];
};

struct bpf_prog {
        u16                     pages;          /* Number of allocated pages */
        kmemcheck_bitfield_begin(meta);
        u16                     jited:1,        /* Is our filter JIT'ed? */
                                gpl_compatible:1, /* Is filter GPL compatible? */
                                cb_access:1,    /* Is control block accessed? */
                                dst_needed:1;   /* Do we need dst entry? */
        kmemcheck_bitfield_end(meta);
        u32                     len;            /* Number of filter blocks */
        enum bpf_prog_type      type;           /* Type of BPF program */
        struct bpf_prog_aux     *aux;           /* Auxiliary fields */
        struct sock_fprog_kern  *orig_prog;     /* Original BPF program */
        unsigned int            (*bpf_func)(const struct sk_buff *skb,
                                            const struct bpf_insn *filter);
        /* Instructions for interpreter */
        union {
                struct sock_filter      insns[0];
                struct bpf_insn         insnsi[0];
        };
};

struct sk_filter {
        atomic_t        refcnt;
        struct rcu_head rcu;
        struct bpf_prog *prog;
};

#define BPF_PROG_RUN(filter, ctx)  (*filter->bpf_func)(ctx, filter->insnsi)

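/*
 * Illustrative usage: run a program against an skb context; for socket
 * filters the u32 return value caps the accepted packet length:
 *
 *      u32 ret = BPF_PROG_RUN(prog, skb);
 */
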
#define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN

static inline u8 *bpf_skb_cb(struct sk_buff *skb)
{
        /* eBPF programs may read/write skb->cb[] area to transfer
         * metadata between tail calls. Since this also needs to work
         * with tc, that scratch memory is mapped to qdisc_skb_cb's
         * data area.
         *
         * In some socket filter cases, the cb unfortunately needs to be
         * saved/restored so that protocol specific skb->cb[] data won't
         * be lost. In any case, due to unprivileged eBPF programs
         * attached to sockets, we need to clear the bpf_skb_cb() area
         * to not leak previous contents to user space.
         */
        BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) != BPF_SKB_CB_LEN);
        BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) !=
                     FIELD_SIZEOF(struct qdisc_skb_cb, data));

        return qdisc_skb_cb(skb)->data;
}

static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
                                       struct sk_buff *skb)
{
        u8 *cb_data = bpf_skb_cb(skb);
        u8 cb_saved[BPF_SKB_CB_LEN];
        u32 res;

        if (unlikely(prog->cb_access)) {
                memcpy(cb_saved, cb_data, sizeof(cb_saved));
                memset(cb_data, 0, sizeof(cb_saved));
        }

        res = BPF_PROG_RUN(prog, skb);

        if (unlikely(prog->cb_access))
                memcpy(cb_data, cb_saved, sizeof(cb_saved));

        return res;
}

static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
                                        struct sk_buff *skb)
{
        u8 *cb_data = bpf_skb_cb(skb);

        if (unlikely(prog->cb_access))
                memset(cb_data, 0, BPF_SKB_CB_LEN);

        return BPF_PROG_RUN(prog, skb);
}
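
/*
 * Illustrative note (a sketch, not a rule stated by this header): the
 * save/restore variant suits call sites where skb->cb[] still carries
 * protocol state that must survive the run, e.g. socket receive, while
 * the clear variant suffices where cb is already scratch:
 *
 *      u32 res = bpf_prog_run_save_cb(prog, skb);
 */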

static inline unsigned int bpf_prog_size(unsigned int proglen)
{
        return max(sizeof(struct bpf_prog),
                   offsetof(struct bpf_prog, insns[proglen]));
}
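
/*
 * Illustrative note: the instruction array is a flexible member at the
 * end of struct bpf_prog, and max() guards the degenerate proglen == 0
 * case:
 *
 *      unsigned int sz = bpf_prog_size(fp->len);
 */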

static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
{
        /* When classic BPF programs have been loaded and the arch
         * does not have a classic BPF JIT (anymore), they have been
         * converted via bpf_migrate_filter() to eBPF and thus always
         * have an unspec program type.
         */
        return prog->type == BPF_PROG_TYPE_UNSPEC;
}

#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))

#ifdef CONFIG_DEBUG_SET_MODULE_RONX
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
        set_memory_ro((unsigned long)fp, fp->pages);
}

static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
{
        set_memory_rw((unsigned long)fp, fp->pages);
}
#else
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
}

static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
{
}
#endif /* CONFIG_DEBUG_SET_MODULE_RONX */

int sk_filter(struct sock *sk, struct sk_buff *skb);

int bpf_prog_select_runtime(struct bpf_prog *fp);
void bpf_prog_free(struct bpf_prog *fp);

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
                                  gfp_t gfp_extra_flags);
void __bpf_prog_free(struct bpf_prog *fp);

static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
{
        bpf_prog_unlock_ro(fp);
        __bpf_prog_free(fp);
}

typedef int (*bpf_aux_classic_check_t)(struct sock_filter *filter,
                                       unsigned int flen);

int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
                              bpf_aux_classic_check_t trans, bool save_orig);
void bpf_prog_destroy(struct bpf_prog *fp);

int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int __sk_attach_filter(struct sock_fprog *fprog, struct sock *sk,
                       bool locked);
int sk_attach_bpf(u32 ufd, struct sock *sk);
int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
int sk_detach_filter(struct sock *sk);
int __sk_detach_filter(struct sock *sk, bool locked);

int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
                  unsigned int len);

bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);

u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
void bpf_int_jit_compile(struct bpf_prog *fp);
bool bpf_helper_changes_skb_data(void *func);

#ifdef CONFIG_BPF_JIT
typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
                     unsigned int alignment,
                     bpf_jit_fill_hole_t bpf_fill_ill_insns);
void bpf_jit_binary_free(struct bpf_binary_header *hdr);

void bpf_jit_compile(struct bpf_prog *fp);
void bpf_jit_free(struct bpf_prog *fp);

static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
                                u32 pass, void *image)
{
        pr_err("flen=%u proglen=%u pass=%u image=%pK from=%s pid=%d\n", flen,
               proglen, pass, image, current->comm, task_pid_nr(current));

        if (image)
                print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
                               16, 1, image, proglen, false);
}
#else
static inline void bpf_jit_compile(struct bpf_prog *fp)
{
}

static inline void bpf_jit_free(struct bpf_prog *fp)
{
        bpf_prog_unlock_free(fp);
}
#endif /* CONFIG_BPF_JIT */

#define BPF_ANC         BIT(15)

static inline bool bpf_needs_clear_a(const struct sock_filter *first)
{
        switch (first->code) {
        case BPF_RET | BPF_K:
        case BPF_LD | BPF_W | BPF_LEN:
                return false;

        case BPF_LD | BPF_W | BPF_ABS:
        case BPF_LD | BPF_H | BPF_ABS:
        case BPF_LD | BPF_B | BPF_ABS:
                if (first->k == SKF_AD_OFF + SKF_AD_ALU_XOR_X)
                        return true;
                return false;

        default:
                return true;
        }
}

static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
{
        BUG_ON(ftest->code & BPF_ANC);

        switch (ftest->code) {
        case BPF_LD | BPF_W | BPF_ABS:
        case BPF_LD | BPF_H | BPF_ABS:
        case BPF_LD | BPF_B | BPF_ABS:
#define BPF_ANCILLARY(CODE)     case SKF_AD_OFF + SKF_AD_##CODE:        \
                                return BPF_ANC | SKF_AD_##CODE
                switch (ftest->k) {
                BPF_ANCILLARY(PROTOCOL);
                BPF_ANCILLARY(PKTTYPE);
                BPF_ANCILLARY(IFINDEX);
                BPF_ANCILLARY(NLATTR);
                BPF_ANCILLARY(NLATTR_NEST);
                BPF_ANCILLARY(MARK);
                BPF_ANCILLARY(QUEUE);
                BPF_ANCILLARY(HATYPE);
                BPF_ANCILLARY(RXHASH);
                BPF_ANCILLARY(CPU);
                BPF_ANCILLARY(ALU_XOR_X);
                BPF_ANCILLARY(VLAN_TAG);
                BPF_ANCILLARY(VLAN_TAG_PRESENT);
                BPF_ANCILLARY(PAY_OFFSET);
                BPF_ANCILLARY(RANDOM);
                BPF_ANCILLARY(VLAN_TPID);
                }
                /* Fallthrough. */
        default:
                return ftest->code;
        }
}

void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
                                           int k, unsigned int size);

static inline void *bpf_load_pointer(const struct sk_buff *skb, int k,
                                     unsigned int size, void *buffer)
{
        if (k >= 0)
                return skb_header_pointer(skb, k, size, buffer);

        return bpf_internal_load_pointer_neg_helper(skb, k, size);
}
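
/*
 * Illustrative sketch: fetch two bytes at a classic BPF offset k, where
 * negative k values address the special extension space:
 *
 *      u16 tmp, val;
 *      void *ptr = bpf_load_pointer(skb, k, sizeof(tmp), &tmp);
 *      if (ptr)
 *              val = get_unaligned_be16(ptr);  (needs asm/unaligned.h)
 */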

static inline int bpf_tell_extensions(void)
{
        return SKF_AD_MAX;
}

#endif /* __LINUX_FILTER_H__ */