/*
 * Linux Socket Filter Data Structures
 */
#ifndef __LINUX_FILTER_H__
#define __LINUX_FILTER_H__

#include <stdarg.h>

#include <linux/atomic.h>
#include <linux/compat.h>
#include <linux/skbuff.h>
#include <linux/linkage.h>
#include <linux/printk.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/capability.h>

#include <net/sch_generic.h>

#include <asm/cacheflush.h>

#include <uapi/linux/filter.h>
#include <uapi/linux/bpf.h>

struct sk_buff;
struct sock;
struct seccomp_data;
struct bpf_prog_aux;

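/* ArgX, context and stack frame pointer register positions. Note,
 * Arg1, Arg2, Arg3, etc are used as argument mappings of function
 * calls in BPF_CALL instruction.
 */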
#define BPF_REG_ARG1	BPF_REG_1
#define BPF_REG_ARG2	BPF_REG_2
#define BPF_REG_ARG3	BPF_REG_3
#define BPF_REG_ARG4	BPF_REG_4
#define BPF_REG_ARG5	BPF_REG_5
#define BPF_REG_CTX	BPF_REG_6
#define BPF_REG_FP	BPF_REG_10

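/* Additional register mappings for converted user programs. */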
#define BPF_REG_A	BPF_REG_0
#define BPF_REG_X	BPF_REG_7
#define BPF_REG_TMP	BPF_REG_8

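/* Kernel hidden auxiliary/helper register for hardening step.
 * Only used by eBPF JITs. It's nothing more than a temporary
 * register that JITs use internally, only that here it's part
 * of eBPF instructions that have been rewritten for blinding
 * constants. See JIT pre-step in bpf_jit_blind_constants().
 */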
#define BPF_REG_AX		MAX_BPF_REG
#define MAX_BPF_JIT_REG		(MAX_BPF_REG + 1)

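/* BPF program can access up to 512 bytes of stack space. */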
#define MAX_BPF_STACK	512

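/* Helper macros for filter block array initializers. */

/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */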
#define BPF_ALU64_REG(OP, DST, SRC) \
	((struct bpf_insn) { \
		.code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = 0 })

#define BPF_ALU32_REG(OP, DST, SRC) \
	((struct bpf_insn) { \
		.code = BPF_ALU | BPF_OP(OP) | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = 0 })

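/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */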
#define BPF_ALU64_IMM(OP, DST, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = 0, \
		.imm = IMM })

#define BPF_ALU32_IMM(OP, DST, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU | BPF_OP(OP) | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = 0, \
		.imm = IMM })

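/* Endianess conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */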
#define BPF_ENDIAN(TYPE, DST, LEN) \
	((struct bpf_insn) { \
		.code = BPF_ALU | BPF_END | BPF_SRC(TYPE), \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = 0, \
		.imm = LEN })

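/* Short form of mov, dst_reg = src_reg */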
#define BPF_MOV64_REG(DST, SRC) \
	((struct bpf_insn) { \
		.code = BPF_ALU64 | BPF_MOV | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = 0 })

#define BPF_MOV32_REG(DST, SRC) \
	((struct bpf_insn) { \
		.code = BPF_ALU | BPF_MOV | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = 0 })

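/* Short form of mov, dst_reg = imm32 */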
#define BPF_MOV64_IMM(DST, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU64 | BPF_MOV | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = 0, \
		.imm = IMM })

#define BPF_MOV32_IMM(DST, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU | BPF_MOV | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = 0, \
		.imm = IMM })

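/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn.
 * Note that it expands to two struct bpf_insn slots: the second
 * slot carries the upper 32 bits of the immediate value.
 */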
#define BPF_LD_IMM64(DST, IMM) \
	BPF_LD_IMM64_RAW(DST, 0, IMM)

#define BPF_LD_IMM64_RAW(DST, SRC, IMM) \
	((struct bpf_insn) { \
		.code = BPF_LD | BPF_DW | BPF_IMM, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = (__u32) (IMM) }), \
	((struct bpf_insn) { \
		.code = 0, /* zero is reserved opcode */ \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off = 0, \
		.imm = ((__u64) (IMM)) >> 32 })

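/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */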
#define BPF_LD_MAP_FD(DST, MAP_FD) \
	BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)

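/* Short form of mov based on type, BPF_X: dst_reg = src_reg,
 * BPF_K: dst_reg = imm32
 */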
#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE), \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = IMM })

#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU | BPF_MOV | BPF_SRC(TYPE), \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = IMM })

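/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */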
#define BPF_LD_ABS(SIZE, IMM) \
	((struct bpf_insn) { \
		.code = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS, \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off = 0, \
		.imm = IMM })

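/* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */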
#define BPF_LD_IND(SIZE, SRC, IMM) \
	((struct bpf_insn) { \
		.code = BPF_LD | BPF_SIZE(SIZE) | BPF_IND, \
		.dst_reg = 0, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = IMM })

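/* Memory load, dst_reg = *(uint *) (src_reg + off16) */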
#define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \
	((struct bpf_insn) { \
		.code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = OFF, \
		.imm = 0 })

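/* Memory store, *(uint *) (dst_reg + off16) = src_reg */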
#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \
	((struct bpf_insn) { \
		.code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = OFF, \
		.imm = 0 })

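/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */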
#define BPF_STX_XADD(SIZE, DST, SRC, OFF) \
	((struct bpf_insn) { \
		.code = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = OFF, \
		.imm = 0 })

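/* Memory store, *(uint *) (dst_reg + off16) = imm32 */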
#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = OFF, \
		.imm = IMM })

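/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */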
#define BPF_JMP_REG(OP, DST, SRC, OFF) \
	((struct bpf_insn) { \
		.code = BPF_JMP | BPF_OP(OP) | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = OFF, \
		.imm = 0 })

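/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */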
#define BPF_JMP_IMM(OP, DST, IMM, OFF) \
	((struct bpf_insn) { \
		.code = BPF_JMP | BPF_OP(OP) | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = OFF, \
		.imm = IMM })

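/* Function call */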
#define BPF_EMIT_CALL(FUNC) \
	((struct bpf_insn) { \
		.code = BPF_JMP | BPF_CALL, \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off = 0, \
		.imm = ((FUNC) - __bpf_call_base) })

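/* Raw code statement block */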
#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \
	((struct bpf_insn) { \
		.code = CODE, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = OFF, \
		.imm = IMM })

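/* Program exit */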
#define BPF_EXIT_INSN() \
	((struct bpf_insn) { \
		.code = BPF_JMP | BPF_EXIT, \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off = 0, \
		.imm = 0 })

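/* Taken together, these initializers let eBPF programs be written as
 * plain C arrays. A minimal sketch (purely illustrative, not part of
 * this header):
 *
 *	struct bpf_insn prog[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 1),		// r0 = 1
 *		BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),	// r0 += 2
 *		BPF_EXIT_INSN(),			// return r0
 *	};
 */
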
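/* Internal classic blocks for direct assignment */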
#define __BPF_STMT(CODE, K) \
	((struct sock_filter) BPF_STMT(CODE, K))

#define __BPF_JUMP(CODE, K, JT, JF) \
	((struct sock_filter) BPF_JUMP(CODE, K, JT, JF))

#define bytes_to_bpf_size(bytes) \
({ \
	int bpf_size = -EINVAL; \
	\
	if (bytes == sizeof(u8)) \
		bpf_size = BPF_B; \
	else if (bytes == sizeof(u16)) \
		bpf_size = BPF_H; \
	else if (bytes == sizeof(u32)) \
		bpf_size = BPF_W; \
	else if (bytes == sizeof(u64)) \
		bpf_size = BPF_DW; \
	\
	bpf_size; \
})
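/* For example, bytes_to_bpf_size(sizeof(u32)) evaluates to BPF_W,
 * while an unsupported size yields -EINVAL.
 */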

#define BPF_SIZEOF(type) \
	({ \
		const int __size = bytes_to_bpf_size(sizeof(type)); \
		BUILD_BUG_ON(__size < 0); \
		__size; \
	})

#define BPF_FIELD_SIZEOF(type, field) \
	({ \
		const int __size = bytes_to_bpf_size(FIELD_SIZEOF(type, field)); \
		BUILD_BUG_ON(__size < 0); \
		__size; \
	})

#define __BPF_MAP_0(m, v, ...) v
#define __BPF_MAP_1(m, v, t, a, ...) m(t, a)
#define __BPF_MAP_2(m, v, t, a, ...) m(t, a), __BPF_MAP_1(m, v, __VA_ARGS__)
#define __BPF_MAP_3(m, v, t, a, ...) m(t, a), __BPF_MAP_2(m, v, __VA_ARGS__)
#define __BPF_MAP_4(m, v, t, a, ...) m(t, a), __BPF_MAP_3(m, v, __VA_ARGS__)
#define __BPF_MAP_5(m, v, t, a, ...) m(t, a), __BPF_MAP_4(m, v, __VA_ARGS__)

#define __BPF_REG_0(...) __BPF_PAD(5)
#define __BPF_REG_1(...) __BPF_MAP(1, __VA_ARGS__), __BPF_PAD(4)
#define __BPF_REG_2(...) __BPF_MAP(2, __VA_ARGS__), __BPF_PAD(3)
#define __BPF_REG_3(...) __BPF_MAP(3, __VA_ARGS__), __BPF_PAD(2)
#define __BPF_REG_4(...) __BPF_MAP(4, __VA_ARGS__), __BPF_PAD(1)
#define __BPF_REG_5(...) __BPF_MAP(5, __VA_ARGS__)

#define __BPF_MAP(n, ...) __BPF_MAP_##n(__VA_ARGS__)
#define __BPF_REG(n, ...) __BPF_REG_##n(__VA_ARGS__)

#define __BPF_CAST(t, a) \
	(__force t) \
	(__force \
	 typeof(__builtin_choose_expr(sizeof(t) == sizeof(unsigned long), \
				      (unsigned long)0, (t)0))) a
#define __BPF_V void
#define __BPF_N

#define __BPF_DECL_ARGS(t, a) t   a
#define __BPF_DECL_REGS(t, a) u64 a

#define __BPF_PAD(n) \
	__BPF_MAP(n, __BPF_DECL_ARGS, __BPF_N, u64, __ur_1, u64, __ur_2, \
		  u64, __ur_3, u64, __ur_4, u64, __ur_5)

#define BPF_CALL_x(x, name, ...) \
	static __always_inline \
	u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
	u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)); \
	u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)) \
	{ \
		return ____##name(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
	} \
	static __always_inline \
	u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))

#define BPF_CALL_0(name, ...)	BPF_CALL_x(0, name, __VA_ARGS__)
#define BPF_CALL_1(name, ...)	BPF_CALL_x(1, name, __VA_ARGS__)
#define BPF_CALL_2(name, ...)	BPF_CALL_x(2, name, __VA_ARGS__)
#define BPF_CALL_3(name, ...)	BPF_CALL_x(3, name, __VA_ARGS__)
#define BPF_CALL_4(name, ...)	BPF_CALL_x(4, name, __VA_ARGS__)
#define BPF_CALL_5(name, ...)	BPF_CALL_x(5, name, __VA_ARGS__)

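/* BPF_CALL_x() declares a BPF helper with a native C prototype while
 * emitting a wrapper that matches the eBPF calling convention (up to
 * five u64 register arguments, u64 return). A usage sketch with a
 * hypothetical helper (not defined in this header):
 *
 *	BPF_CALL_2(bpf_skb_get_len, struct sk_buff *, skb, u64, flags)
 *	{
 *		return skb->len;
 *	}
 *
 * This expands to a five-u64-argument u64 bpf_skb_get_len() wrapper
 * that casts the registers back to the declared types and calls the
 * inlined ____bpf_skb_get_len() body.
 */
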
#ifdef CONFIG_COMPAT
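/* A struct sock_filter is architecture independent. */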
struct compat_sock_fprog {
	u16		len;
	compat_uptr_t	filter;	/* struct sock_filter * */
};
#endif

struct sock_fprog_kern {
	u16			len;
	struct sock_filter	*filter;
};

struct bpf_binary_header {
	unsigned int pages;
	u8 image[];
};

struct bpf_prog {
	u16			pages;		/* Number of allocated pages */
	kmemcheck_bitfield_begin(meta);
	u16			jited:1,	/* Is our filter JIT'ed? */
				gpl_compatible:1, /* Is filter GPL compatible? */
				cb_access:1,	/* Is control block accessed? */
				dst_needed:1;	/* Do we need dst entry? */
	kmemcheck_bitfield_end(meta);
	u32			len;		/* Number of filter blocks */
	enum bpf_prog_type	type;		/* Type of BPF program */
	struct bpf_prog_aux	*aux;		/* Auxiliary fields */
	struct sock_fprog_kern	*orig_prog;	/* Original BPF program */
	unsigned int		(*bpf_func)(const struct sk_buff *skb,
					    const struct bpf_insn *filter);
	/* Instructions for interpreter */
	union {
		struct sock_filter	insns[0];
		struct bpf_insn		insnsi[0];
	};
};

struct sk_filter {
	atomic_t	refcnt;
	struct rcu_head	rcu;
	struct bpf_prog	*prog;
};

#define BPF_PROG_RUN(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi)

#define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN

struct bpf_skb_data_end {
	struct qdisc_skb_cb qdisc_cb;
	void *data_end;
};

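/* Buffer handed to XDP programs: [data, data_end) spans the packet
 * contents that the program may access directly.
 */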
struct xdp_buff {
	void *data;
	void *data_end;
};

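/* Compute the linear packet data range [data, data_end) which
 * will be accessed by cls_bpf, act_bpf and lwt programs.
 */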
static inline void bpf_compute_data_end(struct sk_buff *skb)
{
	struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;

	BUILD_BUG_ON(sizeof(*cb) > FIELD_SIZEOF(struct sk_buff, cb));
	cb->data_end = skb->data + skb_headlen(skb);
}

static inline u8 *bpf_skb_cb(struct sk_buff *skb)
{
	/* eBPF programs may read/write skb->cb[] area to transfer meta
	 * data between tail calls. Since this also needs to work with
	 * tc, that scratch memory is mapped to qdisc_skb_cb's data area.
	 *
	 * In some socket filter cases, the cb unfortunately needs to be
	 * saved/restored so that protocol specific skb->cb[] data won't
	 * be lost. In any case, due to unprivileged eBPF programs
	 * attached to sockets, we need to clear the bpf_skb_cb() area
	 * to not leak previous contents to user space.
	 */
	BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) != BPF_SKB_CB_LEN);
	BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) !=
		     FIELD_SIZEOF(struct qdisc_skb_cb, data));

	return qdisc_skb_cb(skb)->data;
}

static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
				       struct sk_buff *skb)
{
	u8 *cb_data = bpf_skb_cb(skb);
	u8 cb_saved[BPF_SKB_CB_LEN];
	u32 res;

	if (unlikely(prog->cb_access)) {
		memcpy(cb_saved, cb_data, sizeof(cb_saved));
		memset(cb_data, 0, sizeof(cb_saved));
	}

	res = BPF_PROG_RUN(prog, skb);

	if (unlikely(prog->cb_access))
		memcpy(cb_data, cb_saved, sizeof(cb_saved));

	return res;
}

static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
					struct sk_buff *skb)
{
	u8 *cb_data = bpf_skb_cb(skb);

	if (unlikely(prog->cb_access))
		memset(cb_data, 0, BPF_SKB_CB_LEN);

	return BPF_PROG_RUN(prog, skb);
}

static inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
				   struct xdp_buff *xdp)
{
	u32 ret;

	rcu_read_lock();
	ret = BPF_PROG_RUN(prog, (void *)xdp);
	rcu_read_unlock();

	return ret;
}

static inline unsigned int bpf_prog_size(unsigned int proglen)
{
	return max(sizeof(struct bpf_prog),
		   offsetof(struct bpf_prog, insns[proglen]));
}

static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
{
	/* Classic BPF programs that were migrated to eBPF keep
	 * BPF_PROG_TYPE_UNSPEC as their type, since no native eBPF
	 * program type was assigned at load time.
	 */
	return prog->type == BPF_PROG_TYPE_UNSPEC;
}

#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))

#ifdef CONFIG_DEBUG_SET_MODULE_RONX
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
	set_memory_ro((unsigned long)fp, fp->pages);
}

static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
{
	set_memory_rw((unsigned long)fp, fp->pages);
}
#else
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
}

static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
{
}
#endif /* CONFIG_DEBUG_SET_MODULE_RONX */

int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	return sk_filter_trim_cap(sk, skb, 1);
}

struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
void bpf_prog_free(struct bpf_prog *fp);

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags);
void __bpf_prog_free(struct bpf_prog *fp);

static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
{
	bpf_prog_unlock_ro(fp);
	__bpf_prog_free(fp);
}

typedef int (*bpf_aux_classic_check_t)(struct sock_filter *filter,
				       unsigned int flen);

int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
			      bpf_aux_classic_check_t trans, bool save_orig);
void bpf_prog_destroy(struct bpf_prog *fp);
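
/* In-kernel users build classic filters and hand them to
 * bpf_prog_create(), which transparently migrates them to eBPF. A
 * minimal sketch (illustrative only; error handling elided):
 *
 *	static struct sock_filter insns[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 0xffff),	// accept packet
 *	};
 *	struct sock_fprog_kern fprog = {
 *		.len	= ARRAY_SIZE(insns),
 *		.filter	= insns,
 *	};
 *	struct bpf_prog *fp;
 *	int err = bpf_prog_create(&fp, &fprog);
 *	...
 *	bpf_prog_destroy(fp);
 */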

int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_attach_bpf(u32 ufd, struct sock *sk);
int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
int sk_detach_filter(struct sock *sk);
int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
		  unsigned int len);

bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);

u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
bool bpf_helper_changes_skb_data(void *func);

struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len);
void bpf_warn_invalid_xdp_action(u32 act);

#ifdef CONFIG_BPF_JIT
extern int bpf_jit_enable;
extern int bpf_jit_harden;

typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns);
void bpf_jit_binary_free(struct bpf_binary_header *hdr);

void bpf_jit_compile(struct bpf_prog *fp);
void bpf_jit_free(struct bpf_prog *fp);

struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other);

static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
				u32 pass, void *image)
{
	pr_err("flen=%u proglen=%u pass=%u image=%pK from=%s pid=%d\n", flen,
	       proglen, pass, image, current->comm, task_pid_nr(current));

	if (image)
		print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
			       16, 1, image, proglen, false);
}

static inline bool bpf_jit_is_ebpf(void)
{
# ifdef CONFIG_HAVE_EBPF_JIT
	return true;
# else
	return false;
# endif
}

static inline bool bpf_jit_blinding_enabled(void)
{
	/* These are the prerequisites, should someone ever have the
	 * idea to call blinding outside of them, we make sure to
	 * bail out.
	 */
	if (!bpf_jit_is_ebpf())
		return false;
	if (!bpf_jit_enable)
		return false;
	if (!bpf_jit_harden)
		return false;
	if (bpf_jit_harden == 1 && capable(CAP_SYS_ADMIN))
		return false;

	return true;
}
#else
static inline void bpf_jit_compile(struct bpf_prog *fp)
{
}

static inline void bpf_jit_free(struct bpf_prog *fp)
{
	bpf_prog_unlock_free(fp);
}
#endif /* CONFIG_BPF_JIT */

#define BPF_ANC		BIT(15)

static inline bool bpf_needs_clear_a(const struct sock_filter *first)
{
	switch (first->code) {
	case BPF_RET | BPF_K:
	case BPF_LD | BPF_W | BPF_LEN:
		return false;

	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_B | BPF_ABS:
		if (first->k == SKF_AD_OFF + SKF_AD_ALU_XOR_X)
			return true;
		return false;

	default:
		return true;
	}
}

static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
{
	BUG_ON(ftest->code & BPF_ANC);

	switch (ftest->code) {
	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_B | BPF_ABS:
#define BPF_ANCILLARY(CODE)	case SKF_AD_OFF + SKF_AD_##CODE: \
				return BPF_ANC | SKF_AD_##CODE
		switch (ftest->k) {
		BPF_ANCILLARY(PROTOCOL);
		BPF_ANCILLARY(PKTTYPE);
		BPF_ANCILLARY(IFINDEX);
		BPF_ANCILLARY(NLATTR);
		BPF_ANCILLARY(NLATTR_NEST);
		BPF_ANCILLARY(MARK);
		BPF_ANCILLARY(QUEUE);
		BPF_ANCILLARY(HATYPE);
		BPF_ANCILLARY(RXHASH);
		BPF_ANCILLARY(CPU);
		BPF_ANCILLARY(ALU_XOR_X);
		BPF_ANCILLARY(VLAN_TAG);
		BPF_ANCILLARY(VLAN_TAG_PRESENT);
		BPF_ANCILLARY(PAY_OFFSET);
		BPF_ANCILLARY(RANDOM);
		BPF_ANCILLARY(VLAN_TPID);
		}
		/* Fallthrough. */
	default:
		return ftest->code;
	}
}

void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
					   int k, unsigned int size);

static inline void *bpf_load_pointer(const struct sk_buff *skb, int k,
				     unsigned int size, void *buffer)
{
	if (k >= 0)
		return skb_header_pointer(skb, k, size, buffer);

	return bpf_internal_load_pointer_neg_helper(skb, k, size);
}

static inline int bpf_tell_extensions(void)
{
	return SKF_AD_MAX;
}

#endif /* __LINUX_FILTER_H__ */