/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/sock_diag.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/gfp.h>
#include <net/inet_common.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/flow_dissector.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <asm/cmpxchg.h>
#include <linux/filter.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <net/sch_generic.h>
#include <net/cls_cgroup.h>
#include <net/dst_metadata.h>
#include <net/dst.h>
#include <net/sock_reuseport.h>
#include <net/busy_poll.h>
#include <net/tcp.h>
#include <linux/bpf_trace.h>

/**
 *	sk_filter_trim_cap - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *	@cap: limit on how short the eBPF program may trim the packet
 *
 * Run the eBPF program and then cut skb->data to correct size returned by
 * the program. If pkt_len is 0 we toss packet. If skb->len is smaller
 * than pkt_len we keep whole skb->data. This is the socket level
 * wrapper to BPF_PROG_RUN. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 */
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
{
	int err;
	struct sk_filter *filter;

	/*
	 * If the skb was allocated from pfmemalloc reserves, only
	 * allow SOCK_MEMALLOC sockets to use it as this socket is
	 * helping free memory
	 */
	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_PFMEMALLOCDROP);
		return -ENOMEM;
	}
	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
	if (err)
		return err;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		struct sock *save_sk = skb->sk;
		unsigned int pkt_len;

		skb->sk = sk;
		pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
		skb->sk = save_sk;
		err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
	}
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(sk_filter_trim_cap);
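
/* Callers normally do not invoke sk_filter_trim_cap() directly but go
 * through the sk_filter() wrapper from include/linux/filter.h, which
 * caps trimming at one byte. A minimal sketch of that wrapper:
 *
 *	static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
 *	{
 *		return sk_filter_trim_cap(sk, skb, 1);
 *	}
 */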
113
114BPF_CALL_1(__skb_get_pay_offset, struct sk_buff *, skb)
115{
116 return skb_get_poff(skb);
117}
118
119BPF_CALL_3(__skb_get_nlattr, struct sk_buff *, skb, u32, a, u32, x)
120{
121 struct nlattr *nla;
122
123 if (skb_is_nonlinear(skb))
124 return 0;
125
126 if (skb->len < sizeof(struct nlattr))
127 return 0;
128
129 if (a > skb->len - sizeof(struct nlattr))
130 return 0;
131
132 nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
133 if (nla)
134 return (void *) nla - (void *) skb->data;
135
136 return 0;
137}
138
139BPF_CALL_3(__skb_get_nlattr_nest, struct sk_buff *, skb, u32, a, u32, x)
140{
141 struct nlattr *nla;
142
143 if (skb_is_nonlinear(skb))
144 return 0;
145
146 if (skb->len < sizeof(struct nlattr))
147 return 0;
148
149 if (a > skb->len - sizeof(struct nlattr))
150 return 0;
151
152 nla = (struct nlattr *) &skb->data[a];
153 if (nla->nla_len > skb->len - a)
154 return 0;
155
156 nla = nla_find_nested(nla, x);
157 if (nla)
158 return (void *) nla - (void *) skb->data;
159
160 return 0;
161}
162
163BPF_CALL_0(__get_raw_cpu_id)
164{
165 return raw_smp_processor_id();
166}
167
168static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
169 .func = __get_raw_cpu_id,
170 .gpl_only = false,
171 .ret_type = RET_INTEGER,
172};
173
174static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
175 struct bpf_insn *insn_buf)
176{
177 struct bpf_insn *insn = insn_buf;
178
179 switch (skb_field) {
180 case SKF_AD_MARK:
181 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
182
183 *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
184 offsetof(struct sk_buff, mark));
185 break;
186
187 case SKF_AD_PKTTYPE:
188 *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET());
189 *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX);
190#ifdef __BIG_ENDIAN_BITFIELD
191 *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5);
192#endif
193 break;
194
195 case SKF_AD_QUEUE:
196 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
197
198 *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
199 offsetof(struct sk_buff, queue_mapping));
200 break;
201
202 case SKF_AD_VLAN_TAG:
203 case SKF_AD_VLAN_TAG_PRESENT:
204 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
205 BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);

		/* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */
		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, vlan_tci));
		if (skb_field == SKF_AD_VLAN_TAG) {
			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg,
						~VLAN_TAG_PRESENT);
		} else {
			/* dst_reg >>= 12 */
			*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 12);
			/* dst_reg &= 1 */
			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1);
		}
218 }
219 break;
220 }
221
222 return insn - insn_buf;
223}
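
/* convert_skb_access() is only reached for classic BPF ancillary loads,
 * i.e. absolute loads at SKF_AD_OFF and above. For illustration, a
 * classic filter instruction such as the following sketch (built with
 * the uapi BPF_STMT() macro) is rewritten by it into direct sk_buff
 * field accesses:
 *
 *	struct sock_filter load_mark =
 *		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
 *			 SKF_AD_OFF + SKF_AD_MARK);
 */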
224
225static bool convert_bpf_extensions(struct sock_filter *fp,
226 struct bpf_insn **insnp)
227{
228 struct bpf_insn *insn = *insnp;
229 u32 cnt;
230
231 switch (fp->k) {
232 case SKF_AD_OFF + SKF_AD_PROTOCOL:
233 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);

		/* A = *(u16 *) (CTX + offsetof(protocol)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, protocol));
		/* A = ntohs(A) [emitting a/k/a swab16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
240 break;
241
242 case SKF_AD_OFF + SKF_AD_PKTTYPE:
243 cnt = convert_skb_access(SKF_AD_PKTTYPE, BPF_REG_A, BPF_REG_CTX, insn);
244 insn += cnt - 1;
245 break;
246
247 case SKF_AD_OFF + SKF_AD_IFINDEX:
248 case SKF_AD_OFF + SKF_AD_HATYPE:
249 BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
250 BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
251
252 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
253 BPF_REG_TMP, BPF_REG_CTX,
254 offsetof(struct sk_buff, dev));
		/* if (tmp != 0) goto pc + 1 */
		*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
257 *insn++ = BPF_EXIT_INSN();
258 if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX)
259 *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP,
260 offsetof(struct net_device, ifindex));
261 else
262 *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP,
263 offsetof(struct net_device, type));
264 break;
265
266 case SKF_AD_OFF + SKF_AD_MARK:
267 cnt = convert_skb_access(SKF_AD_MARK, BPF_REG_A, BPF_REG_CTX, insn);
268 insn += cnt - 1;
269 break;
270
271 case SKF_AD_OFF + SKF_AD_RXHASH:
272 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
273
274 *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
275 offsetof(struct sk_buff, hash));
276 break;
277
278 case SKF_AD_OFF + SKF_AD_QUEUE:
279 cnt = convert_skb_access(SKF_AD_QUEUE, BPF_REG_A, BPF_REG_CTX, insn);
280 insn += cnt - 1;
281 break;
282
283 case SKF_AD_OFF + SKF_AD_VLAN_TAG:
284 cnt = convert_skb_access(SKF_AD_VLAN_TAG,
285 BPF_REG_A, BPF_REG_CTX, insn);
286 insn += cnt - 1;
287 break;
288
289 case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
290 cnt = convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
291 BPF_REG_A, BPF_REG_CTX, insn);
292 insn += cnt - 1;
293 break;
294
295 case SKF_AD_OFF + SKF_AD_VLAN_TPID:
296 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2);

		/* A = *(u16 *) (CTX + offsetof(vlan_proto)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, vlan_proto));
		/* A = ntohs(A) [emitting a/k/a swab16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
303 break;
304
305 case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
306 case SKF_AD_OFF + SKF_AD_NLATTR:
307 case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
308 case SKF_AD_OFF + SKF_AD_CPU:
309 case SKF_AD_OFF + SKF_AD_RANDOM:
		/* arg1 = CTX */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
		/* arg2 = A */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
		/* arg3 = X */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);

		/* Emit call(arg1=CTX, arg2=A, arg3=X) */
317 switch (fp->k) {
318 case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
319 *insn = BPF_EMIT_CALL(__skb_get_pay_offset);
320 break;
321 case SKF_AD_OFF + SKF_AD_NLATTR:
322 *insn = BPF_EMIT_CALL(__skb_get_nlattr);
323 break;
324 case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
325 *insn = BPF_EMIT_CALL(__skb_get_nlattr_nest);
326 break;
327 case SKF_AD_OFF + SKF_AD_CPU:
328 *insn = BPF_EMIT_CALL(__get_raw_cpu_id);
329 break;
330 case SKF_AD_OFF + SKF_AD_RANDOM:
331 *insn = BPF_EMIT_CALL(bpf_user_rnd_u32);
332 bpf_user_rnd_init_once();
333 break;
334 }
335 break;
336
	case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
		/* A ^= X */
		*insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);
		break;

	default:
		/* This is just a dummy call to avoid letting the compiler
		 * evict __bpf_call_base() as an optimization. Placed here
		 * where no-one bothers.
		 */
		BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0);
		return false;
349 }
350
351 *insnp = insn;
352 return true;
353}
354

/**
 *	bpf_convert_filter - convert filter program
 *	@prog: the user passed filter program
 *	@len: the length of the user passed filter program
 *	@new_prog: allocated 'struct bpf_prog' or NULL
 *	@new_len: pointer to store length of converted program
 *
 * Remap 'sock_filter' style classic BPF (cBPF) instruction set to
 * 'struct bpf_insn' style extended BPF (eBPF).
 * Conversion workflow:
 *
 * 1) First pass for calculating the new program length:
 *	bpf_convert_filter(old_prog, old_len, NULL, &new_len)
 *
 * 2) 2nd pass to remap in two passes: 1st pass finds new
 *    jump offsets, 2nd pass remapping:
 *	bpf_convert_filter(old_prog, old_len, new_prog, &new_len)
 */
373static int bpf_convert_filter(struct sock_filter *prog, int len,
374 struct bpf_prog *new_prog, int *new_len)
375{
376 int new_flen = 0, pass = 0, target, i, stack_off;
377 struct bpf_insn *new_insn, *first_insn = NULL;
378 struct sock_filter *fp;
379 int *addrs = NULL;
380 u8 bpf_src;
381
382 BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
383 BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
384
385 if (len <= 0 || len > BPF_MAXINSNS)
386 return -EINVAL;
387
388 if (new_prog) {
389 first_insn = new_prog->insnsi;
390 addrs = kcalloc(len, sizeof(*addrs),
391 GFP_KERNEL | __GFP_NOWARN);
392 if (!addrs)
393 return -ENOMEM;
394 }
395
396do_pass:
397 new_insn = first_insn;
398 fp = prog;
399

	if (new_prog) {
		/* Classic BPF expects A and X to be reset first. These need
		 * to be guaranteed to be the first two instructions.
		 */
		*new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
		*new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_X, BPF_REG_X);

		/* All programs must keep CTX in callee saved BPF_REG_CTX.
		 * In eBPF case it's done by the compiler, here we need to
		 * do this ourselves. Initial CTX is present in BPF_REG_ARG1.
		 */
		*new_insn++ = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
413 } else {
414 new_insn += 3;
415 }
416
417 for (i = 0; i < len; fp++, i++) {
418 struct bpf_insn tmp_insns[6] = { };
419 struct bpf_insn *insn = tmp_insns;
420
421 if (addrs)
422 addrs[i] = new_insn - first_insn;
423
424 switch (fp->code) {
425
426 case BPF_ALU | BPF_ADD | BPF_X:
427 case BPF_ALU | BPF_ADD | BPF_K:
428 case BPF_ALU | BPF_SUB | BPF_X:
429 case BPF_ALU | BPF_SUB | BPF_K:
430 case BPF_ALU | BPF_AND | BPF_X:
431 case BPF_ALU | BPF_AND | BPF_K:
432 case BPF_ALU | BPF_OR | BPF_X:
433 case BPF_ALU | BPF_OR | BPF_K:
434 case BPF_ALU | BPF_LSH | BPF_X:
435 case BPF_ALU | BPF_LSH | BPF_K:
436 case BPF_ALU | BPF_RSH | BPF_X:
437 case BPF_ALU | BPF_RSH | BPF_K:
438 case BPF_ALU | BPF_XOR | BPF_X:
439 case BPF_ALU | BPF_XOR | BPF_K:
440 case BPF_ALU | BPF_MUL | BPF_X:
441 case BPF_ALU | BPF_MUL | BPF_K:
442 case BPF_ALU | BPF_DIV | BPF_X:
443 case BPF_ALU | BPF_DIV | BPF_K:
444 case BPF_ALU | BPF_MOD | BPF_X:
445 case BPF_ALU | BPF_MOD | BPF_K:
446 case BPF_ALU | BPF_NEG:
447 case BPF_LD | BPF_ABS | BPF_W:
448 case BPF_LD | BPF_ABS | BPF_H:
449 case BPF_LD | BPF_ABS | BPF_B:
450 case BPF_LD | BPF_IND | BPF_W:
451 case BPF_LD | BPF_IND | BPF_H:
452 case BPF_LD | BPF_IND | BPF_B:
			/* Check for overloaded BPF extension and
			 * directly convert it if found, otherwise
			 * just move on with mapping.
			 */
			if (BPF_CLASS(fp->code) == BPF_LD &&
			    BPF_MODE(fp->code) == BPF_ABS &&
			    convert_bpf_extensions(fp, &insn))
				break;

			if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) ||
			    fp->code == (BPF_ALU | BPF_MOD | BPF_X)) {
				*insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X);
				/* Error with exception code on div/mod by 0.
				 * For cBPF programs, this was always return 0.
				 */
				*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_X, 0, 2);
				*insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
				*insn++ = BPF_EXIT_INSN();
471 }
472
473 *insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
474 break;
475
		/* Jump transformation cannot use BPF block macros
		 * everywhere as offset calculation and target updates
		 * require a bit more work than the rest, i.e. jump
		 * opcodes map as-is, but offsets need adjustment.
		 */

#define BPF_EMIT_JMP							\
	do {								\
		const s32 off_min = S16_MIN, off_max = S16_MAX;		\
		s32 off;						\
									\
		if (target >= len || target < 0)			\
			goto err;					\
		off = addrs ? addrs[target] - addrs[i] - 1 : 0;		\
		/* Adjust pc relative offset for 2nd or 3rd insn. */	\
		off -= insn - tmp_insns;				\
		/* Reject anything not fitting into insn->off. */	\
		if (off < off_min || off > off_max)			\
			goto err;					\
		insn->off = off;					\
	} while (0)
497
498 case BPF_JMP | BPF_JA:
499 target = i + fp->k + 1;
500 insn->code = fp->code;
501 BPF_EMIT_JMP;
502 break;
503
504 case BPF_JMP | BPF_JEQ | BPF_K:
505 case BPF_JMP | BPF_JEQ | BPF_X:
506 case BPF_JMP | BPF_JSET | BPF_K:
507 case BPF_JMP | BPF_JSET | BPF_X:
508 case BPF_JMP | BPF_JGT | BPF_K:
509 case BPF_JMP | BPF_JGT | BPF_X:
510 case BPF_JMP | BPF_JGE | BPF_K:
511 case BPF_JMP | BPF_JGE | BPF_X:
			if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
				/* BPF immediates are signed, zero extend
				 * immediate into tmp register and use it
				 * in compare insn.
				 */
				*insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);

519 insn->dst_reg = BPF_REG_A;
520 insn->src_reg = BPF_REG_TMP;
521 bpf_src = BPF_X;
522 } else {
523 insn->dst_reg = BPF_REG_A;
524 insn->imm = fp->k;
525 bpf_src = BPF_SRC(fp->code);
526 insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0;
527 }

			/* Common case where 'jump_false' is next insn. */
			if (fp->jf == 0) {
531 insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
532 target = i + fp->jt + 1;
533 BPF_EMIT_JMP;
534 break;
535 }

			/* Convert some jumps when 'jump_true' is next insn. */
			if (fp->jt == 0) {
539 switch (BPF_OP(fp->code)) {
540 case BPF_JEQ:
541 insn->code = BPF_JMP | BPF_JNE | bpf_src;
542 break;
543 case BPF_JGT:
544 insn->code = BPF_JMP | BPF_JLE | bpf_src;
545 break;
546 case BPF_JGE:
547 insn->code = BPF_JMP | BPF_JLT | bpf_src;
548 break;
549 default:
550 goto jmp_rest;
551 }
552
553 target = i + fp->jf + 1;
554 BPF_EMIT_JMP;
555 break;
556 }
jmp_rest:
			/* Other jumps are mapped into two insns: Jxx and JA. */
			target = i + fp->jt + 1;
560 insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
561 BPF_EMIT_JMP;
562 insn++;
563
564 insn->code = BPF_JMP | BPF_JA;
565 target = i + fp->jf + 1;
566 BPF_EMIT_JMP;
567 break;
568
		/* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
		case BPF_LDX | BPF_MSH | BPF_B:
			/* tmp = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_A);
			/* A = BPF_R0 = *(u8 *) (skb->data + K) */
			*insn++ = BPF_LD_ABS(BPF_B, fp->k);
			/* A &= 0xf */
			*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
			/* A <<= 2 */
			*insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
			/* X = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			/* A = tmp */
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
			break;
584
		/* RET_K is remapped into 2 insns. RET_A case doesn't need an
		 * extra mov as BPF_REG_0 is already mapped into BPF_REG_A.
		 */
		case BPF_RET | BPF_A:
		case BPF_RET | BPF_K:
			if (BPF_RVAL(fp->code) == BPF_K)
				*insn++ = BPF_MOV32_RAW(BPF_K, BPF_REG_0,
							0, fp->k);
			*insn = BPF_EXIT_INSN();
			break;
595
		/* Store to stack. */
		case BPF_ST:
		case BPF_STX:
			stack_off = fp->k * 4 + 4;
			*insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
					    BPF_ST ? BPF_REG_A : BPF_REG_X,
					    -stack_off);
			/* check_load_and_stores() verifies that classic BPF can
			 * load from stack only after write, so tracking
			 * stack_depth for ST|STX insns is enough
			 */
			if (new_prog && new_prog->aux->stack_depth < stack_off)
				new_prog->aux->stack_depth = stack_off;
609 break;

		/* Load from stack. */
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			stack_off = fp->k * 4 + 4;
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_FP,
					    -stack_off);
			break;

		/* A = K or X = K */
		case BPF_LD | BPF_IMM:
		case BPF_LDX | BPF_IMM:
			*insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
					      BPF_REG_A : BPF_REG_X, fp->k);
			break;

		/* X = A */
		case BPF_MISC | BPF_TAX:
			*insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			break;

		/* A = X */
		case BPF_MISC | BPF_TXA:
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
			break;

		/* A = skb->len or X = skb->len */
		case BPF_LD | BPF_W | BPF_LEN:
		case BPF_LDX | BPF_W | BPF_LEN:
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
					    offsetof(struct sk_buff, len));
			break;

		/* Access seccomp_data fields. */
		case BPF_LDX | BPF_ABS | BPF_W:
			/* A = *(u32 *) (ctx + K) */
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
			break;

		/* Unknown instruction. */
		default:
			goto err;
654 }
655
656 insn++;
657 if (new_prog)
658 memcpy(new_insn, tmp_insns,
659 sizeof(*insn) * (insn - tmp_insns));
660 new_insn += insn - tmp_insns;
661 }
662
	if (!new_prog) {
		/* Only calculating new length. */
		*new_len = new_insn - first_insn;
		return 0;
667 }
668
669 pass++;
670 if (new_flen != new_insn - first_insn) {
671 new_flen = new_insn - first_insn;
672 if (pass > 2)
673 goto err;
674 goto do_pass;
675 }
676
677 kfree(addrs);
678 BUG_ON(*new_len != new_flen);
679 return 0;
680err:
681 kfree(addrs);
682 return -EINVAL;
683}
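
/* The expected calling convention, mirrored by bpf_migrate_filter()
 * below; a minimal sketch assuming old_prog/old_len already passed
 * bpf_check_classic():
 *
 *	int new_len = 0;
 *
 *	// 1st pass: compute the resulting eBPF program length only.
 *	err = bpf_convert_filter(old_prog, old_len, NULL, &new_len);
 *	// ... allocate new_prog with room for new_len insns ...
 *	// 2nd pass: actually emit the remapped instructions.
 *	err = bpf_convert_filter(old_prog, old_len, new_prog, &new_len);
 */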
684

/* Security:
 *
 * As we don't want to clear mem[] array for each packet going through
 * __bpf_prog_run(), we check that a filter loaded by a user never tries
 * to read a cell if not previously written, and we check all branches
 * to be sure a malicious user doesn't try to abuse us.
 */
692static int check_load_and_stores(const struct sock_filter *filter, int flen)
693{
694 u16 *masks, memvalid = 0;
695 int pc, ret = 0;
696
697 BUILD_BUG_ON(BPF_MEMWORDS > 16);
698
699 masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
700 if (!masks)
701 return -ENOMEM;
702
703 memset(masks, 0xff, flen * sizeof(*masks));
704
705 for (pc = 0; pc < flen; pc++) {
706 memvalid &= masks[pc];
707
708 switch (filter[pc].code) {
709 case BPF_ST:
710 case BPF_STX:
711 memvalid |= (1 << filter[pc].k);
712 break;
713 case BPF_LD | BPF_MEM:
714 case BPF_LDX | BPF_MEM:
715 if (!(memvalid & (1 << filter[pc].k))) {
716 ret = -EINVAL;
717 goto error;
718 }
719 break;
720 case BPF_JMP | BPF_JA:
			/* A jump must set masks on target */
722 masks[pc + 1 + filter[pc].k] &= memvalid;
723 memvalid = ~0;
724 break;
725 case BPF_JMP | BPF_JEQ | BPF_K:
726 case BPF_JMP | BPF_JEQ | BPF_X:
727 case BPF_JMP | BPF_JGE | BPF_K:
728 case BPF_JMP | BPF_JGE | BPF_X:
729 case BPF_JMP | BPF_JGT | BPF_K:
730 case BPF_JMP | BPF_JGT | BPF_X:
731 case BPF_JMP | BPF_JSET | BPF_K:
732 case BPF_JMP | BPF_JSET | BPF_X:
			/* A jump must set masks on targets */
734 masks[pc + 1 + filter[pc].jt] &= memvalid;
735 masks[pc + 1 + filter[pc].jf] &= memvalid;
736 memvalid = ~0;
737 break;
738 }
739 }
740error:
741 kfree(masks);
742 return ret;
743}
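
/* Example of a program check_load_and_stores() rejects, as a sketch:
 *
 *	struct sock_filter bad[] = {
 *		BPF_STMT(BPF_LD | BPF_MEM, 0),	// A = mem[0], never written
 *		BPF_STMT(BPF_RET | BPF_A, 0),
 *	};
 *
 * No BPF_ST/BPF_STX stores to mem[0] on any path before the load, so
 * the load would read an uninitialized scratch cell, hence the -EINVAL.
 */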
744
745static bool chk_code_allowed(u16 code_to_probe)
746{
747 static const bool codes[] = {
		/* 32 bit ALU operations */
749 [BPF_ALU | BPF_ADD | BPF_K] = true,
750 [BPF_ALU | BPF_ADD | BPF_X] = true,
751 [BPF_ALU | BPF_SUB | BPF_K] = true,
752 [BPF_ALU | BPF_SUB | BPF_X] = true,
753 [BPF_ALU | BPF_MUL | BPF_K] = true,
754 [BPF_ALU | BPF_MUL | BPF_X] = true,
755 [BPF_ALU | BPF_DIV | BPF_K] = true,
756 [BPF_ALU | BPF_DIV | BPF_X] = true,
757 [BPF_ALU | BPF_MOD | BPF_K] = true,
758 [BPF_ALU | BPF_MOD | BPF_X] = true,
759 [BPF_ALU | BPF_AND | BPF_K] = true,
760 [BPF_ALU | BPF_AND | BPF_X] = true,
761 [BPF_ALU | BPF_OR | BPF_K] = true,
762 [BPF_ALU | BPF_OR | BPF_X] = true,
763 [BPF_ALU | BPF_XOR | BPF_K] = true,
764 [BPF_ALU | BPF_XOR | BPF_X] = true,
765 [BPF_ALU | BPF_LSH | BPF_K] = true,
766 [BPF_ALU | BPF_LSH | BPF_X] = true,
767 [BPF_ALU | BPF_RSH | BPF_K] = true,
768 [BPF_ALU | BPF_RSH | BPF_X] = true,
769 [BPF_ALU | BPF_NEG] = true,
		/* Load instructions */
771 [BPF_LD | BPF_W | BPF_ABS] = true,
772 [BPF_LD | BPF_H | BPF_ABS] = true,
773 [BPF_LD | BPF_B | BPF_ABS] = true,
774 [BPF_LD | BPF_W | BPF_LEN] = true,
775 [BPF_LD | BPF_W | BPF_IND] = true,
776 [BPF_LD | BPF_H | BPF_IND] = true,
777 [BPF_LD | BPF_B | BPF_IND] = true,
778 [BPF_LD | BPF_IMM] = true,
779 [BPF_LD | BPF_MEM] = true,
780 [BPF_LDX | BPF_W | BPF_LEN] = true,
781 [BPF_LDX | BPF_B | BPF_MSH] = true,
782 [BPF_LDX | BPF_IMM] = true,
783 [BPF_LDX | BPF_MEM] = true,
		/* Store instructions */
785 [BPF_ST] = true,
786 [BPF_STX] = true,
		/* Misc instructions */
788 [BPF_MISC | BPF_TAX] = true,
789 [BPF_MISC | BPF_TXA] = true,
		/* Return instructions */
791 [BPF_RET | BPF_K] = true,
792 [BPF_RET | BPF_A] = true,
		/* Jump instructions */
794 [BPF_JMP | BPF_JA] = true,
795 [BPF_JMP | BPF_JEQ | BPF_K] = true,
796 [BPF_JMP | BPF_JEQ | BPF_X] = true,
797 [BPF_JMP | BPF_JGE | BPF_K] = true,
798 [BPF_JMP | BPF_JGE | BPF_X] = true,
799 [BPF_JMP | BPF_JGT | BPF_K] = true,
800 [BPF_JMP | BPF_JGT | BPF_X] = true,
801 [BPF_JMP | BPF_JSET | BPF_K] = true,
802 [BPF_JMP | BPF_JSET | BPF_X] = true,
803 };
804
805 if (code_to_probe >= ARRAY_SIZE(codes))
806 return false;
807
808 return codes[code_to_probe];
809}
810
811static bool bpf_check_basics_ok(const struct sock_filter *filter,
812 unsigned int flen)
813{
814 if (filter == NULL)
815 return false;
816 if (flen == 0 || flen > BPF_MAXINSNS)
817 return false;
818
819 return true;
820}
821

/**
 *	bpf_check_classic - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
836static int bpf_check_classic(const struct sock_filter *filter,
837 unsigned int flen)
838{
839 bool anc_found;
840 int pc;
841
	/* Check the filter code now */
843 for (pc = 0; pc < flen; pc++) {
844 const struct sock_filter *ftest = &filter[pc];
845
		/* May we actually operate on this code? */
847 if (!chk_code_allowed(ftest->code))
848 return -EINVAL;
849
		/* Some instructions need special checks */
851 switch (ftest->code) {
852 case BPF_ALU | BPF_DIV | BPF_K:
853 case BPF_ALU | BPF_MOD | BPF_K:
			/* Check for division by zero */
855 if (ftest->k == 0)
856 return -EINVAL;
857 break;
858 case BPF_ALU | BPF_LSH | BPF_K:
859 case BPF_ALU | BPF_RSH | BPF_K:
860 if (ftest->k >= 32)
861 return -EINVAL;
862 break;
863 case BPF_LD | BPF_MEM:
864 case BPF_LDX | BPF_MEM:
865 case BPF_ST:
866 case BPF_STX:
			/* Check for invalid memory addresses */
868 if (ftest->k >= BPF_MEMWORDS)
869 return -EINVAL;
870 break;
871 case BPF_JMP | BPF_JA:
			/* Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
876 if (ftest->k >= (unsigned int)(flen - pc - 1))
877 return -EINVAL;
878 break;
879 case BPF_JMP | BPF_JEQ | BPF_K:
880 case BPF_JMP | BPF_JEQ | BPF_X:
881 case BPF_JMP | BPF_JGE | BPF_K:
882 case BPF_JMP | BPF_JGE | BPF_X:
883 case BPF_JMP | BPF_JGT | BPF_K:
884 case BPF_JMP | BPF_JGT | BPF_X:
885 case BPF_JMP | BPF_JSET | BPF_K:
886 case BPF_JMP | BPF_JSET | BPF_X:
			/* Both conditionals must be safe */
888 if (pc + ftest->jt + 1 >= flen ||
889 pc + ftest->jf + 1 >= flen)
890 return -EINVAL;
891 break;
892 case BPF_LD | BPF_W | BPF_ABS:
893 case BPF_LD | BPF_H | BPF_ABS:
894 case BPF_LD | BPF_B | BPF_ABS:
895 anc_found = false;
896 if (bpf_anc_helper(ftest) & BPF_ANC)
897 anc_found = true;
			/* Ancillary operation unknown or unsupported */
899 if (anc_found == false && ftest->k >= SKF_AD_OFF)
900 return -EINVAL;
901 }
902 }

	/* Last instruction must be a RET code */
905 switch (filter[flen - 1].code) {
906 case BPF_RET | BPF_K:
907 case BPF_RET | BPF_A:
908 return check_load_and_stores(filter, flen);
909 }
910
911 return -EINVAL;
912}
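
/* The smallest program that passes bpf_check_classic() is a single
 * unconditional return, f.e. one accepting at most 64 bytes per packet:
 *
 *	struct sock_filter accept_64 = BPF_STMT(BPF_RET | BPF_K, 64);
 */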
913
914static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
915 const struct sock_fprog *fprog)
916{
917 unsigned int fsize = bpf_classic_proglen(fprog);
918 struct sock_fprog_kern *fkprog;
919
920 fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
921 if (!fp->orig_prog)
922 return -ENOMEM;
923
924 fkprog = fp->orig_prog;
925 fkprog->len = fprog->len;
926
927 fkprog->filter = kmemdup(fp->insns, fsize,
928 GFP_KERNEL | __GFP_NOWARN);
929 if (!fkprog->filter) {
930 kfree(fp->orig_prog);
931 return -ENOMEM;
932 }
933
934 return 0;
935}
936
937static void bpf_release_orig_filter(struct bpf_prog *fp)
938{
939 struct sock_fprog_kern *fprog = fp->orig_prog;
940
941 if (fprog) {
942 kfree(fprog->filter);
943 kfree(fprog);
944 }
945}
946
947static void __bpf_prog_release(struct bpf_prog *prog)
948{
949 if (prog->type == BPF_PROG_TYPE_SOCKET_FILTER) {
950 bpf_prog_put(prog);
951 } else {
952 bpf_release_orig_filter(prog);
953 bpf_prog_free(prog);
954 }
955}
956
957static void __sk_filter_release(struct sk_filter *fp)
958{
959 __bpf_prog_release(fp->prog);
960 kfree(fp);
961}
962

/**
 *	sk_filter_release_rcu - Release a socket filter by rcu_head
 *	@rcu: rcu_head that contains the sk_filter to free
 */
967static void sk_filter_release_rcu(struct rcu_head *rcu)
968{
969 struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
970
971 __sk_filter_release(fp);
972}
973

/**
 *	sk_filter_release - release a socket filter
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */
980static void sk_filter_release(struct sk_filter *fp)
981{
982 if (refcount_dec_and_test(&fp->refcnt))
983 call_rcu(&fp->rcu, sk_filter_release_rcu);
984}
985
986void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
987{
988 u32 filter_size = bpf_prog_size(fp->prog->len);
989
990 atomic_sub(filter_size, &sk->sk_omem_alloc);
991 sk_filter_release(fp);
992}
993

/* try to charge the socket memory if there is space available
 * return true on success
 */
997static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp)
998{
999 u32 filter_size = bpf_prog_size(fp->prog->len);

	/* same check as in sock_kmalloc() */
1002 if (filter_size <= sysctl_optmem_max &&
1003 atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
1004 atomic_add(filter_size, &sk->sk_omem_alloc);
1005 return true;
1006 }
1007 return false;
1008}
1009
1010bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
1011{
1012 if (!refcount_inc_not_zero(&fp->refcnt))
1013 return false;
1014
1015 if (!__sk_filter_charge(sk, fp)) {
1016 sk_filter_release(fp);
1017 return false;
1018 }
1019 return true;
1020}
1021
1022static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
1023{
1024 struct sock_filter *old_prog;
1025 struct bpf_prog *old_fp;
1026 int err, new_len, old_len = fp->len;
1027
	/* We are free to overwrite insns et al right here as it
	 * won't be used at this point in time anymore internally
	 * after the migration to the eBPF instruction
	 * representation.
	 */
1033 BUILD_BUG_ON(sizeof(struct sock_filter) !=
1034 sizeof(struct bpf_insn));
1035
	/* Conversion cannot happen on overlapping memory areas,
	 * so we need to keep the user BPF around until the 2nd
	 * pass. At this time, the user BPF is stored in fp->insns.
	 */
1040 old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
1041 GFP_KERNEL | __GFP_NOWARN);
1042 if (!old_prog) {
1043 err = -ENOMEM;
1044 goto out_err;
1045 }
1046
	/* 1st pass: calculate the new program length. */
1048 err = bpf_convert_filter(old_prog, old_len, NULL, &new_len);
1049 if (err)
1050 goto out_err_free;
1051
	/* Expand fp for appending the new filter representation. */
1053 old_fp = fp;
1054 fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0);
1055 if (!fp) {
		/* The old_fp is still around in case we couldn't
		 * allocate new memory, so uncharge on that one.
		 */
1059 fp = old_fp;
1060 err = -ENOMEM;
1061 goto out_err_free;
1062 }
1063
1064 fp->len = new_len;

	/* 2nd pass: remap sock_filter insns into bpf_insn insns. */
1067 err = bpf_convert_filter(old_prog, old_len, fp, &new_len);
1068 if (err)
		/* 2nd bpf_convert_filter() can fail only if it fails
		 * to allocate memory, remapping must succeed. Note,
		 * that at this time old_fp has already been released
		 * by krealloc().
		 */
1074 goto out_err_free;
1075
1076 fp = bpf_prog_select_runtime(fp, &err);
1077 if (err)
1078 goto out_err_free;
1079
1080 kfree(old_prog);
1081 return fp;
1082
1083out_err_free:
1084 kfree(old_prog);
1085out_err:
1086 __bpf_prog_release(fp);
1087 return ERR_PTR(err);
1088}
1089
1090static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp,
1091 bpf_aux_classic_check_t trans)
1092{
1093 int err;
1094
1095 fp->bpf_func = NULL;
1096 fp->jited = 0;
1097
1098 err = bpf_check_classic(fp->insns, fp->len);
1099 if (err) {
1100 __bpf_prog_release(fp);
1101 return ERR_PTR(err);
1102 }
1103
	/* There might be additional checks and transformations
	 * needed on classic filters, f.e. in case of seccomp.
	 */
1107 if (trans) {
1108 err = trans(fp->insns, fp->len);
1109 if (err) {
1110 __bpf_prog_release(fp);
1111 return ERR_PTR(err);
1112 }
1113 }
1114
	/* Probe if we can JIT compile the filter and if so, do
	 * the compilation of the filter.
	 */
1118 bpf_jit_compile(fp);
1119
	/* JIT compiler couldn't process this filter, so do the
	 * internal BPF translation for the optimized interpreter.
	 */
1123 if (!fp->jited)
1124 fp = bpf_migrate_filter(fp);
1125
1126 return fp;
1127}
1128

/**
 *	bpf_prog_create - create an unattached filter
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *
 * Create a filter independent of any socket. We first run some
 * sanity checks on it to make sure it does not explode on us later.
 * If an error occurs or there is insufficient memory for the filter
 * a negative errno code is returned. On success the return is zero.
 */
1139int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
1140{
1141 unsigned int fsize = bpf_classic_proglen(fprog);
1142 struct bpf_prog *fp;
1143
	/* Make sure new filter is there and in the right amounts. */
1145 if (!bpf_check_basics_ok(fprog->filter, fprog->len))
1146 return -EINVAL;
1147
1148 fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
1149 if (!fp)
1150 return -ENOMEM;
1151
1152 memcpy(fp->insns, fprog->filter, fsize);
1153
1154 fp->len = fprog->len;
1155
	/* Since unattached filters are not copied back to user
	 * space through sk_get_filter(), we do not need to hold
	 * a copy here, and can spare us the work.
	 */
1159 fp->orig_prog = NULL;
1160
	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
1164 fp = bpf_prepare_filter(fp, NULL);
1165 if (IS_ERR(fp))
1166 return PTR_ERR(fp);
1167
1168 *pfp = fp;
1169 return 0;
1170}
1171EXPORT_SYMBOL_GPL(bpf_prog_create);
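
/* A minimal in-kernel usage sketch, assuming a caller that wants a
 * filter returning the full packet length:
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_LD | BPF_W | BPF_LEN, 0),
 *		BPF_STMT(BPF_RET | BPF_A, 0),
 *	};
 *	struct sock_fprog_kern fprog = {
 *		.len	= ARRAY_SIZE(insns),
 *		.filter	= insns,
 *	};
 *	struct bpf_prog *prog;
 *
 *	err = bpf_prog_create(&prog, &fprog);
 *	...
 *	bpf_prog_destroy(prog);
 */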
1172

/**
 *	bpf_prog_create_from_user - create an unattached filter from user buffer
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *	@trans: post-classic verifier transformation handler
 *	@save_orig: save classic BPF program
 *
 * This function effectively does the same as bpf_prog_create(), only
 * that it builds up its insns buffer from user space provided buffer.
 * It also allows for passing a bpf_aux_classic_check_t handler.
 */
1184int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
1185 bpf_aux_classic_check_t trans, bool save_orig)
1186{
1187 unsigned int fsize = bpf_classic_proglen(fprog);
1188 struct bpf_prog *fp;
1189 int err;
1190
	/* Make sure new filter is there and in the right amounts. */
1192 if (!bpf_check_basics_ok(fprog->filter, fprog->len))
1193 return -EINVAL;
1194
1195 fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
1196 if (!fp)
1197 return -ENOMEM;
1198
1199 if (copy_from_user(fp->insns, fprog->filter, fsize)) {
1200 __bpf_prog_free(fp);
1201 return -EFAULT;
1202 }
1203
1204 fp->len = fprog->len;
1205 fp->orig_prog = NULL;
1206
1207 if (save_orig) {
1208 err = bpf_prog_store_orig_filter(fp, fprog);
1209 if (err) {
1210 __bpf_prog_free(fp);
1211 return -ENOMEM;
1212 }
1213 }
1214
	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
1218 fp = bpf_prepare_filter(fp, trans);
1219 if (IS_ERR(fp))
1220 return PTR_ERR(fp);
1221
1222 *pfp = fp;
1223 return 0;
1224}
1225EXPORT_SYMBOL_GPL(bpf_prog_create_from_user);
1226
1227void bpf_prog_destroy(struct bpf_prog *fp)
1228{
1229 __bpf_prog_release(fp);
1230}
1231EXPORT_SYMBOL_GPL(bpf_prog_destroy);
1232
1233static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
1234{
1235 struct sk_filter *fp, *old_fp;
1236
1237 fp = kmalloc(sizeof(*fp), GFP_KERNEL);
1238 if (!fp)
1239 return -ENOMEM;
1240
1241 fp->prog = prog;
1242
1243 if (!__sk_filter_charge(sk, fp)) {
1244 kfree(fp);
1245 return -ENOMEM;
1246 }
1247 refcount_set(&fp->refcnt, 1);
1248
1249 old_fp = rcu_dereference_protected(sk->sk_filter,
1250 lockdep_sock_is_held(sk));
1251 rcu_assign_pointer(sk->sk_filter, fp);
1252
1253 if (old_fp)
1254 sk_filter_uncharge(sk, old_fp);
1255
1256 return 0;
1257}
1258
1259static int __reuseport_attach_prog(struct bpf_prog *prog, struct sock *sk)
1260{
1261 struct bpf_prog *old_prog;
1262 int err;
1263
1264 if (bpf_prog_size(prog->len) > sysctl_optmem_max)
1265 return -ENOMEM;
1266
1267 if (sk_unhashed(sk) && sk->sk_reuseport) {
1268 err = reuseport_alloc(sk);
1269 if (err)
1270 return err;
1271 } else if (!rcu_access_pointer(sk->sk_reuseport_cb)) {
		/* The socket wasn't bound with SO_REUSEPORT */
1273 return -EINVAL;
1274 }
1275
1276 old_prog = reuseport_attach_prog(sk, prog);
1277 if (old_prog)
1278 bpf_prog_destroy(old_prog);
1279
1280 return 0;
1281}
1282
1283static
1284struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
1285{
1286 unsigned int fsize = bpf_classic_proglen(fprog);
1287 struct bpf_prog *prog;
1288 int err;
1289
1290 if (sock_flag(sk, SOCK_FILTER_LOCKED))
1291 return ERR_PTR(-EPERM);

	/* Make sure new filter is there and in the right amounts. */
1294 if (!bpf_check_basics_ok(fprog->filter, fprog->len))
1295 return ERR_PTR(-EINVAL);
1296
1297 prog = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
1298 if (!prog)
1299 return ERR_PTR(-ENOMEM);
1300
1301 if (copy_from_user(prog->insns, fprog->filter, fsize)) {
1302 __bpf_prog_free(prog);
1303 return ERR_PTR(-EFAULT);
1304 }
1305
1306 prog->len = fprog->len;
1307
1308 err = bpf_prog_store_orig_filter(prog, fprog);
1309 if (err) {
1310 __bpf_prog_free(prog);
1311 return ERR_PTR(-ENOMEM);
1312 }

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
1317 return bpf_prepare_filter(prog, NULL);
1318}
1319

/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
1330int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
1331{
1332 struct bpf_prog *prog = __get_filter(fprog, sk);
1333 int err;
1334
1335 if (IS_ERR(prog))
1336 return PTR_ERR(prog);
1337
1338 err = __sk_attach_prog(prog, sk);
1339 if (err < 0) {
1340 __bpf_prog_release(prog);
1341 return err;
1342 }
1343
1344 return 0;
1345}
1346EXPORT_SYMBOL_GPL(sk_attach_filter);
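
/* From user space this path is reached via setsockopt(). A minimal
 * sketch attaching an accept-all classic filter (error handling
 * elided):
 *
 *	struct sock_filter code[] = {
 *		{ 0x06, 0, 0, 0x00040000 },	// BPF_RET | BPF_K, accept
 *	};
 *	struct sock_fprog bpf = {
 *		.len	= sizeof(code) / sizeof(code[0]),
 *		.filter	= code,
 *	};
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &bpf, sizeof(bpf));
 */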
1347
1348int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
1349{
1350 struct bpf_prog *prog = __get_filter(fprog, sk);
1351 int err;
1352
1353 if (IS_ERR(prog))
1354 return PTR_ERR(prog);
1355
1356 err = __reuseport_attach_prog(prog, sk);
1357 if (err < 0) {
1358 __bpf_prog_release(prog);
1359 return err;
1360 }
1361
1362 return 0;
1363}
1364
1365static struct bpf_prog *__get_bpf(u32 ufd, struct sock *sk)
1366{
1367 if (sock_flag(sk, SOCK_FILTER_LOCKED))
1368 return ERR_PTR(-EPERM);
1369
1370 return bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);
1371}
1372
1373int sk_attach_bpf(u32 ufd, struct sock *sk)
1374{
1375 struct bpf_prog *prog = __get_bpf(ufd, sk);
1376 int err;
1377
1378 if (IS_ERR(prog))
1379 return PTR_ERR(prog);
1380
1381 err = __sk_attach_prog(prog, sk);
1382 if (err < 0) {
1383 bpf_prog_put(prog);
1384 return err;
1385 }
1386
1387 return 0;
1388}
1389
1390int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk)
1391{
1392 struct bpf_prog *prog = __get_bpf(ufd, sk);
1393 int err;
1394
1395 if (IS_ERR(prog))
1396 return PTR_ERR(prog);
1397
1398 err = __reuseport_attach_prog(prog, sk);
1399 if (err < 0) {
1400 bpf_prog_put(prog);
1401 return err;
1402 }
1403
1404 return 0;
1405}
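
/* The reuseport attach points correspond to the SO_ATTACH_REUSEPORT_CBPF
 * and SO_ATTACH_REUSEPORT_EBPF socket options; the eBPF variant takes a
 * program fd from bpf(BPF_PROG_LOAD). Sketch from user space, given the
 * socket must have SO_REUSEPORT set before being bound:
 *
 *	int one = 1;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF,
 *		   &prog_fd, sizeof(prog_fd));
 */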
1406
1407struct bpf_scratchpad {
1408 union {
1409 __be32 diff[MAX_BPF_STACK / sizeof(__be32)];
1410 u8 buff[MAX_BPF_STACK];
1411 };
1412};
1413
1414static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);
1415
1416static inline int __bpf_try_make_writable(struct sk_buff *skb,
1417 unsigned int write_len)
1418{
1419 return skb_ensure_writable(skb, write_len);
1420}
1421
1422static inline int bpf_try_make_writable(struct sk_buff *skb,
1423 unsigned int write_len)
1424{
1425 int err = __bpf_try_make_writable(skb, write_len);
1426
1427 bpf_compute_data_pointers(skb);
1428 return err;
1429}
1430
1431static int bpf_try_make_head_writable(struct sk_buff *skb)
1432{
1433 return bpf_try_make_writable(skb, skb_headlen(skb));
1434}
1435
1436static inline void bpf_push_mac_rcsum(struct sk_buff *skb)
1437{
1438 if (skb_at_tc_ingress(skb))
1439 skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
1440}
1441
1442static inline void bpf_pull_mac_rcsum(struct sk_buff *skb)
1443{
1444 if (skb_at_tc_ingress(skb))
1445 skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len);
1446}
1447
1448BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset,
1449 const void *, from, u32, len, u64, flags)
1450{
1451 void *ptr;
1452
1453 if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
1454 return -EINVAL;
1455 if (unlikely(offset > 0xffff))
1456 return -EFAULT;
1457 if (unlikely(bpf_try_make_writable(skb, offset + len)))
1458 return -EFAULT;
1459
1460 ptr = skb->data + offset;
1461 if (flags & BPF_F_RECOMPUTE_CSUM)
1462 __skb_postpull_rcsum(skb, ptr, len, offset);
1463
1464 memcpy(ptr, from, len);
1465
1466 if (flags & BPF_F_RECOMPUTE_CSUM)
1467 __skb_postpush_rcsum(skb, ptr, len, offset);
1468 if (flags & BPF_F_INVALIDATE_HASH)
1469 skb_clear_hash(skb);
1470
1471 return 0;
1472}
1473
1474static const struct bpf_func_proto bpf_skb_store_bytes_proto = {
1475 .func = bpf_skb_store_bytes,
1476 .gpl_only = false,
1477 .ret_type = RET_INTEGER,
1478 .arg1_type = ARG_PTR_TO_CTX,
1479 .arg2_type = ARG_ANYTHING,
1480 .arg3_type = ARG_PTR_TO_MEM,
1481 .arg4_type = ARG_CONST_SIZE,
1482 .arg5_type = ARG_ANYTHING,
1483};
1484
1485BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset,
1486 void *, to, u32, len)
1487{
1488 void *ptr;
1489
1490 if (unlikely(offset > 0xffff))
1491 goto err_clear;
1492
1493 ptr = skb_header_pointer(skb, offset, len, to);
1494 if (unlikely(!ptr))
1495 goto err_clear;
1496 if (ptr != to)
1497 memcpy(to, ptr, len);
1498
1499 return 0;
1500err_clear:
1501 memset(to, 0, len);
1502 return -EFAULT;
1503}
1504
1505static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
1506 .func = bpf_skb_load_bytes,
1507 .gpl_only = false,
1508 .ret_type = RET_INTEGER,
1509 .arg1_type = ARG_PTR_TO_CTX,
1510 .arg2_type = ARG_ANYTHING,
1511 .arg3_type = ARG_PTR_TO_UNINIT_MEM,
1512 .arg4_type = ARG_CONST_SIZE,
1513};
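
/* From BPF program context the two helpers above pair up for read-
 * modify-write of packet data outside the direct access window. A
 * hedged sketch in restricted C (buf and off are the program's own):
 *
 *	__u8 buf[2];
 *
 *	if (!bpf_skb_load_bytes(skb, off, buf, sizeof(buf))) {
 *		buf[0] ^= 0xff;
 *		bpf_skb_store_bytes(skb, off, buf, sizeof(buf),
 *				    BPF_F_RECOMPUTE_CSUM);
 *	}
 */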
1514
1515BPF_CALL_2(bpf_skb_pull_data, struct sk_buff *, skb, u32, len)
1516{
	/* Idea is the following: should the needed direct read/write
	 * test fail during runtime, we can pull in more data and redo
	 * again, since implicitly, we invalidate previous checks here.
	 *
	 * Or, since we know how much we need to make read/writeable,
	 * this can be done once at the program beginning for direct
	 * access case. By this we overcome limitations of only current
	 * headlen being accessible.
	 */
1526 return bpf_try_make_writable(skb, len ? : skb_headlen(skb));
1527}
1528
1529static const struct bpf_func_proto bpf_skb_pull_data_proto = {
1530 .func = bpf_skb_pull_data,
1531 .gpl_only = false,
1532 .ret_type = RET_INTEGER,
1533 .arg1_type = ARG_PTR_TO_CTX,
1534 .arg2_type = ARG_ANYTHING,
1535};
1536
1537BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset,
1538 u64, from, u64, to, u64, flags)
1539{
1540 __sum16 *ptr;
1541
1542 if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK)))
1543 return -EINVAL;
1544 if (unlikely(offset > 0xffff || offset & 1))
1545 return -EFAULT;
1546 if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
1547 return -EFAULT;
1548
1549 ptr = (__sum16 *)(skb->data + offset);
1550 switch (flags & BPF_F_HDR_FIELD_MASK) {
1551 case 0:
1552 if (unlikely(from != 0))
1553 return -EINVAL;
1554
1555 csum_replace_by_diff(ptr, to);
1556 break;
1557 case 2:
1558 csum_replace2(ptr, from, to);
1559 break;
1560 case 4:
1561 csum_replace4(ptr, from, to);
1562 break;
1563 default:
1564 return -EINVAL;
1565 }
1566
1567 return 0;
1568}
1569
1570static const struct bpf_func_proto bpf_l3_csum_replace_proto = {
1571 .func = bpf_l3_csum_replace,
1572 .gpl_only = false,
1573 .ret_type = RET_INTEGER,
1574 .arg1_type = ARG_PTR_TO_CTX,
1575 .arg2_type = ARG_ANYTHING,
1576 .arg3_type = ARG_ANYTHING,
1577 .arg4_type = ARG_ANYTHING,
1578 .arg5_type = ARG_ANYTHING,
1579};
1580
1581BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset,
1582 u64, from, u64, to, u64, flags)
1583{
1584 bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
1585 bool is_mmzero = flags & BPF_F_MARK_MANGLED_0;
1586 bool do_mforce = flags & BPF_F_MARK_ENFORCE;
1587 __sum16 *ptr;
1588
1589 if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_MARK_ENFORCE |
1590 BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK)))
1591 return -EINVAL;
1592 if (unlikely(offset > 0xffff || offset & 1))
1593 return -EFAULT;
1594 if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
1595 return -EFAULT;
1596
1597 ptr = (__sum16 *)(skb->data + offset);
1598 if (is_mmzero && !do_mforce && !*ptr)
1599 return 0;
1600
1601 switch (flags & BPF_F_HDR_FIELD_MASK) {
1602 case 0:
1603 if (unlikely(from != 0))
1604 return -EINVAL;
1605
1606 inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo);
1607 break;
1608 case 2:
1609 inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo);
1610 break;
1611 case 4:
1612 inet_proto_csum_replace4(ptr, skb, from, to, is_pseudo);
1613 break;
1614 default:
1615 return -EINVAL;
1616 }
1617
1618 if (is_mmzero && !*ptr)
1619 *ptr = CSUM_MANGLED_0;
1620 return 0;
1621}
1622
1623static const struct bpf_func_proto bpf_l4_csum_replace_proto = {
1624 .func = bpf_l4_csum_replace,
1625 .gpl_only = false,
1626 .ret_type = RET_INTEGER,
1627 .arg1_type = ARG_PTR_TO_CTX,
1628 .arg2_type = ARG_ANYTHING,
1629 .arg3_type = ARG_ANYTHING,
1630 .arg4_type = ARG_ANYTHING,
1631 .arg5_type = ARG_ANYTHING,
1632};
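
/* Typical use of the two csum_replace helpers is NAT-style rewriting:
 * after changing an IPv4 address, both the IP header checksum and the
 * pseudo-header-covering L4 checksum need fixing. A sketch, where
 * ip_off/tcp_off (offsets of the checksum fields) and old/new (the
 * 32-bit address before/after) are values the program computed itself:
 *
 *	bpf_l3_csum_replace(skb, ip_off, old, new, sizeof(new));
 *	bpf_l4_csum_replace(skb, tcp_off, old, new,
 *			    BPF_F_PSEUDO_HDR | sizeof(new));
 */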
1633
1634BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size,
1635 __be32 *, to, u32, to_size, __wsum, seed)
1636{
1637 struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
1638 u32 diff_size = from_size + to_size;
1639 int i, j = 0;
1640
	/* This is quite flexible, some examples:
	 *
	 * from_size == 0, to_size > 0,  seed := csum --> pushing data
	 * from_size > 0,  to_size == 0, seed := csum --> pulling data
	 * from_size > 0,  to_size > 0,  seed := 0    --> diffing data
	 *
	 * Even for diffing, from_size and to_size don't need to be equal.
	 */
1649 if (unlikely(((from_size | to_size) & (sizeof(__be32) - 1)) ||
1650 diff_size > sizeof(sp->diff)))
1651 return -EINVAL;
1652
1653 for (i = 0; i < from_size / sizeof(__be32); i++, j++)
1654 sp->diff[j] = ~from[i];
1655 for (i = 0; i < to_size / sizeof(__be32); i++, j++)
1656 sp->diff[j] = to[i];
1657
1658 return csum_partial(sp->diff, diff_size, seed);
1659}
1660
1661static const struct bpf_func_proto bpf_csum_diff_proto = {
1662 .func = bpf_csum_diff,
1663 .gpl_only = false,
1664 .pkt_access = true,
1665 .ret_type = RET_INTEGER,
1666 .arg1_type = ARG_PTR_TO_MEM_OR_NULL,
1667 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
1668 .arg3_type = ARG_PTR_TO_MEM_OR_NULL,
1669 .arg4_type = ARG_CONST_SIZE_OR_ZERO,
1670 .arg5_type = ARG_ANYTHING,
1671};
1672
1673BPF_CALL_2(bpf_csum_update, struct sk_buff *, skb, __wsum, csum)
1674{
	/* The interface is to be used in combination with bpf_csum_diff()
	 * for direct packet writes. csum rotation for alignment as well
	 * as emulating csum_sub() can be done from the eBPF program.
	 */
1679 if (skb->ip_summed == CHECKSUM_COMPLETE)
1680 return (skb->csum = csum_add(skb->csum, csum));
1681
1682 return -ENOTSUPP;
1683}
1684
1685static const struct bpf_func_proto bpf_csum_update_proto = {
1686 .func = bpf_csum_update,
1687 .gpl_only = false,
1688 .ret_type = RET_INTEGER,
1689 .arg1_type = ARG_PTR_TO_CTX,
1690 .arg2_type = ARG_ANYTHING,
1691};
1692
1693static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
1694{
1695 return dev_forward_skb(dev, skb);
1696}
1697
1698static inline int __bpf_rx_skb_no_mac(struct net_device *dev,
1699 struct sk_buff *skb)
1700{
1701 int ret = ____dev_forward_skb(dev, skb);
1702
1703 if (likely(!ret)) {
1704 skb->dev = dev;
1705 ret = netif_rx(skb);
1706 }
1707
1708 return ret;
1709}
1710
1711static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
1712{
1713 int ret;
1714
1715 if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) {
1716 net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
1717 kfree_skb(skb);
1718 return -ENETDOWN;
1719 }
1720
1721 skb->dev = dev;
1722
1723 __this_cpu_inc(xmit_recursion);
1724 ret = dev_queue_xmit(skb);
1725 __this_cpu_dec(xmit_recursion);
1726
1727 return ret;
1728}
1729
1730static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
1731 u32 flags)
1732{
1733
1734 unsigned int mlen = skb->network_header - skb->mac_header;
1735
1736 __skb_pull(skb, mlen);
1737
	/* At ingress, the mac header has already been pulled once.
	 * At egress, skb_postpull_rcsum has to be done in case that
	 * the skb is originated from ingress (i.e. a forwarded skb)
	 * to ensure that rcsum starts at net header.
	 */
1743 if (!skb_at_tc_ingress(skb))
1744 skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
1745 skb_pop_mac_header(skb);
1746 skb_reset_mac_len(skb);
1747 return flags & BPF_F_INGRESS ?
1748 __bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb);
1749}
1750
1751static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev,
1752 u32 flags)
1753{
1754
1755 if (unlikely(skb->mac_header >= skb->network_header)) {
1756 kfree_skb(skb);
1757 return -ERANGE;
1758 }
1759
1760 bpf_push_mac_rcsum(skb);
1761 return flags & BPF_F_INGRESS ?
1762 __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
1763}
1764
1765static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev,
1766 u32 flags)
1767{
1768 if (dev_is_mac_header_xmit(dev))
1769 return __bpf_redirect_common(skb, dev, flags);
1770 else
1771 return __bpf_redirect_no_mac(skb, dev, flags);
1772}
1773
1774BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
1775{
1776 struct net_device *dev;
1777 struct sk_buff *clone;
1778 int ret;
1779
1780 if (unlikely(flags & ~(BPF_F_INGRESS)))
1781 return -EINVAL;
1782
1783 dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex);
1784 if (unlikely(!dev))
1785 return -EINVAL;
1786
1787 clone = skb_clone(skb, GFP_ATOMIC);
1788 if (unlikely(!clone))
1789 return -ENOMEM;
1790
	/* For direct write, we need to keep the invariant that the skbs
	 * we're dealing with need to be uncloned. Should uncloning fail
	 * here, we need to free the just generated clone to unclone once
	 * again.
	 */
1796 ret = bpf_try_make_head_writable(skb);
1797 if (unlikely(ret)) {
1798 kfree_skb(clone);
1799 return -ENOMEM;
1800 }
1801
1802 return __bpf_redirect(clone, dev, flags);
1803}
1804
1805static const struct bpf_func_proto bpf_clone_redirect_proto = {
1806 .func = bpf_clone_redirect,
1807 .gpl_only = false,
1808 .ret_type = RET_INTEGER,
1809 .arg1_type = ARG_PTR_TO_CTX,
1810 .arg2_type = ARG_ANYTHING,
1811 .arg3_type = ARG_ANYTHING,
1812};
1813
1814struct redirect_info {
1815 u32 ifindex;
1816 u32 flags;
1817 struct bpf_map *map;
1818 struct bpf_map *map_to_flush;
1819 unsigned long map_owner;
1820};
1821
1822static DEFINE_PER_CPU(struct redirect_info, redirect_info);
1823
1824BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags)
1825{
1826 struct redirect_info *ri = this_cpu_ptr(&redirect_info);
1827
1828 if (unlikely(flags & ~(BPF_F_INGRESS)))
1829 return TC_ACT_SHOT;
1830
1831 ri->ifindex = ifindex;
1832 ri->flags = flags;
1833
1834 return TC_ACT_REDIRECT;
1835}
1836
1837int skb_do_redirect(struct sk_buff *skb)
1838{
1839 struct redirect_info *ri = this_cpu_ptr(&redirect_info);
1840 struct net_device *dev;
1841
1842 dev = dev_get_by_index_rcu(dev_net(skb->dev), ri->ifindex);
1843 ri->ifindex = 0;
1844 if (unlikely(!dev)) {
1845 kfree_skb(skb);
1846 return -EINVAL;
1847 }
1848
1849 return __bpf_redirect(skb, dev, ri->flags);
1850}
1851
1852static const struct bpf_func_proto bpf_redirect_proto = {
1853 .func = bpf_redirect,
1854 .gpl_only = false,
1855 .ret_type = RET_INTEGER,
1856 .arg1_type = ARG_ANYTHING,
1857 .arg2_type = ARG_ANYTHING,
1858};
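
/* From a cls_act program, a redirect is simply the helper's return
 * value; TC_ACT_REDIRECT makes the caller hand the skb over to
 * skb_do_redirect() above. Sketch:
 *
 *	return bpf_redirect(ifindex, 0);		// to egress of ifindex
 *	return bpf_redirect(ifindex, BPF_F_INGRESS);	// to its ingress
 */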
1859
1860BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
1861 struct bpf_map *, map, u32, key, u64, flags)
1862{
1863 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	/* If user passes invalid input drop the packet. */
1866 if (unlikely(flags & ~(BPF_F_INGRESS)))
1867 return SK_DROP;
1868
1869 tcb->bpf.key = key;
1870 tcb->bpf.flags = flags;
1871 tcb->bpf.map = map;
1872
1873 return SK_PASS;
1874}
1875
1876struct sock *do_sk_redirect_map(struct sk_buff *skb)
1877{
1878 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
1879 struct sock *sk = NULL;
1880
1881 if (tcb->bpf.map) {
1882 sk = __sock_map_lookup_elem(tcb->bpf.map, tcb->bpf.key);
1883
1884 tcb->bpf.key = 0;
1885 tcb->bpf.map = NULL;
1886 }
1887
1888 return sk;
1889}
1890
1891static const struct bpf_func_proto bpf_sk_redirect_map_proto = {
1892 .func = bpf_sk_redirect_map,
1893 .gpl_only = false,
1894 .ret_type = RET_INTEGER,
1895 .arg1_type = ARG_PTR_TO_CTX,
1896 .arg2_type = ARG_CONST_MAP_PTR,
1897 .arg3_type = ARG_ANYTHING,
1898 .arg4_type = ARG_ANYTHING,
1899};
1900
1901BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg_buff *, msg,
1902 struct bpf_map *, map, u32, key, u64, flags)
1903{
	/* If user passes invalid input drop the packet. */
1905 if (unlikely(flags & ~(BPF_F_INGRESS)))
1906 return SK_DROP;
1907
1908 msg->key = key;
1909 msg->flags = flags;
1910 msg->map = map;
1911
1912 return SK_PASS;
1913}
1914
1915struct sock *do_msg_redirect_map(struct sk_msg_buff *msg)
1916{
1917 struct sock *sk = NULL;
1918
1919 if (msg->map) {
1920 sk = __sock_map_lookup_elem(msg->map, msg->key);
1921
1922 msg->key = 0;
1923 msg->map = NULL;
1924 }
1925
1926 return sk;
1927}
1928
1929static const struct bpf_func_proto bpf_msg_redirect_map_proto = {
1930 .func = bpf_msg_redirect_map,
1931 .gpl_only = false,
1932 .ret_type = RET_INTEGER,
1933 .arg1_type = ARG_PTR_TO_CTX,
1934 .arg2_type = ARG_CONST_MAP_PTR,
1935 .arg3_type = ARG_ANYTHING,
1936 .arg4_type = ARG_ANYTHING,
1937};
1938
1939BPF_CALL_2(bpf_msg_apply_bytes, struct sk_msg_buff *, msg, u32, bytes)
1940{
1941 msg->apply_bytes = bytes;
1942 return 0;
1943}
1944
1945static const struct bpf_func_proto bpf_msg_apply_bytes_proto = {
1946 .func = bpf_msg_apply_bytes,
1947 .gpl_only = false,
1948 .ret_type = RET_INTEGER,
1949 .arg1_type = ARG_PTR_TO_CTX,
1950 .arg2_type = ARG_ANYTHING,
1951};
1952
1953BPF_CALL_2(bpf_msg_cork_bytes, struct sk_msg_buff *, msg, u32, bytes)
1954{
1955 msg->cork_bytes = bytes;
1956 return 0;
1957}
1958
1959static const struct bpf_func_proto bpf_msg_cork_bytes_proto = {
1960 .func = bpf_msg_cork_bytes,
1961 .gpl_only = false,
1962 .ret_type = RET_INTEGER,
1963 .arg1_type = ARG_PTR_TO_CTX,
1964 .arg2_type = ARG_ANYTHING,
1965};
1966
1967BPF_CALL_4(bpf_msg_pull_data,
1968 struct sk_msg_buff *, msg, u32, start, u32, end, u64, flags)
1969{
1970 unsigned int len = 0, offset = 0, copy = 0;
1971 struct scatterlist *sg = msg->sg_data;
1972 int first_sg, last_sg, i, shift;
1973 unsigned char *p, *to, *from;
1974 int bytes = end - start;
1975 struct page *page;
1976
1977 if (unlikely(flags || end <= start))
1978 return -EINVAL;
1979
	/* First find the starting scatterlist element */
1981 i = msg->sg_start;
1982 do {
1983 len = sg[i].length;
1984 offset += len;
1985 if (start < offset + len)
1986 break;
1987 i++;
1988 if (i == MAX_SKB_FRAGS)
1989 i = 0;
1990 } while (i != msg->sg_end);
1991
1992 if (unlikely(start >= offset + len))
1993 return -EINVAL;
1994
1995 if (!msg->sg_copy[i] && bytes <= len)
1996 goto out;
1997
1998 first_sg = i;

	/* The range [start, end) is not fully contained in a single,
	 * private sg entry, so the covered entries have to be merged
	 * and copied into one freshly allocated page below. Walk the
	 * ring to find the last entry covering 'bytes'.
	 */
2010 do {
2011 copy += sg[i].length;
2012 i++;
2013 if (i == MAX_SKB_FRAGS)
2014 i = 0;
2015 if (bytes < copy)
2016 break;
2017 } while (i != msg->sg_end);
2018 last_sg = i;
2019
2020 if (unlikely(copy < end - start))
2021 return -EINVAL;
2022
2023 page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC, get_order(copy));
2024 if (unlikely(!page))
2025 return -ENOMEM;
2026 p = page_address(page);
2027 offset = 0;
2028
2029 i = first_sg;
2030 do {
2031 from = sg_virt(&sg[i]);
2032 len = sg[i].length;
2033 to = p + offset;
2034
2035 memcpy(to, from, len);
2036 offset += len;
2037 sg[i].length = 0;
2038 put_page(sg_page(&sg[i]));
2039
2040 i++;
2041 if (i == MAX_SKB_FRAGS)
2042 i = 0;
2043 } while (i != last_sg);
2044
2045 sg[first_sg].length = copy;
2046 sg_set_page(&sg[first_sg], page, copy, 0);
2047
	/* To repair sg ring we need to shift entries. If we only
	 * had a single entry though we can just replace it and
	 * be done. Otherwise walk the ring and shift the entries.
	 */
2052 shift = last_sg - first_sg - 1;
2053 if (!shift)
2054 goto out;
2055
2056 i = first_sg + 1;
2057 do {
2058 int move_from;
2059
2060 if (i + shift >= MAX_SKB_FRAGS)
2061 move_from = i + shift - MAX_SKB_FRAGS;
2062 else
2063 move_from = i + shift;
2064
2065 if (move_from == msg->sg_end)
2066 break;
2067
2068 sg[i] = sg[move_from];
2069 sg[move_from].length = 0;
2070 sg[move_from].page_link = 0;
2071 sg[move_from].offset = 0;
2072
2073 i++;
2074 if (i == MAX_SKB_FRAGS)
2075 i = 0;
2076 } while (1);
2077 msg->sg_end -= shift;
2078 if (msg->sg_end < 0)
2079 msg->sg_end += MAX_SKB_FRAGS;
2080out:
2081 msg->data = sg_virt(&sg[i]) + start - offset;
2082 msg->data_end = msg->data + bytes;
2083
2084 return 0;
2085}
2086
2087static const struct bpf_func_proto bpf_msg_pull_data_proto = {
2088 .func = bpf_msg_pull_data,
2089 .gpl_only = false,
2090 .ret_type = RET_INTEGER,
2091 .arg1_type = ARG_PTR_TO_CTX,
2092 .arg2_type = ARG_ANYTHING,
2093 .arg3_type = ARG_ANYTHING,
2094 .arg4_type = ARG_ANYTHING,
2095};
2096
2097BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb)
2098{
2099 return task_get_classid(skb);
2100}
2101
2102static const struct bpf_func_proto bpf_get_cgroup_classid_proto = {
2103 .func = bpf_get_cgroup_classid,
2104 .gpl_only = false,
2105 .ret_type = RET_INTEGER,
2106 .arg1_type = ARG_PTR_TO_CTX,
2107};
2108
2109BPF_CALL_1(bpf_get_route_realm, const struct sk_buff *, skb)
2110{
2111 return dst_tclassid(skb);
2112}
2113
2114static const struct bpf_func_proto bpf_get_route_realm_proto = {
2115 .func = bpf_get_route_realm,
2116 .gpl_only = false,
2117 .ret_type = RET_INTEGER,
2118 .arg1_type = ARG_PTR_TO_CTX,
2119};
2120
2121BPF_CALL_1(bpf_get_hash_recalc, struct sk_buff *, skb)
2122{
	/* If skb_clear_hash() was called due to mangling, we can
	 * trigger SW recalculation here. Later access to hash
	 * can then use the inline skb->hash via context directly
	 * instead of calling this helper again.
	 */
2128 return skb_get_hash(skb);
2129}
2130
2131static const struct bpf_func_proto bpf_get_hash_recalc_proto = {
2132 .func = bpf_get_hash_recalc,
2133 .gpl_only = false,
2134 .ret_type = RET_INTEGER,
2135 .arg1_type = ARG_PTR_TO_CTX,
2136};
2137
2138BPF_CALL_1(bpf_set_hash_invalid, struct sk_buff *, skb)
2139{
	/* After all direct packet write, this can be used once for
	 * triggering a lazy recalc on next skb_get_hash() invocation.
	 */
2143 skb_clear_hash(skb);
2144 return 0;
2145}
2146
2147static const struct bpf_func_proto bpf_set_hash_invalid_proto = {
2148 .func = bpf_set_hash_invalid,
2149 .gpl_only = false,
2150 .ret_type = RET_INTEGER,
2151 .arg1_type = ARG_PTR_TO_CTX,
2152};
2153
2154BPF_CALL_2(bpf_set_hash, struct sk_buff *, skb, u32, hash)
2155{
	/* Set user specified hash as L4(+), so that it gets returned
	 * on skb_get_hash() call unless BPF prog later on triggers a
	 * skb_clear_hash().
	 */
2160 __skb_set_sw_hash(skb, hash, true);
2161 return 0;
2162}
2163
2164static const struct bpf_func_proto bpf_set_hash_proto = {
2165 .func = bpf_set_hash,
2166 .gpl_only = false,
2167 .ret_type = RET_INTEGER,
2168 .arg1_type = ARG_PTR_TO_CTX,
2169 .arg2_type = ARG_ANYTHING,
2170};
2171
2172BPF_CALL_3(bpf_skb_vlan_push, struct sk_buff *, skb, __be16, vlan_proto,
2173 u16, vlan_tci)
2174{
2175 int ret;
2176
2177 if (unlikely(vlan_proto != htons(ETH_P_8021Q) &&
2178 vlan_proto != htons(ETH_P_8021AD)))
2179 vlan_proto = htons(ETH_P_8021Q);
2180
2181 bpf_push_mac_rcsum(skb);
2182 ret = skb_vlan_push(skb, vlan_proto, vlan_tci);
2183 bpf_pull_mac_rcsum(skb);
2184
2185 bpf_compute_data_pointers(skb);
2186 return ret;
2187}
2188
2189const struct bpf_func_proto bpf_skb_vlan_push_proto = {
2190 .func = bpf_skb_vlan_push,
2191 .gpl_only = false,
2192 .ret_type = RET_INTEGER,
2193 .arg1_type = ARG_PTR_TO_CTX,
2194 .arg2_type = ARG_ANYTHING,
2195 .arg3_type = ARG_ANYTHING,
2196};
2197EXPORT_SYMBOL_GPL(bpf_skb_vlan_push_proto);
2198
2199BPF_CALL_1(bpf_skb_vlan_pop, struct sk_buff *, skb)
2200{
2201 int ret;
2202
2203 bpf_push_mac_rcsum(skb);
2204 ret = skb_vlan_pop(skb);
2205 bpf_pull_mac_rcsum(skb);
2206
2207 bpf_compute_data_pointers(skb);
2208 return ret;
2209}
2210
2211const struct bpf_func_proto bpf_skb_vlan_pop_proto = {
2212 .func = bpf_skb_vlan_pop,
2213 .gpl_only = false,
2214 .ret_type = RET_INTEGER,
2215 .arg1_type = ARG_PTR_TO_CTX,
2216};
2217EXPORT_SYMBOL_GPL(bpf_skb_vlan_pop_proto);
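
/* Sketch of re-tagging a packet from program context using the two
 * helpers above (new_tci being program-supplied):
 *
 *	bpf_skb_vlan_pop(skb);
 *	bpf_skb_vlan_push(skb, bpf_htons(ETH_P_8021Q), new_tci);
 *
 * Any direct packet pointers must be reloaded afterwards, as both
 * helpers invalidate them via bpf_compute_data_pointers().
 */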
2218
2219static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len)
2220{
	/* Caller already did skb_cow() with len as headroom,
	 * so no need to do it here.
	 */
2224 skb_push(skb, len);
2225 memmove(skb->data, skb->data + len, off);
2226 memset(skb->data + off, 0, len);
2227
	/* No skb_postpush_rcsum(skb, skb->data + off, len)
	 * needed here as it does not change the skb->csum
	 * result for checksum complete when summing over
	 * zeroed blocks.
	 */
2233 return 0;
2234}
2235
2236static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len)
2237{
	/* skb_ensure_writable() is not needed here, as we're
	 * already working on an uncloned skb.
	 */
2241 if (unlikely(!pskb_may_pull(skb, off + len)))
2242 return -ENOMEM;
2243
2244 skb_postpull_rcsum(skb, skb->data + off, len);
2245 memmove(skb->data + len, skb->data, off);
2246 __skb_pull(skb, len);
2247
2248 return 0;
2249}
2250
2251static int bpf_skb_net_hdr_push(struct sk_buff *skb, u32 off, u32 len)
2252{
2253 bool trans_same = skb->transport_header == skb->network_header;
2254 int ret;
2255
2256
2257
2258
2259
2260 ret = bpf_skb_generic_push(skb, off, len);
2261 if (likely(!ret)) {
2262 skb->mac_header -= len;
2263 skb->network_header -= len;
2264 if (trans_same)
2265 skb->transport_header = skb->network_header;
2266 }
2267
2268 return ret;
2269}
2270
2271static int bpf_skb_net_hdr_pop(struct sk_buff *skb, u32 off, u32 len)
2272{
2273 bool trans_same = skb->transport_header == skb->network_header;
2274 int ret;
2275
2276
2277 ret = bpf_skb_generic_pop(skb, off, len);
2278 if (likely(!ret)) {
2279 skb->mac_header += len;
2280 skb->network_header += len;
2281 if (trans_same)
2282 skb->transport_header = skb->network_header;
2283 }
2284
2285 return ret;
2286}
2287
2288static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
2289{
2290 const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
2291 u32 off = skb_mac_header_len(skb);
2292 int ret;
2293
	/* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
2295 if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
2296 return -ENOTSUPP;
2297
2298 ret = skb_cow(skb, len_diff);
2299 if (unlikely(ret < 0))
2300 return ret;
2301
2302 ret = bpf_skb_net_hdr_push(skb, off, len_diff);
2303 if (unlikely(ret < 0))
2304 return ret;
2305
2306 if (skb_is_gso(skb)) {
2307 struct skb_shared_info *shinfo = skb_shinfo(skb);
2308
		/* SKB_GSO_TCPV4 needs to be changed into
		 * SKB_GSO_TCPV6.
		 */
2312 if (shinfo->gso_type & SKB_GSO_TCPV4) {
2313 shinfo->gso_type &= ~SKB_GSO_TCPV4;
2314 shinfo->gso_type |= SKB_GSO_TCPV6;
2315 }

		/* Due to IPv6 header, MSS needs to be downgraded. */
		skb_decrease_gso_size(shinfo, len_diff);
		/* Header must be checked, and gso_segs recalculated. */
2320 shinfo->gso_type |= SKB_GSO_DODGY;
2321 shinfo->gso_segs = 0;
2322 }
2323
2324 skb->protocol = htons(ETH_P_IPV6);
2325 skb_clear_hash(skb);
2326
2327 return 0;
2328}
2329
2330static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
2331{
2332 const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
2333 u32 off = skb_mac_header_len(skb);
2334 int ret;
2335
	/* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
2337 if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
2338 return -ENOTSUPP;
2339
2340 ret = skb_unclone(skb, GFP_ATOMIC);
2341 if (unlikely(ret < 0))
2342 return ret;
2343
2344 ret = bpf_skb_net_hdr_pop(skb, off, len_diff);
2345 if (unlikely(ret < 0))
2346 return ret;
2347
2348 if (skb_is_gso(skb)) {
2349 struct skb_shared_info *shinfo = skb_shinfo(skb);
2350
		/* SKB_GSO_TCPV6 needs to be changed into
		 * SKB_GSO_TCPV4.
		 */
2354 if (shinfo->gso_type & SKB_GSO_TCPV6) {
2355 shinfo->gso_type &= ~SKB_GSO_TCPV6;
2356 shinfo->gso_type |= SKB_GSO_TCPV4;
2357 }

		/* Due to IPv4 header, MSS can be upgraded. */
		skb_increase_gso_size(shinfo, len_diff);
		/* Header must be checked, and gso_segs recalculated. */
2362 shinfo->gso_type |= SKB_GSO_DODGY;
2363 shinfo->gso_segs = 0;
2364 }
2365
2366 skb->protocol = htons(ETH_P_IP);
2367 skb_clear_hash(skb);
2368
2369 return 0;
2370}
2371
2372static int bpf_skb_proto_xlat(struct sk_buff *skb, __be16 to_proto)
2373{
2374 __be16 from_proto = skb->protocol;
2375
2376 if (from_proto == htons(ETH_P_IP) &&
2377 to_proto == htons(ETH_P_IPV6))
2378 return bpf_skb_proto_4_to_6(skb);
2379
2380 if (from_proto == htons(ETH_P_IPV6) &&
2381 to_proto == htons(ETH_P_IP))
2382 return bpf_skb_proto_6_to_4(skb);
2383
2384 return -ENOTSUPP;
2385}
2386
2387BPF_CALL_3(bpf_skb_change_proto, struct sk_buff *, skb, __be16, proto,
2388 u64, flags)
2389{
2390 int ret;
2391
2392 if (unlikely(flags))
2393 return -EINVAL;

	/* This helper only changes skb->protocol between IPv4 and IPv6
	 * and makes (or removes) room for the differing header length,
	 * including the GSO metadata fixups done in
	 * bpf_skb_proto_4_to_6()/_6_to_4() above. Rewriting the actual
	 * header contents, addresses and checksums is left entirely to
	 * the BPF program, f.e. via bpf_skb_store_bytes() and
	 * bpf_lX_csum_replace().
	 */
2412 ret = bpf_skb_proto_xlat(skb, proto);
2413 bpf_compute_data_pointers(skb);
2414 return ret;
2415}
2416
2417static const struct bpf_func_proto bpf_skb_change_proto_proto = {
2418 .func = bpf_skb_change_proto,
2419 .gpl_only = false,
2420 .ret_type = RET_INTEGER,
2421 .arg1_type = ARG_PTR_TO_CTX,
2422 .arg2_type = ARG_ANYTHING,
2423 .arg3_type = ARG_ANYTHING,
2424};
2425
2426BPF_CALL_2(bpf_skb_change_type, struct sk_buff *, skb, u32, pkt_type)
2427{
	/* We only allow a restricted subset to be changed for now. */
2429 if (unlikely(!skb_pkt_type_ok(skb->pkt_type) ||
2430 !skb_pkt_type_ok(pkt_type)))
2431 return -EINVAL;
2432
2433 skb->pkt_type = pkt_type;
2434 return 0;
2435}
2436
2437static const struct bpf_func_proto bpf_skb_change_type_proto = {
2438 .func = bpf_skb_change_type,
2439 .gpl_only = false,
2440 .ret_type = RET_INTEGER,
2441 .arg1_type = ARG_PTR_TO_CTX,
2442 .arg2_type = ARG_ANYTHING,
2443};
2444
2445static u32 bpf_skb_net_base_len(const struct sk_buff *skb)
2446{
2447 switch (skb->protocol) {
2448 case htons(ETH_P_IP):
2449 return sizeof(struct iphdr);
2450 case htons(ETH_P_IPV6):
2451 return sizeof(struct ipv6hdr);
2452 default:
2453 return ~0U;
2454 }
2455}
2456
2457static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff)
2458{
2459 u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
2460 int ret;
2461
	/* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
2463 if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
2464 return -ENOTSUPP;
2465
2466 ret = skb_cow(skb, len_diff);
2467 if (unlikely(ret < 0))
2468 return ret;
2469
2470 ret = bpf_skb_net_hdr_push(skb, off, len_diff);
2471 if (unlikely(ret < 0))
2472 return ret;
2473
2474 if (skb_is_gso(skb)) {
2475 struct skb_shared_info *shinfo = skb_shinfo(skb);

		/* Due to header grow, MSS needs to be downgraded. */
		skb_decrease_gso_size(shinfo, len_diff);
		/* Header must be checked, and gso_segs recalculated. */
2480 shinfo->gso_type |= SKB_GSO_DODGY;
2481 shinfo->gso_segs = 0;
2482 }
2483
2484 return 0;
2485}
2486
2487static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
2488{
2489 u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
2490 int ret;
2491
	/* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
2493 if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
2494 return -ENOTSUPP;
2495
2496 ret = skb_unclone(skb, GFP_ATOMIC);
2497 if (unlikely(ret < 0))
2498 return ret;
2499
2500 ret = bpf_skb_net_hdr_pop(skb, off, len_diff);
2501 if (unlikely(ret < 0))
2502 return ret;
2503
2504 if (skb_is_gso(skb)) {
2505 struct skb_shared_info *shinfo = skb_shinfo(skb);

		/* Due to header shrink, MSS can be upgraded. */
		skb_increase_gso_size(shinfo, len_diff);
		/* Header must be checked, and gso_segs recalculated. */
2510 shinfo->gso_type |= SKB_GSO_DODGY;
2511 shinfo->gso_segs = 0;
2512 }
2513
2514 return 0;
2515}
2516
2517static u32 __bpf_skb_max_len(const struct sk_buff *skb)
2518{
2519 return skb->dev->mtu + skb->dev->hard_header_len;
2520}

static int bpf_skb_adjust_net(struct sk_buff *skb, s32 len_diff)
{
	bool trans_same = skb->transport_header == skb->network_header;
	u32 len_cur, len_diff_abs = abs(len_diff);
	u32 len_min = bpf_skb_net_base_len(skb);
	u32 len_max = __bpf_skb_max_len(skb);
	__be16 proto = skb->protocol;
	bool shrink = len_diff < 0;
	int ret;

	if (unlikely(len_diff_abs > 0xfffU))
		return -EFAULT;
	if (unlikely(proto != htons(ETH_P_IP) &&
		     proto != htons(ETH_P_IPV6)))
		return -ENOTSUPP;

	len_cur = skb->len - skb_network_offset(skb);
	if (skb_transport_header_was_set(skb) && !trans_same)
		len_cur = skb_network_header_len(skb);
	if ((shrink && (len_diff_abs >= len_cur ||
			len_cur - len_diff_abs < len_min)) ||
	    (!shrink && (skb->len + len_diff_abs > len_max &&
			 !skb_is_gso(skb))))
		return -ENOTSUPP;

	ret = shrink ? bpf_skb_net_shrink(skb, len_diff_abs) :
		       bpf_skb_net_grow(skb, len_diff_abs);

	bpf_compute_data_pointers(skb);
	return ret;
}

BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
	   u32, mode, u64, flags)
{
	if (unlikely(flags))
		return -EINVAL;
	if (likely(mode == BPF_ADJ_ROOM_NET))
		return bpf_skb_adjust_net(skb, len_diff);

	return -ENOTSUPP;
}

static const struct bpf_func_proto bpf_skb_adjust_room_proto = {
	.func		= bpf_skb_adjust_room,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
};
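
/* Editor's sketch (illustrative, not from the original source): with
 * BPF_ADJ_ROOM_NET - the only mode handled above - a tc program can
 * open or close a gap right after the network header, e.g. to write
 * its own encapsulation header into the packet:
 *
 *	// grow 8 bytes between L3 and L4; flags must currently be 0
 *	if (bpf_skb_adjust_room(skb, 8, BPF_ADJ_ROOM_NET, 0))
 *		return TC_ACT_SHOT;
 *	// ... write the 8 byte header via bpf_skb_store_bytes() ...
 *	// and shrink by the same amount on the decap path
 *	if (bpf_skb_adjust_room(skb, -8, BPF_ADJ_ROOM_NET, 0))
 *		return TC_ACT_SHOT;
 */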

static u32 __bpf_skb_min_len(const struct sk_buff *skb)
{
	u32 min_len = skb_network_offset(skb);

	if (skb_transport_header_was_set(skb))
		min_len = skb_transport_offset(skb);
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		min_len = skb_checksum_start_offset(skb) +
			  skb->csum_offset + sizeof(__sum16);
	return min_len;
}

static int bpf_skb_grow_rcsum(struct sk_buff *skb, unsigned int new_len)
{
	unsigned int old_len = skb->len;
	int ret;

	ret = __skb_grow_rcsum(skb, new_len);
	if (!ret)
		memset(skb->data + old_len, 0, new_len - old_len);
	return ret;
}

static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len)
{
	return __skb_trim_rcsum(skb, new_len);
}

BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len,
	   u64, flags)
{
	u32 max_len = __bpf_skb_max_len(skb);
	u32 min_len = __bpf_skb_min_len(skb);
	int ret;

	if (unlikely(flags || new_len > max_len || new_len < min_len))
		return -EINVAL;
	if (skb->encapsulation)
		return -ENOTSUPP;

	/* The basic idea of this helper is that it's performing the
	 * needed work to either grow or trim an skb, and eBPF program
	 * rewrites the rest via helpers like bpf_skb_store_bytes(),
	 * bpf_lX_csum_replace() and others rather than passing a raw
	 * buffer here. This one is a slow path helper and intended
	 * for replies with control messages.
	 *
	 * Like in bpf_skb_change_proto(), we want to keep this rather
	 * minimal and without protocol specifics so that we are able
	 * to separate concerns as in bpf_skb_store_bytes() should only
	 * be the one responsible for writing buffers.
	 *
	 * It's really expected to be a slow path operation here for
	 * control message replies, so we're implicitly linearizing,
	 * uncloning and drop offloads from the skb by this.
	 */
	ret = __bpf_try_make_writable(skb, skb->len);
	if (!ret) {
		if (new_len > skb->len)
			ret = bpf_skb_grow_rcsum(skb, new_len);
		else if (new_len < skb->len)
			ret = bpf_skb_trim_rcsum(skb, new_len);
		if (!ret && skb_is_gso(skb))
			skb_gso_reset(skb);
	}

	bpf_compute_data_pointers(skb);
	return ret;
}

static const struct bpf_func_proto bpf_skb_change_tail_proto = {
	.func		= bpf_skb_change_tail,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};
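
/* Editor's sketch (illustrative): padding a reply generated in tc up
 * to a fixed size before sending it back out. Per the helpers above,
 * grown bytes arrive zeroed and the checksum is kept in sync:
 *
 *	if (skb->len < 64 && bpf_skb_change_tail(skb, 64, 0))
 *		return TC_ACT_SHOT;
 */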

BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room,
	   u64, flags)
{
	u32 max_len = __bpf_skb_max_len(skb);
	u32 new_len = skb->len + head_room;
	int ret;

	if (unlikely(flags || (!skb_is_gso(skb) && new_len > max_len) ||
		     new_len < skb->len))
		return -EINVAL;

	ret = skb_cow(skb, head_room);
	if (likely(!ret)) {
		/* Idea for this helper is that we currently only
		 * allow to expand on mac header. This means that
		 * skb->protocol network header, etc, stay as is.
		 * Compared to bpf_skb_change_tail(), we're more
		 * flexible due to not needing to linearize or
		 * reset GSO. Intention for this helper is to be
		 * used by an L3 skb that needs to push mac header
		 * for redirection into L2 device.
		 */
		__skb_push(skb, head_room);
		memset(skb->data, 0, head_room);
		skb_reset_mac_header(skb);
	}

	bpf_compute_data_pointers(skb);
	/* Propagate a possible skb_cow() failure instead of masking it. */
	return ret;
}

static const struct bpf_func_proto bpf_skb_change_head_proto = {
	.func		= bpf_skb_change_head,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};

static unsigned long xdp_get_metalen(const struct xdp_buff *xdp)
{
	return xdp_data_meta_unsupported(xdp) ? 0 :
	       xdp->data - xdp->data_meta;
}

BPF_CALL_2(bpf_xdp_adjust_head, struct xdp_buff *, xdp, int, offset)
{
	unsigned long metalen = xdp_get_metalen(xdp);
	void *data_start = xdp->data_hard_start + metalen;
	void *data = xdp->data + offset;

	if (unlikely(data < data_start ||
		     data > xdp->data_end - ETH_HLEN))
		return -EINVAL;

	if (metalen)
		memmove(xdp->data_meta + offset,
			xdp->data_meta, metalen);
	xdp->data_meta += offset;
	xdp->data = data;

	return 0;
}

static const struct bpf_func_proto bpf_xdp_adjust_head_proto = {
	.func		= bpf_xdp_adjust_head,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
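
/* Editor's sketch (illustrative): the usual XDP pattern around the
 * helper above - make head room, then re-derive and bounds-check the
 * packet pointers, since the helper just invalidated them. The header
 * struct name is a made-up placeholder:
 *
 *	if (bpf_xdp_adjust_head(ctx, -(int)sizeof(struct my_encap)))
 *		return XDP_DROP;
 *	data     = (void *)(long)ctx->data;
 *	data_end = (void *)(long)ctx->data_end;
 *	if (data + sizeof(struct my_encap) > data_end)
 *		return XDP_DROP;
 *	// now safe to write the new header at data
 */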

BPF_CALL_2(bpf_xdp_adjust_meta, struct xdp_buff *, xdp, int, offset)
{
	void *meta = xdp->data_meta + offset;
	unsigned long metalen = xdp->data - meta;

	if (xdp_data_meta_unsupported(xdp))
		return -ENOTSUPP;
	if (unlikely(meta < xdp->data_hard_start ||
		     meta > xdp->data))
		return -EINVAL;
	if (unlikely((metalen & (sizeof(__u32) - 1)) ||
		     (metalen > 32)))
		return -EACCES;

	xdp->data_meta = meta;

	return 0;
}

static const struct bpf_func_proto bpf_xdp_adjust_meta_proto = {
	.func		= bpf_xdp_adjust_meta,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
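
/* Editor's sketch (illustrative): stashing a u32 in the metadata area
 * from XDP; a tc program can later read it via __sk_buff->data_meta.
 * The u32 alignment and the 32 byte cap match the checks above:
 *
 *	if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(__u32)))
 *		return XDP_PASS;
 *	meta = (void *)(long)ctx->data_meta;
 *	data = (void *)(long)ctx->data;
 *	if ((void *)(meta + 1) > data)
 *		return XDP_PASS;
 *	*meta = rx_hint;	// made-up value handed to the tc side
 */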

static int __bpf_tx_xdp(struct net_device *dev,
			struct bpf_map *map,
			struct xdp_buff *xdp,
			u32 index)
{
	int err;

	if (!dev->netdev_ops->ndo_xdp_xmit)
		return -EOPNOTSUPP;

	err = dev->netdev_ops->ndo_xdp_xmit(dev, xdp);
	if (err)
		return err;
	dev->netdev_ops->ndo_xdp_flush(dev);
	return 0;
}

static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
			    struct bpf_map *map,
			    struct xdp_buff *xdp,
			    u32 index)
{
	int err;

	if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
		struct net_device *dev = fwd;

		if (!dev->netdev_ops->ndo_xdp_xmit)
			return -EOPNOTSUPP;

		err = dev->netdev_ops->ndo_xdp_xmit(dev, xdp);
		if (err)
			return err;
		__dev_map_insert_ctx(map, index);

	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP) {
		struct bpf_cpu_map_entry *rcpu = fwd;

		err = cpu_map_enqueue(rcpu, xdp, dev_rx);
		if (err)
			return err;
		__cpu_map_insert_ctx(map, index);
	}
	return 0;
}

void xdp_do_flush_map(void)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
	struct bpf_map *map = ri->map_to_flush;

	ri->map_to_flush = NULL;
	if (map) {
		switch (map->map_type) {
		case BPF_MAP_TYPE_DEVMAP:
			__dev_map_flush(map);
			break;
		case BPF_MAP_TYPE_CPUMAP:
			__cpu_map_flush(map);
			break;
		default:
			break;
		}
	}
}
EXPORT_SYMBOL_GPL(xdp_do_flush_map);

static void *__xdp_map_lookup_elem(struct bpf_map *map, u32 index)
{
	switch (map->map_type) {
	case BPF_MAP_TYPE_DEVMAP:
		return __dev_map_lookup_elem(map, index);
	case BPF_MAP_TYPE_CPUMAP:
		return __cpu_map_lookup_elem(map, index);
	default:
		return NULL;
	}
}

static inline bool xdp_map_invalid(const struct bpf_prog *xdp_prog,
				   unsigned long aux)
{
	return (unsigned long)xdp_prog->aux != aux;
}

static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
			       struct bpf_prog *xdp_prog)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
	unsigned long map_owner = ri->map_owner;
	struct bpf_map *map = ri->map;
	u32 index = ri->ifindex;
	void *fwd = NULL;
	int err;

	ri->ifindex = 0;
	ri->map = NULL;
	ri->map_owner = 0;

	if (unlikely(xdp_map_invalid(xdp_prog, map_owner))) {
		err = -EFAULT;
		map = NULL;
		goto err;
	}

	fwd = __xdp_map_lookup_elem(map, index);
	if (!fwd) {
		err = -EINVAL;
		goto err;
	}
	if (ri->map_to_flush && ri->map_to_flush != map)
		xdp_do_flush_map();

	err = __bpf_tx_xdp_map(dev, fwd, map, xdp, index);
	if (unlikely(err))
		goto err;

	ri->map_to_flush = map;
	_trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index);
	return 0;
err:
	_trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err);
	return err;
}

int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
		    struct bpf_prog *xdp_prog)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
	struct net_device *fwd;
	u32 index = ri->ifindex;
	int err;

	if (ri->map)
		return xdp_do_redirect_map(dev, xdp, xdp_prog);

	fwd = dev_get_by_index_rcu(dev_net(dev), index);
	ri->ifindex = 0;
	if (unlikely(!fwd)) {
		err = -EINVAL;
		goto err;
	}

	err = __bpf_tx_xdp(fwd, NULL, xdp, 0);
	if (unlikely(err))
		goto err;

	_trace_xdp_redirect(dev, xdp_prog, index);
	return 0;
err:
	_trace_xdp_redirect_err(dev, xdp_prog, index, err);
	return err;
}
EXPORT_SYMBOL_GPL(xdp_do_redirect);

static int __xdp_generic_ok_fwd_dev(struct sk_buff *skb, struct net_device *fwd)
{
	unsigned int len;

	if (unlikely(!(fwd->flags & IFF_UP)))
		return -ENETDOWN;

	len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
	if (skb->len > len)
		return -EMSGSIZE;

	return 0;
}

static int xdp_do_generic_redirect_map(struct net_device *dev,
				       struct sk_buff *skb,
				       struct bpf_prog *xdp_prog)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
	unsigned long map_owner = ri->map_owner;
	struct bpf_map *map = ri->map;
	struct net_device *fwd = NULL;
	u32 index = ri->ifindex;
	int err = 0;

	ri->ifindex = 0;
	ri->map = NULL;
	ri->map_owner = 0;

	if (unlikely(xdp_map_invalid(xdp_prog, map_owner))) {
		err = -EFAULT;
		map = NULL;
		goto err;
	}
	fwd = __xdp_map_lookup_elem(map, index);
	if (unlikely(!fwd)) {
		err = -EINVAL;
		goto err;
	}

	if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
		if (unlikely((err = __xdp_generic_ok_fwd_dev(skb, fwd))))
			goto err;
		skb->dev = fwd;
	} else {
		/* TODO: Handle BPF_MAP_TYPE_CPUMAP */
		err = -EBADRQC;
		goto err;
	}

	_trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index);
	return 0;
err:
	_trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err);
	return err;
}

int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
			    struct bpf_prog *xdp_prog)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
	u32 index = ri->ifindex;
	struct net_device *fwd;
	int err = 0;

	if (ri->map)
		return xdp_do_generic_redirect_map(dev, skb, xdp_prog);

	ri->ifindex = 0;
	fwd = dev_get_by_index_rcu(dev_net(dev), index);
	if (unlikely(!fwd)) {
		err = -EINVAL;
		goto err;
	}

	if (unlikely((err = __xdp_generic_ok_fwd_dev(skb, fwd))))
		goto err;

	skb->dev = fwd;
	_trace_xdp_redirect(dev, xdp_prog, index);
	return 0;
err:
	_trace_xdp_redirect_err(dev, xdp_prog, index, err);
	return err;
}
EXPORT_SYMBOL_GPL(xdp_do_generic_redirect);

BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);

	if (unlikely(flags))
		return XDP_ABORTED;

	ri->ifindex = ifindex;
	ri->flags = flags;
	ri->map = NULL;
	ri->map_owner = 0;

	return XDP_REDIRECT;
}

static const struct bpf_func_proto bpf_xdp_redirect_proto = {
	.func		= bpf_xdp_redirect,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex, u64, flags,
	   unsigned long, map_owner)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);

	if (unlikely(flags))
		return XDP_ABORTED;

	ri->ifindex = ifindex;
	ri->flags = flags;
	ri->map = map;
	ri->map_owner = map_owner;

	return XDP_REDIRECT;
}

/* Note that map_owner is the hidden fourth argument: the verifier
 * patches it in at fixup time, so it is not part of the proto below.
 */
static const struct bpf_func_proto bpf_xdp_redirect_map_proto = {
	.func		= bpf_xdp_redirect_map,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};
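
/* Editor's sketch (illustrative): driving bpf_redirect_map() from an
 * XDP program with a devmap; the map name and sizes are made up and
 * follow the style of the kernel samples:
 *
 *	struct bpf_map_def SEC("maps") tx_port = {
 *		.type		= BPF_MAP_TYPE_DEVMAP,
 *		.key_size	= sizeof(int),
 *		.value_size	= sizeof(int),
 *		.max_entries	= 64,
 *	};
 *
 *	SEC("xdp")
 *	int xdp_fwd(struct xdp_md *ctx)
 *	{
 *		// returns XDP_REDIRECT; errors surface in xdp_do_redirect()
 *		return bpf_redirect_map(&tx_port, 0, 0);
 *	}
 */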

bool bpf_helper_changes_pkt_data(void *func)
{
	if (func == bpf_skb_vlan_push ||
	    func == bpf_skb_vlan_pop ||
	    func == bpf_skb_store_bytes ||
	    func == bpf_skb_change_proto ||
	    func == bpf_skb_change_head ||
	    func == bpf_skb_change_tail ||
	    func == bpf_skb_adjust_room ||
	    func == bpf_skb_pull_data ||
	    func == bpf_clone_redirect ||
	    func == bpf_l3_csum_replace ||
	    func == bpf_l4_csum_replace ||
	    func == bpf_xdp_adjust_head ||
	    func == bpf_xdp_adjust_meta ||
	    func == bpf_msg_pull_data)
		return true;

	return false;
}

static unsigned long bpf_skb_copy(void *dst_buff, const void *skb,
				  unsigned long off, unsigned long len)
{
	void *ptr = skb_header_pointer(skb, off, len, dst_buff);

	if (unlikely(!ptr))
		return len;
	if (ptr != dst_buff)
		memcpy(dst_buff, ptr, len);

	return 0;
}

BPF_CALL_5(bpf_skb_event_output, struct sk_buff *, skb, struct bpf_map *, map,
	   u64, flags, void *, meta, u64, meta_size)
{
	u64 skb_size = (flags & BPF_F_CTXLEN_MASK) >> 32;

	if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (unlikely(skb_size > skb->len))
		return -EFAULT;

	return bpf_event_output(map, flags, meta, meta_size, skb, skb_size,
				bpf_skb_copy);
}

static const struct bpf_func_proto bpf_skb_event_output_proto = {
	.func		= bpf_skb_event_output,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};
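
/* Editor's sketch (illustrative): sampling packet bytes into a perf
 * event from tc. The upper 32 bits of the flags word carry how many
 * skb bytes to append, per BPF_F_CTXLEN_MASK above; "events" is a
 * made-up BPF_MAP_TYPE_PERF_EVENT_ARRAY and "meta" a program struct:
 *
 *	__u64 flags = BPF_F_CURRENT_CPU | ((__u64)sample_len << 32);
 *
 *	bpf_perf_event_output(skb, &events, flags, &meta, sizeof(meta));
 */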

static unsigned short bpf_tunnel_key_af(u64 flags)
{
	return flags & BPF_F_TUNINFO_IPV6 ? AF_INET6 : AF_INET;
}

BPF_CALL_4(bpf_skb_get_tunnel_key, struct sk_buff *, skb, struct bpf_tunnel_key *, to,
	   u32, size, u64, flags)
{
	const struct ip_tunnel_info *info = skb_tunnel_info(skb);
	u8 compat[sizeof(struct bpf_tunnel_key)];
	void *to_orig = to;
	int err;

	if (unlikely(!info || (flags & ~(BPF_F_TUNINFO_IPV6)))) {
		err = -EINVAL;
		goto err_clear;
	}
	if (ip_tunnel_info_af(info) != bpf_tunnel_key_af(flags)) {
		err = -EPROTO;
		goto err_clear;
	}
	if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
		err = -EINVAL;
		switch (size) {
		case offsetof(struct bpf_tunnel_key, tunnel_label):
		case offsetof(struct bpf_tunnel_key, tunnel_ext):
			goto set_compat;
		case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
			/* Fixup deprecated structure layouts here, so we have
			 * a common path later on.
			 */
			if (ip_tunnel_info_af(info) != AF_INET)
				goto err_clear;
set_compat:
			to = (struct bpf_tunnel_key *)compat;
			break;
		default:
			goto err_clear;
		}
	}

	to->tunnel_id = be64_to_cpu(info->key.tun_id);
	to->tunnel_tos = info->key.tos;
	to->tunnel_ttl = info->key.ttl;

	if (flags & BPF_F_TUNINFO_IPV6) {
		memcpy(to->remote_ipv6, &info->key.u.ipv6.src,
		       sizeof(to->remote_ipv6));
		to->tunnel_label = be32_to_cpu(info->key.label);
	} else {
		to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src);
	}

	if (unlikely(size != sizeof(struct bpf_tunnel_key)))
		memcpy(to_orig, to, size);

	return 0;
err_clear:
	memset(to_orig, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
	.func		= bpf_skb_get_tunnel_key,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_skb_get_tunnel_opt, struct sk_buff *, skb, u8 *, to, u32, size)
{
	const struct ip_tunnel_info *info = skb_tunnel_info(skb);
	int err;

	if (unlikely(!info ||
		     !(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))) {
		err = -ENOENT;
		goto err_clear;
	}
	if (unlikely(size < info->options_len)) {
		err = -ENOMEM;
		goto err_clear;
	}

	ip_tunnel_info_opts_get(to, info);
	if (size > info->options_len)
		memset(to + info->options_len, 0, size - info->options_len);

	return info->options_len;
err_clear:
	memset(to, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = {
	.func		= bpf_skb_get_tunnel_opt,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

static struct metadata_dst __percpu *md_dst;

BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb,
	   const struct bpf_tunnel_key *, from, u32, size, u64, flags)
{
	struct metadata_dst *md = this_cpu_ptr(md_dst);
	u8 compat[sizeof(struct bpf_tunnel_key)];
	struct ip_tunnel_info *info;

	if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX |
			       BPF_F_DONT_FRAGMENT | BPF_F_SEQ_NUMBER)))
		return -EINVAL;
	if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
		switch (size) {
		case offsetof(struct bpf_tunnel_key, tunnel_label):
		case offsetof(struct bpf_tunnel_key, tunnel_ext):
		case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
			/* Fixup deprecated structure layouts here, so we have
			 * a common path later on.
			 */
			memcpy(compat, from, size);
			memset(compat + size, 0, sizeof(compat) - size);
			from = (const struct bpf_tunnel_key *) compat;
			break;
		default:
			return -EINVAL;
		}
	}
	if (unlikely((!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label) ||
		     from->tunnel_ext))
		return -EINVAL;

	skb_dst_drop(skb);
	dst_hold((struct dst_entry *) md);
	skb_dst_set(skb, (struct dst_entry *) md);

	info = &md->u.tun_info;
	memset(info, 0, sizeof(*info));
	info->mode = IP_TUNNEL_INFO_TX;

	info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;
	if (flags & BPF_F_DONT_FRAGMENT)
		info->key.tun_flags |= TUNNEL_DONT_FRAGMENT;
	if (flags & BPF_F_ZERO_CSUM_TX)
		info->key.tun_flags &= ~TUNNEL_CSUM;
	if (flags & BPF_F_SEQ_NUMBER)
		info->key.tun_flags |= TUNNEL_SEQ;

	info->key.tun_id = cpu_to_be64(from->tunnel_id);
	info->key.tos = from->tunnel_tos;
	info->key.ttl = from->tunnel_ttl;

	if (flags & BPF_F_TUNINFO_IPV6) {
		info->mode |= IP_TUNNEL_INFO_IPV6;
		memcpy(&info->key.u.ipv6.dst, from->remote_ipv6,
		       sizeof(from->remote_ipv6));
		info->key.label = cpu_to_be32(from->tunnel_label) &
				  IPV6_FLOWLABEL_MASK;
	} else {
		info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4);
	}

	return 0;
}

static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
	.func		= bpf_skb_set_tunnel_key,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
};
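
/* Editor's sketch (illustrative): classic collect_md encapsulation
 * from tc - set the tunnel key, then redirect into a metadata-mode
 * vxlan device. VXLAN_IFINDEX is a placeholder; remote_ipv4 is in
 * host byte order, matching the cpu_to_be32() conversion above:
 *
 *	struct bpf_tunnel_key key = {};
 *
 *	key.tunnel_id	= 42;
 *	key.remote_ipv4	= 0xac100164;	// 172.16.1.100
 *	key.tunnel_ttl	= 64;
 *	if (bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
 *				   BPF_F_ZERO_CSUM_TX))
 *		return TC_ACT_SHOT;
 *	return bpf_redirect(VXLAN_IFINDEX, 0);
 */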

BPF_CALL_3(bpf_skb_set_tunnel_opt, struct sk_buff *, skb,
	   const u8 *, from, u32, size)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	const struct metadata_dst *md = this_cpu_ptr(md_dst);

	if (unlikely(info != &md->u.tun_info || (size & (sizeof(u32) - 1))))
		return -EINVAL;
	if (unlikely(size > IP_TUNNEL_OPTS_MAX))
		return -ENOMEM;

	ip_tunnel_info_opts_set(info, from, size);

	return 0;
}

static const struct bpf_func_proto bpf_skb_set_tunnel_opt_proto = {
	.func		= bpf_skb_set_tunnel_opt,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *
bpf_get_skb_set_tunnel_proto(enum bpf_func_id which)
{
	if (!md_dst) {
		struct metadata_dst __percpu *tmp;

		tmp = metadata_dst_alloc_percpu(IP_TUNNEL_OPTS_MAX,
						METADATA_IP_TUNNEL,
						GFP_KERNEL);
		if (!tmp)
			return NULL;
		if (cmpxchg(&md_dst, NULL, tmp))
			metadata_dst_free_percpu(tmp);
	}

	switch (which) {
	case BPF_FUNC_skb_set_tunnel_key:
		return &bpf_skb_set_tunnel_key_proto;
	case BPF_FUNC_skb_set_tunnel_opt:
		return &bpf_skb_set_tunnel_opt_proto;
	default:
		return NULL;
	}
}

BPF_CALL_3(bpf_skb_under_cgroup, struct sk_buff *, skb, struct bpf_map *, map,
	   u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;
	struct sock *sk;

	sk = skb_to_full_sk(skb);
	if (!sk || !sk_fullsock(sk))
		return -ENOENT;
	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return sk_under_cgroup_hierarchy(sk, cgrp);
}

static const struct bpf_func_proto bpf_skb_under_cgroup_proto = {
	.func		= bpf_skb_under_cgroup,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff,
				  unsigned long off, unsigned long len)
{
	memcpy(dst_buff, src_buff + off, len);
	return 0;
}

BPF_CALL_5(bpf_xdp_event_output, struct xdp_buff *, xdp, struct bpf_map *, map,
	   u64, flags, void *, meta, u64, meta_size)
{
	u64 xdp_size = (flags & BPF_F_CTXLEN_MASK) >> 32;

	if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (unlikely(xdp_size > (unsigned long)(xdp->data_end - xdp->data)))
		return -EFAULT;

	return bpf_event_output(map, flags, meta, meta_size, xdp->data,
				xdp_size, bpf_xdp_copy);
}

static const struct bpf_func_proto bpf_xdp_event_output_proto = {
	.func		= bpf_xdp_event_output,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_1(bpf_get_socket_cookie, struct sk_buff *, skb)
{
	return skb->sk ? sock_gen_cookie(skb->sk) : 0;
}

static const struct bpf_func_proto bpf_get_socket_cookie_proto = {
	.func		= bpf_get_socket_cookie,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_socket_uid, struct sk_buff *, skb)
{
	struct sock *sk = sk_to_full_sk(skb->sk);
	kuid_t kuid;

	if (!sk || !sk_fullsock(sk))
		return overflowuid;
	kuid = sock_net_uid(sock_net(sk), sk);
	return from_kuid_munged(sock_net(sk)->user_ns, kuid);
}

static const struct bpf_func_proto bpf_get_socket_uid_proto = {
	.func		= bpf_get_socket_uid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
	   int, level, int, optname, char *, optval, int, optlen)
{
	struct sock *sk = bpf_sock->sk;
	int ret = 0;
	int val;

	if (!sk_fullsock(sk))
		return -EINVAL;

	if (level == SOL_SOCKET) {
		if (optlen != sizeof(int))
			return -EINVAL;
		val = *((int *)optval);

		/* Only some socketops are supported */
		switch (optname) {
		case SO_RCVBUF:
			sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
			sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
			break;
		case SO_SNDBUF:
			sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
			sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
			break;
		case SO_MAX_PACING_RATE:
			sk->sk_max_pacing_rate = val;
			sk->sk_pacing_rate = min(sk->sk_pacing_rate,
						 sk->sk_max_pacing_rate);
			break;
		case SO_PRIORITY:
			sk->sk_priority = val;
			break;
		case SO_RCVLOWAT:
			if (val < 0)
				val = INT_MAX;
			sk->sk_rcvlowat = val ? : 1;
			break;
		case SO_MARK:
			sk->sk_mark = val;
			break;
		default:
			ret = -EINVAL;
		}
#ifdef CONFIG_INET
	} else if (level == SOL_IP) {
		if (optlen != sizeof(int) || sk->sk_family != AF_INET)
			return -EINVAL;

		val = *((int *)optval);

		switch (optname) {
		case IP_TOS:
			if (val < -1 || val > 0xff) {
				ret = -EINVAL;
			} else {
				struct inet_sock *inet = inet_sk(sk);

				if (val == -1)
					val = 0;
				inet->tos = val;
			}
			break;
		default:
			ret = -EINVAL;
		}
#if IS_ENABLED(CONFIG_IPV6)
	} else if (level == SOL_IPV6) {
		if (optlen != sizeof(int) || sk->sk_family != AF_INET6)
			return -EINVAL;

		val = *((int *)optval);

		switch (optname) {
		case IPV6_TCLASS:
			if (val < -1 || val > 0xff) {
				ret = -EINVAL;
			} else {
				struct ipv6_pinfo *np = inet6_sk(sk);

				if (val == -1)
					val = 0;
				np->tclass = val;
			}
			break;
		default:
			ret = -EINVAL;
		}
#endif
	} else if (level == SOL_TCP &&
		   sk->sk_prot->setsockopt == tcp_setsockopt) {
		if (optname == TCP_CONGESTION) {
			char name[TCP_CA_NAME_MAX];
			bool reinit = bpf_sock->op > BPF_SOCK_OPS_NEEDS_ECN;

			strncpy(name, optval, min_t(long, optlen,
						    TCP_CA_NAME_MAX-1));
			name[TCP_CA_NAME_MAX-1] = 0;
			ret = tcp_set_congestion_control(sk, name, false,
							 reinit);
		} else {
			struct tcp_sock *tp = tcp_sk(sk);

			if (optlen != sizeof(int))
				return -EINVAL;

			val = *((int *)optval);

			switch (optname) {
			case TCP_BPF_IW:
				if (val <= 0 || tp->data_segs_out > 0)
					ret = -EINVAL;
				else
					tp->snd_cwnd = val;
				break;
			case TCP_BPF_SNDCWND_CLAMP:
				if (val <= 0) {
					ret = -EINVAL;
				} else {
					tp->snd_cwnd_clamp = val;
					tp->snd_ssthresh = val;
				}
				break;
			default:
				ret = -EINVAL;
			}
		}
#endif
	} else {
		ret = -EINVAL;
	}
	return ret;
}

static const struct bpf_func_proto bpf_setsockopt_proto = {
	.func		= bpf_setsockopt,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE,
};
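
/* Editor's sketch (illustrative): a sockops program tuning a freshly
 * established active connection. TCP_BPF_IW only succeeds while no
 * data segment has gone out yet, matching the check above:
 *
 *	SEC("sockops")
 *	int tune(struct bpf_sock_ops *skops)
 *	{
 *		int iw = 40;
 *
 *		if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
 *			bpf_setsockopt(skops, SOL_TCP, TCP_BPF_IW,
 *				       &iw, sizeof(iw));
 *		return 1;
 *	}
 */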

BPF_CALL_5(bpf_getsockopt, struct bpf_sock_ops_kern *, bpf_sock,
	   int, level, int, optname, char *, optval, int, optlen)
{
	struct sock *sk = bpf_sock->sk;

	if (!sk_fullsock(sk))
		goto err_clear;

#ifdef CONFIG_INET
	if (level == SOL_TCP && sk->sk_prot->getsockopt == tcp_getsockopt) {
		if (optname == TCP_CONGESTION) {
			struct inet_connection_sock *icsk = inet_csk(sk);

			if (!icsk->icsk_ca_ops || optlen <= 1)
				goto err_clear;
			strncpy(optval, icsk->icsk_ca_ops->name, optlen);
			optval[optlen - 1] = 0;
		} else {
			goto err_clear;
		}
	} else if (level == SOL_IP) {
		struct inet_sock *inet = inet_sk(sk);

		if (optlen != sizeof(int) || sk->sk_family != AF_INET)
			goto err_clear;

		/* Only some options are supported */
		switch (optname) {
		case IP_TOS:
			*((int *)optval) = (int)inet->tos;
			break;
		default:
			goto err_clear;
		}
#if IS_ENABLED(CONFIG_IPV6)
	} else if (level == SOL_IPV6) {
		struct ipv6_pinfo *np = inet6_sk(sk);

		if (optlen != sizeof(int) || sk->sk_family != AF_INET6)
			goto err_clear;

		/* Only some options are supported */
		switch (optname) {
		case IPV6_TCLASS:
			*((int *)optval) = (int)np->tclass;
			break;
		default:
			goto err_clear;
		}
#endif
	} else {
		goto err_clear;
	}
	return 0;
#endif
err_clear:
	memset(optval, 0, optlen);
	return -EINVAL;
}

static const struct bpf_func_proto bpf_getsockopt_proto = {
	.func		= bpf_getsockopt,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg5_type	= ARG_CONST_SIZE,
};

BPF_CALL_2(bpf_sock_ops_cb_flags_set, struct bpf_sock_ops_kern *, bpf_sock,
	   int, argval)
{
	struct sock *sk = bpf_sock->sk;
	int val = argval & BPF_SOCK_OPS_ALL_CB_FLAGS;

	if (!IS_ENABLED(CONFIG_INET) || !sk_fullsock(sk))
		return -EINVAL;

	if (val)
		tcp_sk(sk)->bpf_sock_ops_cb_flags = val;

	return argval & (~BPF_SOCK_OPS_ALL_CB_FLAGS);
}

static const struct bpf_func_proto bpf_sock_ops_cb_flags_set_proto = {
	.func		= bpf_sock_ops_cb_flags_set,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
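
/* Editor's sketch (illustrative): opting in to RTO and retransmit
 * callbacks from a sockops program; any unsupported flag bits come
 * back in the return value, as implemented above:
 *
 *	bpf_sock_ops_cb_flags_set(skops, BPF_SOCK_OPS_RTO_CB_FLAG |
 *					 BPF_SOCK_OPS_RETRANS_CB_FLAG);
 */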

const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly;
EXPORT_SYMBOL_GPL(ipv6_bpf_stub);

BPF_CALL_3(bpf_bind, struct bpf_sock_addr_kern *, ctx, struct sockaddr *, addr,
	   int, addr_len)
{
#ifdef CONFIG_INET
	struct sock *sk = ctx->sk;
	int err;

	/* Only binding to an IPv4/IPv6 address is supported here, and
	 * only with an unspecified (zero) port, as checked below.
	 */
	err = -EINVAL;
	if (addr->sa_family == AF_INET) {
		if (addr_len < sizeof(struct sockaddr_in))
			return err;
		if (((struct sockaddr_in *)addr)->sin_port != htons(0))
			return err;
		return __inet_bind(sk, addr, addr_len, true, false);
#if IS_ENABLED(CONFIG_IPV6)
	} else if (addr->sa_family == AF_INET6) {
		if (addr_len < SIN6_LEN_RFC2133)
			return err;
		if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
			return err;
		/* ipv6_bpf_stub is set up at IPv6 initialization time,
		 * so by the time an AF_INET6 address reaches this point
		 * it cannot be NULL.
		 */
		return ipv6_bpf_stub->inet6_bind(sk, addr, addr_len, true, false);
#endif
	}
#endif

	return -EAFNOSUPPORT;
}

static const struct bpf_func_proto bpf_bind_proto = {
	.func		= bpf_bind,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};
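
/* Editor's sketch (illustrative): a cgroup connect4 program pinning
 * the source address of outgoing connections; the port stays 0, as
 * bpf_bind() above requires. bpf_htonl() is the usual helper macro
 * from the selftests headers:
 *
 *	SEC("cgroup/connect4")
 *	int connect_v4(struct bpf_sock_addr *ctx)
 *	{
 *		struct sockaddr_in sa = {};
 *
 *		sa.sin_family = AF_INET;
 *		sa.sin_addr.s_addr = bpf_htonl(0x7f000001);	// 127.0.0.1
 *		if (bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa)))
 *			return 0;	// reject the connect(2)
 *		return 1;
 *	}
 */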

static const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_raw_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_trace_printk:
		if (capable(CAP_SYS_ADMIN))
			return bpf_get_trace_printk_proto();
		/* fall through, NULL for unprivileged callers */
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *
sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	/* inet and inet6 sockets are created in a process
	 * context so there is always a valid uid/gid
	 */
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	/* inet and inet6 sockets are created in a process
	 * context so there is always a valid uid/gid
	 */
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_bind:
		switch (prog->expected_attach_type) {
		case BPF_CGROUP_INET4_CONNECT:
		case BPF_CGROUP_INET6_CONNECT:
			return &bpf_bind_proto;
		default:
			return NULL;
		}
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
sk_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_skb_load_bytes:
		return &bpf_skb_load_bytes_proto;
	case BPF_FUNC_get_socket_cookie:
		return &bpf_get_socket_cookie_proto;
	case BPF_FUNC_get_socket_uid:
		return &bpf_get_socket_uid_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_skb_store_bytes:
		return &bpf_skb_store_bytes_proto;
	case BPF_FUNC_skb_load_bytes:
		return &bpf_skb_load_bytes_proto;
	case BPF_FUNC_skb_pull_data:
		return &bpf_skb_pull_data_proto;
	case BPF_FUNC_csum_diff:
		return &bpf_csum_diff_proto;
	case BPF_FUNC_csum_update:
		return &bpf_csum_update_proto;
	case BPF_FUNC_l3_csum_replace:
		return &bpf_l3_csum_replace_proto;
	case BPF_FUNC_l4_csum_replace:
		return &bpf_l4_csum_replace_proto;
	case BPF_FUNC_clone_redirect:
		return &bpf_clone_redirect_proto;
	case BPF_FUNC_get_cgroup_classid:
		return &bpf_get_cgroup_classid_proto;
	case BPF_FUNC_skb_vlan_push:
		return &bpf_skb_vlan_push_proto;
	case BPF_FUNC_skb_vlan_pop:
		return &bpf_skb_vlan_pop_proto;
	case BPF_FUNC_skb_change_proto:
		return &bpf_skb_change_proto_proto;
	case BPF_FUNC_skb_change_type:
		return &bpf_skb_change_type_proto;
	case BPF_FUNC_skb_adjust_room:
		return &bpf_skb_adjust_room_proto;
	case BPF_FUNC_skb_change_tail:
		return &bpf_skb_change_tail_proto;
	case BPF_FUNC_skb_get_tunnel_key:
		return &bpf_skb_get_tunnel_key_proto;
	case BPF_FUNC_skb_set_tunnel_key:
		return bpf_get_skb_set_tunnel_proto(func_id);
	case BPF_FUNC_skb_get_tunnel_opt:
		return &bpf_skb_get_tunnel_opt_proto;
	case BPF_FUNC_skb_set_tunnel_opt:
		return bpf_get_skb_set_tunnel_proto(func_id);
	case BPF_FUNC_redirect:
		return &bpf_redirect_proto;
	case BPF_FUNC_get_route_realm:
		return &bpf_get_route_realm_proto;
	case BPF_FUNC_get_hash_recalc:
		return &bpf_get_hash_recalc_proto;
	case BPF_FUNC_set_hash_invalid:
		return &bpf_set_hash_invalid_proto;
	case BPF_FUNC_set_hash:
		return &bpf_set_hash_proto;
	case BPF_FUNC_perf_event_output:
		return &bpf_skb_event_output_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_skb_under_cgroup:
		return &bpf_skb_under_cgroup_proto;
	case BPF_FUNC_get_socket_cookie:
		return &bpf_get_socket_cookie_proto;
	case BPF_FUNC_get_socket_uid:
		return &bpf_get_socket_uid_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_xdp_event_output_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_csum_diff:
		return &bpf_csum_diff_proto;
	case BPF_FUNC_xdp_adjust_head:
		return &bpf_xdp_adjust_head_proto;
	case BPF_FUNC_xdp_adjust_meta:
		return &bpf_xdp_adjust_meta_proto;
	case BPF_FUNC_redirect:
		return &bpf_xdp_redirect_proto;
	case BPF_FUNC_redirect_map:
		return &bpf_xdp_redirect_map_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
lwt_inout_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_skb_load_bytes:
		return &bpf_skb_load_bytes_proto;
	case BPF_FUNC_skb_pull_data:
		return &bpf_skb_pull_data_proto;
	case BPF_FUNC_csum_diff:
		return &bpf_csum_diff_proto;
	case BPF_FUNC_get_cgroup_classid:
		return &bpf_get_cgroup_classid_proto;
	case BPF_FUNC_get_route_realm:
		return &bpf_get_route_realm_proto;
	case BPF_FUNC_get_hash_recalc:
		return &bpf_get_hash_recalc_proto;
	case BPF_FUNC_perf_event_output:
		return &bpf_skb_event_output_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_skb_under_cgroup:
		return &bpf_skb_under_cgroup_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_setsockopt:
		return &bpf_setsockopt_proto;
	case BPF_FUNC_getsockopt:
		return &bpf_getsockopt_proto;
	case BPF_FUNC_sock_ops_cb_flags_set:
		return &bpf_sock_ops_cb_flags_set_proto;
	case BPF_FUNC_sock_map_update:
		return &bpf_sock_map_update_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_msg_redirect_map:
		return &bpf_msg_redirect_map_proto;
	case BPF_FUNC_msg_apply_bytes:
		return &bpf_msg_apply_bytes_proto;
	case BPF_FUNC_msg_cork_bytes:
		return &bpf_msg_cork_bytes_proto;
	case BPF_FUNC_msg_pull_data:
		return &bpf_msg_pull_data_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_skb_store_bytes:
		return &bpf_skb_store_bytes_proto;
	case BPF_FUNC_skb_load_bytes:
		return &bpf_skb_load_bytes_proto;
	case BPF_FUNC_skb_pull_data:
		return &bpf_skb_pull_data_proto;
	case BPF_FUNC_skb_change_tail:
		return &bpf_skb_change_tail_proto;
	case BPF_FUNC_skb_change_head:
		return &bpf_skb_change_head_proto;
	case BPF_FUNC_get_socket_cookie:
		return &bpf_get_socket_cookie_proto;
	case BPF_FUNC_get_socket_uid:
		return &bpf_get_socket_uid_proto;
	case BPF_FUNC_sk_redirect_map:
		return &bpf_sk_redirect_map_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
lwt_xmit_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_skb_get_tunnel_key:
		return &bpf_skb_get_tunnel_key_proto;
	case BPF_FUNC_skb_set_tunnel_key:
		return bpf_get_skb_set_tunnel_proto(func_id);
	case BPF_FUNC_skb_get_tunnel_opt:
		return &bpf_skb_get_tunnel_opt_proto;
	case BPF_FUNC_skb_set_tunnel_opt:
		return bpf_get_skb_set_tunnel_proto(func_id);
	case BPF_FUNC_redirect:
		return &bpf_redirect_proto;
	case BPF_FUNC_clone_redirect:
		return &bpf_clone_redirect_proto;
	case BPF_FUNC_skb_change_tail:
		return &bpf_skb_change_tail_proto;
	case BPF_FUNC_skb_change_head:
		return &bpf_skb_change_head_proto;
	case BPF_FUNC_skb_store_bytes:
		return &bpf_skb_store_bytes_proto;
	case BPF_FUNC_csum_update:
		return &bpf_csum_update_proto;
	case BPF_FUNC_l3_csum_replace:
		return &bpf_l3_csum_replace_proto;
	case BPF_FUNC_l4_csum_replace:
		return &bpf_l4_csum_replace_proto;
	case BPF_FUNC_set_hash_invalid:
		return &bpf_set_hash_invalid_proto;
	default:
		return lwt_inout_func_proto(func_id, prog);
	}
}

static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off >= sizeof(struct __sk_buff))
		return false;

	/* The verifier guarantees that size > 0. */
	if (off % size != 0)
		return false;

	switch (off) {
	case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
		if (off + size > offsetofend(struct __sk_buff, cb[4]))
			return false;
		break;
	case bpf_ctx_range_till(struct __sk_buff, remote_ip6[0], remote_ip6[3]):
	case bpf_ctx_range_till(struct __sk_buff, local_ip6[0], local_ip6[3]):
	case bpf_ctx_range_till(struct __sk_buff, remote_ip4, remote_ip4):
	case bpf_ctx_range_till(struct __sk_buff, local_ip4, local_ip4):
	case bpf_ctx_range(struct __sk_buff, data):
	case bpf_ctx_range(struct __sk_buff, data_meta):
	case bpf_ctx_range(struct __sk_buff, data_end):
		if (size != size_default)
			return false;
		break;
	default:
		/* Only narrow read access allowed for now. */
		if (type == BPF_WRITE) {
			if (size != size_default)
				return false;
		} else {
			bpf_ctx_record_field_size(info, size_default);
			if (!bpf_ctx_narrow_access_ok(off, size, size_default))
				return false;
		}
	}

	return true;
}

static bool sk_filter_is_valid_access(int off, int size,
				      enum bpf_access_type type,
				      const struct bpf_prog *prog,
				      struct bpf_insn_access_aux *info)
{
	switch (off) {
	case bpf_ctx_range(struct __sk_buff, tc_classid):
	case bpf_ctx_range(struct __sk_buff, data):
	case bpf_ctx_range(struct __sk_buff, data_meta):
	case bpf_ctx_range(struct __sk_buff, data_end):
	case bpf_ctx_range_till(struct __sk_buff, family, local_port):
		return false;
	}

	if (type == BPF_WRITE) {
		switch (off) {
		case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
			break;
		default:
			return false;
		}
	}

	return bpf_skb_is_valid_access(off, size, type, prog, info);
}

static bool lwt_is_valid_access(int off, int size,
				enum bpf_access_type type,
				const struct bpf_prog *prog,
				struct bpf_insn_access_aux *info)
{
	switch (off) {
	case bpf_ctx_range(struct __sk_buff, tc_classid):
	case bpf_ctx_range_till(struct __sk_buff, family, local_port):
	case bpf_ctx_range(struct __sk_buff, data_meta):
		return false;
	}

	if (type == BPF_WRITE) {
		switch (off) {
		case bpf_ctx_range(struct __sk_buff, mark):
		case bpf_ctx_range(struct __sk_buff, priority):
		case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
			break;
		default:
			return false;
		}
	}

	switch (off) {
	case bpf_ctx_range(struct __sk_buff, data):
		info->reg_type = PTR_TO_PACKET;
		break;
	case bpf_ctx_range(struct __sk_buff, data_end):
		info->reg_type = PTR_TO_PACKET_END;
		break;
	}

	return bpf_skb_is_valid_access(off, size, type, prog, info);
}

/* Attach type specific accesses */
static bool __sock_filter_check_attach_type(int off,
					    enum bpf_access_type access_type,
					    enum bpf_attach_type attach_type)
{
	switch (off) {
	case offsetof(struct bpf_sock, bound_dev_if):
	case offsetof(struct bpf_sock, mark):
	case offsetof(struct bpf_sock, priority):
		switch (attach_type) {
		case BPF_CGROUP_INET_SOCK_CREATE:
			goto full_access;
		default:
			return false;
		}
	case bpf_ctx_range(struct bpf_sock, src_ip4):
		switch (attach_type) {
		case BPF_CGROUP_INET4_POST_BIND:
			goto read_only;
		default:
			return false;
		}
	case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
		switch (attach_type) {
		case BPF_CGROUP_INET6_POST_BIND:
			goto read_only;
		default:
			return false;
		}
	case bpf_ctx_range(struct bpf_sock, src_port):
		switch (attach_type) {
		case BPF_CGROUP_INET4_POST_BIND:
		case BPF_CGROUP_INET6_POST_BIND:
			goto read_only;
		default:
			return false;
		}
	}
read_only:
	return access_type == BPF_READ;
full_access:
	return true;
}

static bool __sock_filter_check_size(int off, int size,
				     struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	switch (off) {
	case bpf_ctx_range(struct bpf_sock, src_ip4):
	case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
		bpf_ctx_record_field_size(info, size_default);
		return bpf_ctx_narrow_access_ok(off, size, size_default);
	}

	return size == size_default;
}

static bool sock_filter_is_valid_access(int off, int size,
					enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(struct bpf_sock))
		return false;
	if (off % size != 0)
		return false;
	if (!__sock_filter_check_attach_type(off, type,
					     prog->expected_attach_type))
		return false;
	if (!__sock_filter_check_size(off, size, info))
		return false;
	return true;
}

static int bpf_unclone_prologue(struct bpf_insn *insn_buf, bool direct_write,
				const struct bpf_prog *prog, int drop_verdict)
{
	struct bpf_insn *insn = insn_buf;

	if (!direct_write)
		return 0;

	/* if (!skb->cloned)
	 *       goto start;
	 *
	 * (Fast-path, otherwise approximation that we might be
	 *  a clone, do the rest in helper.)
	 */
	*insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_6, BPF_REG_1, CLONED_OFFSET());
	*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_6, CLONED_MASK);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 7);

	/* ret = bpf_skb_pull_data(skb, 0); */
	*insn++ = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
	*insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_2, BPF_REG_2);
	*insn++ = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			       BPF_FUNC_skb_pull_data);
	/* if (!ret)
	 *      goto restore;
	 * ret = drop_verdict;
	 * return ret;
	 */
	*insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2);
	*insn++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, drop_verdict);
	*insn++ = BPF_EXIT_INSN();

	/* restore: */
	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
	/* start: */
	*insn++ = prog->insnsi[0];

	return insn - insn_buf;
}

static int tc_cls_act_prologue(struct bpf_insn *insn_buf, bool direct_write,
			       const struct bpf_prog *prog)
{
	return bpf_unclone_prologue(insn_buf, direct_write, prog, TC_ACT_SHOT);
}

static bool tc_cls_act_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	if (type == BPF_WRITE) {
		switch (off) {
		case bpf_ctx_range(struct __sk_buff, mark):
		case bpf_ctx_range(struct __sk_buff, tc_index):
		case bpf_ctx_range(struct __sk_buff, priority):
		case bpf_ctx_range(struct __sk_buff, tc_classid):
		case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
			break;
		default:
			return false;
		}
	}

	switch (off) {
	case bpf_ctx_range(struct __sk_buff, data):
		info->reg_type = PTR_TO_PACKET;
		break;
	case bpf_ctx_range(struct __sk_buff, data_meta):
		info->reg_type = PTR_TO_PACKET_META;
		break;
	case bpf_ctx_range(struct __sk_buff, data_end):
		info->reg_type = PTR_TO_PACKET_END;
		break;
	case bpf_ctx_range_till(struct __sk_buff, family, local_port):
		return false;
	}

	return bpf_skb_is_valid_access(off, size, type, prog, info);
}

static bool __is_valid_xdp_access(int off, int size)
{
	if (off < 0 || off >= sizeof(struct xdp_md))
		return false;
	if (off % size != 0)
		return false;
	if (size != sizeof(__u32))
		return false;

	return true;
}

static bool xdp_is_valid_access(int off, int size,
				enum bpf_access_type type,
				const struct bpf_prog *prog,
				struct bpf_insn_access_aux *info)
{
	if (type == BPF_WRITE)
		return false;

	switch (off) {
	case offsetof(struct xdp_md, data):
		info->reg_type = PTR_TO_PACKET;
		break;
	case offsetof(struct xdp_md, data_meta):
		info->reg_type = PTR_TO_PACKET_META;
		break;
	case offsetof(struct xdp_md, data_end):
		info->reg_type = PTR_TO_PACKET_END;
		break;
	}

	return __is_valid_xdp_access(off, size);
}

void bpf_warn_invalid_xdp_action(u32 act)
{
	const u32 act_max = XDP_REDIRECT;

	WARN_ONCE(1, "%s XDP return value %u, expect packet loss!\n",
		  act > act_max ? "Illegal" : "Driver unsupported",
		  act);
}
EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);

static bool sock_addr_is_valid_access(int off, int size,
				      enum bpf_access_type type,
				      const struct bpf_prog *prog,
				      struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off >= sizeof(struct bpf_sock_addr))
		return false;
	if (off % size != 0)
		return false;

	/* Disallow access to IPv6 fields from an IPv4 context and
	 * vice versa.
	 */
	switch (off) {
	case bpf_ctx_range(struct bpf_sock_addr, user_ip4):
		switch (prog->expected_attach_type) {
		case BPF_CGROUP_INET4_BIND:
		case BPF_CGROUP_INET4_CONNECT:
			break;
		default:
			return false;
		}
		break;
	case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
		switch (prog->expected_attach_type) {
		case BPF_CGROUP_INET6_BIND:
		case BPF_CGROUP_INET6_CONNECT:
			break;
		default:
			return false;
		}
		break;
	}

	switch (off) {
	case bpf_ctx_range(struct bpf_sock_addr, user_ip4):
	case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
		/* Only narrow read access allowed for now. */
		if (type == BPF_READ) {
			bpf_ctx_record_field_size(info, size_default);
			if (!bpf_ctx_narrow_access_ok(off, size, size_default))
				return false;
		} else {
			if (size != size_default)
				return false;
		}
		break;
	case bpf_ctx_range(struct bpf_sock_addr, user_port):
		if (size != size_default)
			return false;
		break;
	default:
		if (type == BPF_READ) {
			if (size != size_default)
				return false;
		} else {
			return false;
		}
	}

	return true;
}

static bool sock_ops_is_valid_access(int off, int size,
				     enum bpf_access_type type,
				     const struct bpf_prog *prog,
				     struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off >= sizeof(struct bpf_sock_ops))
		return false;

	/* The verifier guarantees that size > 0. */
	if (off % size != 0)
		return false;

	if (type == BPF_WRITE) {
		switch (off) {
		case offsetof(struct bpf_sock_ops, reply):
		case offsetof(struct bpf_sock_ops, sk_txhash):
			if (size != size_default)
				return false;
			break;
		default:
			return false;
		}
	} else {
		switch (off) {
		case bpf_ctx_range_till(struct bpf_sock_ops, bytes_received,
					bytes_acked):
			if (size != sizeof(__u64))
				return false;
			break;
		default:
			if (size != size_default)
				return false;
			break;
		}
	}

	return true;
}

static int sk_skb_prologue(struct bpf_insn *insn_buf, bool direct_write,
			   const struct bpf_prog *prog)
{
	return bpf_unclone_prologue(insn_buf, direct_write, prog, SK_DROP);
}

static bool sk_skb_is_valid_access(int off, int size,
				   enum bpf_access_type type,
				   const struct bpf_prog *prog,
				   struct bpf_insn_access_aux *info)
{
	switch (off) {
	case bpf_ctx_range(struct __sk_buff, tc_classid):
	case bpf_ctx_range(struct __sk_buff, data_meta):
		return false;
	}

	if (type == BPF_WRITE) {
		switch (off) {
		case bpf_ctx_range(struct __sk_buff, tc_index):
		case bpf_ctx_range(struct __sk_buff, priority):
			break;
		default:
			return false;
		}
	}

	switch (off) {
	case bpf_ctx_range(struct __sk_buff, mark):
		return false;
	case bpf_ctx_range(struct __sk_buff, data):
		info->reg_type = PTR_TO_PACKET;
		break;
	case bpf_ctx_range(struct __sk_buff, data_end):
		info->reg_type = PTR_TO_PACKET_END;
		break;
	}

	return bpf_skb_is_valid_access(off, size, type, prog, info);
}

static bool sk_msg_is_valid_access(int off, int size,
				   enum bpf_access_type type,
				   const struct bpf_prog *prog,
				   struct bpf_insn_access_aux *info)
{
	if (type == BPF_WRITE)
		return false;

	switch (off) {
	case offsetof(struct sk_msg_md, data):
		info->reg_type = PTR_TO_PACKET;
		break;
	case offsetof(struct sk_msg_md, data_end):
		info->reg_type = PTR_TO_PACKET_END;
		break;
	}

	if (off < 0 || off >= sizeof(struct sk_msg_md))
		return false;
	if (off % size != 0)
		return false;
	if (size != sizeof(__u64))
		return false;

	return true;
}

static u32 bpf_convert_ctx_access(enum bpf_access_type type,
				  const struct bpf_insn *si,
				  struct bpf_insn *insn_buf,
				  struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
	int off;

	switch (si->off) {
	case offsetof(struct __sk_buff, len):
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      bpf_target_off(struct sk_buff, len, 4,
						     target_size));
		break;

	case offsetof(struct __sk_buff, protocol):
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
				      bpf_target_off(struct sk_buff, protocol, 2,
						     target_size));
		break;

	case offsetof(struct __sk_buff, vlan_proto):
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
				      bpf_target_off(struct sk_buff, vlan_proto, 2,
						     target_size));
		break;

	case offsetof(struct __sk_buff, priority):
		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      bpf_target_off(struct sk_buff, priority, 4,
							     target_size));
		else
			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      bpf_target_off(struct sk_buff, priority, 4,
							     target_size));
		break;

	case offsetof(struct __sk_buff, ingress_ifindex):
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      bpf_target_off(struct sk_buff, skb_iif, 4,
						     target_size));
		break;

	case offsetof(struct __sk_buff, ifindex):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, dev));
		*insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct net_device, ifindex, 4,
						     target_size));
		break;

	case offsetof(struct __sk_buff, hash):
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      bpf_target_off(struct sk_buff, hash, 4,
						     target_size));
		break;

	case offsetof(struct __sk_buff, mark):
		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      bpf_target_off(struct sk_buff, mark, 4,
							     target_size));
		else
			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      bpf_target_off(struct sk_buff, mark, 4,
							     target_size));
		break;

	case offsetof(struct __sk_buff, pkt_type):
		*target_size = 1;
		*insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg,
				      PKT_TYPE_OFFSET());
		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, PKT_TYPE_MAX);
#ifdef __BIG_ENDIAN_BITFIELD
		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, 5);
#endif
		break;

	case offsetof(struct __sk_buff, queue_mapping):
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
				      bpf_target_off(struct sk_buff, queue_mapping, 2,
						     target_size));
		break;

	case offsetof(struct __sk_buff, vlan_present):
	case offsetof(struct __sk_buff, vlan_tci):
		BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);

		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
				      bpf_target_off(struct sk_buff, vlan_tci, 2,
						     target_size));
		if (si->off == offsetof(struct __sk_buff, vlan_tci)) {
			*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg,
						~VLAN_TAG_PRESENT);
		} else {
			*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, 12);
			*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, 1);
		}
		break;

	case offsetof(struct __sk_buff, cb[0]) ...
	     offsetofend(struct __sk_buff, cb[4]) - 1:
		BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20);
		BUILD_BUG_ON((offsetof(struct sk_buff, cb) +
			      offsetof(struct qdisc_skb_cb, data)) %
			     sizeof(__u64));

		prog->cb_access = 1;
		off = si->off;
		off -= offsetof(struct __sk_buff, cb[0]);
		off += offsetof(struct sk_buff, cb);
		off += offsetof(struct qdisc_skb_cb, data);
		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_SIZE(si->code), si->dst_reg,
					      si->src_reg, off);
		else
			*insn++ = BPF_LDX_MEM(BPF_SIZE(si->code), si->dst_reg,
					      si->src_reg, off);
		break;

	case offsetof(struct __sk_buff, tc_classid):
		BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, tc_classid) != 2);

		off = si->off;
		off -= offsetof(struct __sk_buff, tc_classid);
		off += offsetof(struct sk_buff, cb);
		off += offsetof(struct qdisc_skb_cb, tc_classid);
		*target_size = 2;
		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_H, si->dst_reg,
					      si->src_reg, off);
		else
			*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg,
					      si->src_reg, off);
		break;

	case offsetof(struct __sk_buff, data):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, data));
		break;

	case offsetof(struct __sk_buff, data_meta):
		off = si->off;
		off -= offsetof(struct __sk_buff, data_meta);
		off += offsetof(struct sk_buff, cb);
		off += offsetof(struct bpf_skb_data_end, data_meta);
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
				      si->src_reg, off);
		break;

	case offsetof(struct __sk_buff, data_end):
		off = si->off;
		off -= offsetof(struct __sk_buff, data_end);
		off += offsetof(struct sk_buff, cb);
		off += offsetof(struct bpf_skb_data_end, data_end);
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
				      si->src_reg, off);
		break;

	case offsetof(struct __sk_buff, tc_index):
#ifdef CONFIG_NET_SCHED
		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg,
					      bpf_target_off(struct sk_buff, tc_index, 2,
							     target_size));
		else
			*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
					      bpf_target_off(struct sk_buff, tc_index, 2,
							     target_size));
#else
		*target_size = 2;
		if (type == BPF_WRITE)
			*insn++ = BPF_MOV64_REG(si->dst_reg, si->dst_reg);
		else
			*insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
#endif
		break;

	case offsetof(struct __sk_buff, napi_id):
#if defined(CONFIG_NET_RX_BUSY_POLL)
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      bpf_target_off(struct sk_buff, napi_id, 4,
						     target_size));
		*insn++ = BPF_JMP_IMM(BPF_JGE, si->dst_reg, MIN_NAPI_ID, 1);
		*insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
#else
		*target_size = 4;
		*insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
#endif
		break;
	case offsetof(struct __sk_buff, family):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, sk));
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct sock_common,
						     skc_family,
						     2, target_size));
		break;
	case offsetof(struct __sk_buff, remote_ip4):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
				      si->dst_reg, si->src_reg,
4694 offsetof(struct sk_buff, sk));
4695 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
4696 bpf_target_off(struct sock_common,
4697 skc_daddr,
4698 4, target_size));
4699 break;
4700 case offsetof(struct __sk_buff, local_ip4):
4701 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
4702 skc_rcv_saddr) != 4);
4703
4704 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
4705 si->dst_reg, si->src_reg,
4706 offsetof(struct sk_buff, sk));
4707 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
4708 bpf_target_off(struct sock_common,
4709 skc_rcv_saddr,
4710 4, target_size));
4711 break;
4712 case offsetof(struct __sk_buff, remote_ip6[0]) ...
4713 offsetof(struct __sk_buff, remote_ip6[3]):
4714#if IS_ENABLED(CONFIG_IPV6)
4715 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
4716 skc_v6_daddr.s6_addr32[0]) != 4);
4717
4718 off = si->off;
4719 off -= offsetof(struct __sk_buff, remote_ip6[0]);
4720
4721 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
4722 si->dst_reg, si->src_reg,
4723 offsetof(struct sk_buff, sk));
4724 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
4725 offsetof(struct sock_common,
4726 skc_v6_daddr.s6_addr32[0]) +
4727 off);
4728#else
4729 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
4730#endif
4731 break;
4732 case offsetof(struct __sk_buff, local_ip6[0]) ...
4733 offsetof(struct __sk_buff, local_ip6[3]):
4734#if IS_ENABLED(CONFIG_IPV6)
4735 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
4736 skc_v6_rcv_saddr.s6_addr32[0]) != 4);
4737
4738 off = si->off;
4739 off -= offsetof(struct __sk_buff, local_ip6[0]);
4740
4741 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
4742 si->dst_reg, si->src_reg,
4743 offsetof(struct sk_buff, sk));
4744 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
4745 offsetof(struct sock_common,
4746 skc_v6_rcv_saddr.s6_addr32[0]) +
4747 off);
4748#else
4749 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
4750#endif
4751 break;
4752
4753 case offsetof(struct __sk_buff, remote_port):
4754 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2);
4755
4756 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
4757 si->dst_reg, si->src_reg,
4758 offsetof(struct sk_buff, sk));
4759 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
4760 bpf_target_off(struct sock_common,
4761 skc_dport,
4762 2, target_size));
4763#ifndef __BIG_ENDIAN_BITFIELD
4764 *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
4765#endif
4766 break;
4767
4768 case offsetof(struct __sk_buff, local_port):
4769 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2);
4770
4771 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
4772 si->dst_reg, si->src_reg,
4773 offsetof(struct sk_buff, sk));
4774 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
4775 bpf_target_off(struct sock_common,
4776 skc_num, 2, target_size));
4777 break;
4778 }
4779
4780 return insn - insn_buf;
4781}
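
/* Example (illustrative only, not part of the original file): in a socket
 * filter written in C,
 *
 *	__u32 len = skb->len;	// skb is a struct __sk_buff *
 *
 * compiles to a load from offsetof(struct __sk_buff, len). At load time the
 * verifier calls bpf_convert_ctx_access() on that instruction and replaces
 * it with a load from the real offset of len inside struct sk_buff, so
 * struct __sk_buff stays a pure ABI and no kernel layout leaks to programs.
 */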

static u32 sock_filter_convert_ctx_access(enum bpf_access_type type,
                                          const struct bpf_insn *si,
                                          struct bpf_insn *insn_buf,
                                          struct bpf_prog *prog, u32 *target_size)
{
        struct bpf_insn *insn = insn_buf;
        int off;

        switch (si->off) {
        case offsetof(struct bpf_sock, bound_dev_if):
                BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_bound_dev_if) != 4);

                if (type == BPF_WRITE)
                        *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
                                              offsetof(struct sock, sk_bound_dev_if));
                else
                        *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
                                              offsetof(struct sock, sk_bound_dev_if));
                break;

        case offsetof(struct bpf_sock, mark):
                BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_mark) != 4);

                if (type == BPF_WRITE)
                        *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
                                              offsetof(struct sock, sk_mark));
                else
                        *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
                                              offsetof(struct sock, sk_mark));
                break;

        case offsetof(struct bpf_sock, priority):
                BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_priority) != 4);

                if (type == BPF_WRITE)
                        *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
                                              offsetof(struct sock, sk_priority));
                else
                        *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
                                              offsetof(struct sock, sk_priority));
                break;

        case offsetof(struct bpf_sock, family):
                BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_family) != 2);

                *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
                                      offsetof(struct sock, sk_family));
                break;

        case offsetof(struct bpf_sock, type):
                *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
                                      offsetof(struct sock, __sk_flags_offset));
                *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK);
                *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT);
                break;

        case offsetof(struct bpf_sock, protocol):
                *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
                                      offsetof(struct sock, __sk_flags_offset));
                *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
                *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_PROTO_SHIFT);
                break;

        case offsetof(struct bpf_sock, src_ip4):
                *insn++ = BPF_LDX_MEM(
                        BPF_SIZE(si->code), si->dst_reg, si->src_reg,
                        bpf_target_off(struct sock_common, skc_rcv_saddr,
                                       FIELD_SIZEOF(struct sock_common,
                                                    skc_rcv_saddr),
                                       target_size));
                break;

        case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
                off = si->off;
                off -= offsetof(struct bpf_sock, src_ip6[0]);
                *insn++ = BPF_LDX_MEM(
                        BPF_SIZE(si->code), si->dst_reg, si->src_reg,
                        bpf_target_off(
                                struct sock_common,
                                skc_v6_rcv_saddr.s6_addr32[0],
                                FIELD_SIZEOF(struct sock_common,
                                             skc_v6_rcv_saddr.s6_addr32[0]),
                                target_size) + off);
#else
                (void)off;
                *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
#endif
                break;

        case offsetof(struct bpf_sock, src_port):
                *insn++ = BPF_LDX_MEM(
                        BPF_FIELD_SIZEOF(struct sock_common, skc_num),
                        si->dst_reg, si->src_reg,
                        bpf_target_off(struct sock_common, skc_num,
                                       FIELD_SIZEOF(struct sock_common,
                                                    skc_num),
                                       target_size));
                break;
        }

        return insn - insn_buf;
}
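
/* Note (illustrative): struct bpf_sock's type and protocol cannot be turned
 * into direct field loads because sk_type and sk_protocol live in a packed
 * bitfield in struct sock. The conversion above instead loads the whole
 * word at __sk_flags_offset and extracts the value with SK_FL_TYPE_MASK/
 * SK_FL_TYPE_SHIFT (resp. SK_FL_PROTO_*), so a program can still write
 * plain C such as:
 *
 *	if (sk->type == SOCK_DGRAM && sk->protocol == IPPROTO_UDP)
 *		return 0;
 */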

static u32 tc_cls_act_convert_ctx_access(enum bpf_access_type type,
                                         const struct bpf_insn *si,
                                         struct bpf_insn *insn_buf,
                                         struct bpf_prog *prog, u32 *target_size)
{
        struct bpf_insn *insn = insn_buf;

        switch (si->off) {
        case offsetof(struct __sk_buff, ifindex):
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
                                      si->dst_reg, si->src_reg,
                                      offsetof(struct sk_buff, dev));
                *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
                                      bpf_target_off(struct net_device, ifindex, 4,
                                                     target_size));
                break;
        default:
                return bpf_convert_ctx_access(type, si, insn_buf, prog,
                                              target_size);
        }

        return insn - insn_buf;
}
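
/* The tc variant only overrides the ifindex case: at the tc hooks skb->dev
 * can be assumed valid, so the NULL check that the generic converter emits
 * before dereferencing skb->dev is unnecessary and is dropped here.
 */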

static u32 xdp_convert_ctx_access(enum bpf_access_type type,
                                  const struct bpf_insn *si,
                                  struct bpf_insn *insn_buf,
                                  struct bpf_prog *prog, u32 *target_size)
{
        struct bpf_insn *insn = insn_buf;

        switch (si->off) {
        case offsetof(struct xdp_md, data):
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data),
                                      si->dst_reg, si->src_reg,
                                      offsetof(struct xdp_buff, data));
                break;
        case offsetof(struct xdp_md, data_meta):
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_meta),
                                      si->dst_reg, si->src_reg,
                                      offsetof(struct xdp_buff, data_meta));
                break;
        case offsetof(struct xdp_md, data_end):
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_end),
                                      si->dst_reg, si->src_reg,
                                      offsetof(struct xdp_buff, data_end));
                break;
        case offsetof(struct xdp_md, ingress_ifindex):
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
                                      si->dst_reg, si->src_reg,
                                      offsetof(struct xdp_buff, rxq));
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_rxq_info, dev),
                                      si->dst_reg, si->dst_reg,
                                      offsetof(struct xdp_rxq_info, dev));
                *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
                                      offsetof(struct net_device, ifindex));
                break;
        case offsetof(struct xdp_md, rx_queue_index):
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
                                      si->dst_reg, si->src_reg,
                                      offsetof(struct xdp_buff, rxq));
                *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
                                      offsetof(struct xdp_rxq_info,
                                               queue_index));
                break;
        }

        return insn - insn_buf;
}
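
/* Example (illustrative only): the canonical XDP bounds check
 *
 *	void *data = (void *)(long)ctx->data;
 *	void *data_end = (void *)(long)ctx->data_end;
 *
 *	if (data + sizeof(struct ethhdr) > data_end)
 *		return XDP_DROP;
 *
 * relies on the conversions above: both context loads become direct reads
 * of the struct xdp_buff pointers, which the verifier then uses for its
 * packet-range analysis.
 */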

/* SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF() loads the nested field S.F.NF,
 * where S is the type of the context structure, F is a field of S holding a
 * pointer to a nested structure of type NS, and NF is the field of NS to
 * read. SIZE is the BPF load size to use and OFF an additional offset added
 * to NF, which lets individual 32-bit words of a larger field (e.g. an IPv6
 * address) be addressed.
 */
#define SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF)	\
	do {								\
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), si->dst_reg, \
				      si->src_reg, offsetof(S, F));	\
		*insn++ = BPF_LDX_MEM(					\
			SIZE, si->dst_reg, si->dst_reg,			\
			bpf_target_off(NS, NF, FIELD_SIZEOF(NS, NF),	\
				       target_size)			\
				+ OFF);					\
	} while (0)

#define SOCK_ADDR_LOAD_NESTED_FIELD(S, NS, F, NF)			\
	SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(S, NS, F, NF,		\
					     BPF_FIELD_SIZEOF(NS, NF), 0)

/* SOCK_ADDR_STORE_NESTED_FIELD_OFF() is the store counterpart of
 * SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(). A store needs a third register:
 * src_reg cannot be clobbered because it holds the value being stored, and
 * dst_reg cannot be clobbered because it points to the context and may
 * still be used by the program. So a scratch register (BPF_REG_9, or the
 * nearest lower register not aliased by src/dst) is spilled to the
 * temporary context field TF, used to hold the nested-structure pointer,
 * and restored afterwards.
 */
#define SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, OFF, TF)	\
	do {								\
		int tmp_reg = BPF_REG_9;				\
		if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg)	\
			--tmp_reg;					\
		if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg)	\
			--tmp_reg;					\
		*insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, tmp_reg,	\
				      offsetof(S, TF));			\
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), tmp_reg,	\
				      si->dst_reg, offsetof(S, F));	\
		*insn++ = BPF_STX_MEM(					\
			BPF_FIELD_SIZEOF(NS, NF), tmp_reg, si->src_reg,	\
			bpf_target_off(NS, NF, FIELD_SIZEOF(NS, NF),	\
				       target_size)			\
				+ OFF);					\
		*insn++ = BPF_LDX_MEM(BPF_DW, tmp_reg, si->dst_reg,	\
				      offsetof(S, TF));			\
	} while (0)

#define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF, \
						      TF)		\
	do {								\
		if (type == BPF_WRITE) {				\
			SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, OFF, \
							 TF);		\
		} else {						\
			SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(		\
				S, NS, F, NF, SIZE, OFF);		\
		}							\
	} while (0)

#define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD(S, NS, F, NF, TF)		\
	SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(			\
		S, NS, F, NF, BPF_FIELD_SIZEOF(NS, NF), 0, TF)

static u32 sock_addr_convert_ctx_access(enum bpf_access_type type,
                                        const struct bpf_insn *si,
                                        struct bpf_insn *insn_buf,
                                        struct bpf_prog *prog, u32 *target_size)
{
        struct bpf_insn *insn = insn_buf;
        int off;

        switch (si->off) {
        case offsetof(struct bpf_sock_addr, user_family):
                SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
                                            struct sockaddr, uaddr, sa_family);
                break;

        case offsetof(struct bpf_sock_addr, user_ip4):
                SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
                        struct bpf_sock_addr_kern, struct sockaddr_in, uaddr,
                        sin_addr, BPF_SIZE(si->code), 0, tmp_reg);
                break;

        case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
                off = si->off;
                off -= offsetof(struct bpf_sock_addr, user_ip6[0]);
                SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
                        struct bpf_sock_addr_kern, struct sockaddr_in6, uaddr,
                        sin6_addr.s6_addr32[0], BPF_SIZE(si->code), off,
                        tmp_reg);
                break;

        case offsetof(struct bpf_sock_addr, user_port):
                /* To get a port we need to know sa_family first and then
                 * treat sockaddr as either sockaddr_in or sockaddr_in6.
                 * Though we can simplify: since the port field has the same
                 * offset and size in both structures, we check that
                 * invariant at build time and use just one of them.
                 */
                BUILD_BUG_ON(offsetof(struct sockaddr_in, sin_port) !=
                             offsetof(struct sockaddr_in6, sin6_port));
                BUILD_BUG_ON(FIELD_SIZEOF(struct sockaddr_in, sin_port) !=
                             FIELD_SIZEOF(struct sockaddr_in6, sin6_port));
                SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD(struct bpf_sock_addr_kern,
                                                     struct sockaddr_in6, uaddr,
                                                     sin6_port, tmp_reg);
                break;

        case offsetof(struct bpf_sock_addr, family):
                SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
                                            struct sock, sk, sk_family);
                break;

        case offsetof(struct bpf_sock_addr, type):
                SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(
                        struct bpf_sock_addr_kern, struct sock, sk,
                        __sk_flags_offset, BPF_W, 0);
                *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK);
                *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT);
                break;

        case offsetof(struct bpf_sock_addr, protocol):
                SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(
                        struct bpf_sock_addr_kern, struct sock, sk,
                        __sk_flags_offset, BPF_W, 0);
                *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
                *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg,
                                        SK_FL_PROTO_SHIFT);
                break;
        }

        return insn - insn_buf;
}
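
/* Example (illustrative only): a BPF_CGROUP_INET4_CONNECT program that
 * transparently redirects connect(2) only needs to store to the context:
 *
 *	ctx->user_ip4 = bpf_htonl(0x7f000001);	// 127.0.0.1
 *	ctx->user_port = bpf_htons(6379);
 *
 * Both stores pass through sock_addr_convert_ctx_access() above and end up
 * writing into the sockaddr that was passed to the system call.
 */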

static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
                                       const struct bpf_insn *si,
                                       struct bpf_insn *insn_buf,
                                       struct bpf_prog *prog,
                                       u32 *target_size)
{
        struct bpf_insn *insn = insn_buf;
        int off;

        switch (si->off) {
        case offsetof(struct bpf_sock_ops, op) ...
             offsetof(struct bpf_sock_ops, replylong[3]):
                BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, op) !=
                             FIELD_SIZEOF(struct bpf_sock_ops_kern, op));
                BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, reply) !=
                             FIELD_SIZEOF(struct bpf_sock_ops_kern, reply));
                BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, replylong) !=
                             FIELD_SIZEOF(struct bpf_sock_ops_kern, replylong));
                off = si->off;
                off -= offsetof(struct bpf_sock_ops, op);
                off += offsetof(struct bpf_sock_ops_kern, op);
                if (type == BPF_WRITE)
                        *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
                                              off);
                else
                        *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
                                              off);
                break;

        case offsetof(struct bpf_sock_ops, family):
                BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2);

                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
                                              struct bpf_sock_ops_kern, sk),
                                      si->dst_reg, si->src_reg,
                                      offsetof(struct bpf_sock_ops_kern, sk));
                *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
                                      offsetof(struct sock_common, skc_family));
                break;

        case offsetof(struct bpf_sock_ops, remote_ip4):
                BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4);

                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
                                              struct bpf_sock_ops_kern, sk),
                                      si->dst_reg, si->src_reg,
                                      offsetof(struct bpf_sock_ops_kern, sk));
                *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
                                      offsetof(struct sock_common, skc_daddr));
                break;

        case offsetof(struct bpf_sock_ops, local_ip4):
                BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
                                          skc_rcv_saddr) != 4);

                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
                                              struct bpf_sock_ops_kern, sk),
                                      si->dst_reg, si->src_reg,
                                      offsetof(struct bpf_sock_ops_kern, sk));
                *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
                                      offsetof(struct sock_common,
                                               skc_rcv_saddr));
                break;

        case offsetof(struct bpf_sock_ops, remote_ip6[0]) ...
             offsetof(struct bpf_sock_ops, remote_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
                BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
                                          skc_v6_daddr.s6_addr32[0]) != 4);

                off = si->off;
                off -= offsetof(struct bpf_sock_ops, remote_ip6[0]);
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
                                              struct bpf_sock_ops_kern, sk),
                                      si->dst_reg, si->src_reg,
                                      offsetof(struct bpf_sock_ops_kern, sk));
                *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
                                      offsetof(struct sock_common,
                                               skc_v6_daddr.s6_addr32[0]) +
                                      off);
#else
                *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
#endif
                break;

        case offsetof(struct bpf_sock_ops, local_ip6[0]) ...
             offsetof(struct bpf_sock_ops, local_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
                BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
                                          skc_v6_rcv_saddr.s6_addr32[0]) != 4);

                off = si->off;
                off -= offsetof(struct bpf_sock_ops, local_ip6[0]);
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
                                              struct bpf_sock_ops_kern, sk),
                                      si->dst_reg, si->src_reg,
                                      offsetof(struct bpf_sock_ops_kern, sk));
                *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
                                      offsetof(struct sock_common,
                                               skc_v6_rcv_saddr.s6_addr32[0]) +
                                      off);
#else
                *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
#endif
                break;

        case offsetof(struct bpf_sock_ops, remote_port):
                BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2);

                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
                                              struct bpf_sock_ops_kern, sk),
                                      si->dst_reg, si->src_reg,
                                      offsetof(struct bpf_sock_ops_kern, sk));
                *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
                                      offsetof(struct sock_common, skc_dport));
#ifndef __BIG_ENDIAN_BITFIELD
                *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
#endif
                break;

        case offsetof(struct bpf_sock_ops, local_port):
                BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2);

                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
                                              struct bpf_sock_ops_kern, sk),
                                      si->dst_reg, si->src_reg,
                                      offsetof(struct bpf_sock_ops_kern, sk));
                *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
                                      offsetof(struct sock_common, skc_num));
                break;

        case offsetof(struct bpf_sock_ops, is_fullsock):
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
                                              struct bpf_sock_ops_kern,
                                              is_fullsock),
                                      si->dst_reg, si->src_reg,
                                      offsetof(struct bpf_sock_ops_kern,
                                               is_fullsock));
                break;

        case offsetof(struct bpf_sock_ops, state):
                BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_state) != 1);

                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
                                              struct bpf_sock_ops_kern, sk),
                                      si->dst_reg, si->src_reg,
                                      offsetof(struct bpf_sock_ops_kern, sk));
                *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->dst_reg,
                                      offsetof(struct sock_common, skc_state));
                break;

        case offsetof(struct bpf_sock_ops, rtt_min):
                BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, rtt_min) !=
                             sizeof(struct minmax));
                BUILD_BUG_ON(sizeof(struct minmax) <
                             sizeof(struct minmax_sample));

                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
                                              struct bpf_sock_ops_kern, sk),
                                      si->dst_reg, si->src_reg,
                                      offsetof(struct bpf_sock_ops_kern, sk));
                *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
                                      offsetof(struct tcp_sock, rtt_min) +
                                      FIELD_SIZEOF(struct minmax_sample, t));
                break;

/* Helper macro for adding read access to tcp_sock or sock fields. */
#define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ)			      \
	do {								      \
		BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) >		      \
			     FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD));   \
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(			      \
						struct bpf_sock_ops_kern,     \
						is_fullsock),		      \
				      si->dst_reg, si->src_reg,		      \
				      offsetof(struct bpf_sock_ops_kern,      \
					       is_fullsock));		      \
		*insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 2);	      \
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(			      \
						struct bpf_sock_ops_kern, sk),\
				      si->dst_reg, si->src_reg,		      \
				      offsetof(struct bpf_sock_ops_kern, sk));\
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(OBJ,		      \
						       OBJ_FIELD),	      \
				      si->dst_reg, si->dst_reg,		      \
				      offsetof(OBJ, OBJ_FIELD));	      \
	} while (0)

/* Helper macro for adding write access to tcp_sock or sock fields.
 * The macro is called with two registers, dst_reg which contains a pointer
 * to ctx (context) and src_reg which contains the value that should be
 * stored. However, we need an additional register since we cannot overwrite
 * dst_reg because it may be used later in the program.
 * Instead we "borrow" one of the other registers: its value is first saved
 * into the temp field of struct bpf_sock_ops_kern, the register is used as
 * scratch, and its original value is restored at the end of the macro.
 */
#define SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ)			      \
	do {								      \
		int reg = BPF_REG_9;					      \
		BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) >		      \
			     FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD));   \
		if (si->dst_reg == reg || si->src_reg == reg)		      \
			reg--;						      \
		if (si->dst_reg == reg || si->src_reg == reg)		      \
			reg--;						      \
		*insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, reg,		      \
				      offsetof(struct bpf_sock_ops_kern,      \
					       temp));			      \
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(			      \
						struct bpf_sock_ops_kern,     \
						is_fullsock),		      \
				      reg, si->dst_reg,			      \
				      offsetof(struct bpf_sock_ops_kern,      \
					       is_fullsock));		      \
		*insn++ = BPF_JMP_IMM(BPF_JEQ, reg, 0, 2);		      \
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(			      \
						struct bpf_sock_ops_kern, sk),\
				      reg, si->dst_reg,			      \
				      offsetof(struct bpf_sock_ops_kern, sk));\
		*insn++ = BPF_STX_MEM(BPF_FIELD_SIZEOF(OBJ, OBJ_FIELD),	      \
				      reg, si->src_reg,			      \
				      offsetof(OBJ, OBJ_FIELD));	      \
		*insn++ = BPF_LDX_MEM(BPF_DW, reg, si->dst_reg,		      \
				      offsetof(struct bpf_sock_ops_kern,      \
					       temp));			      \
	} while (0)

#define SOCK_OPS_GET_OR_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ, TYPE)	      \
	do {								      \
		if (TYPE == BPF_WRITE)					      \
			SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ);	      \
		else							      \
			SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ);	      \
	} while (0)

        case offsetof(struct bpf_sock_ops, snd_cwnd):
                SOCK_OPS_GET_FIELD(snd_cwnd, snd_cwnd, struct tcp_sock);
                break;

        case offsetof(struct bpf_sock_ops, srtt_us):
                SOCK_OPS_GET_FIELD(srtt_us, srtt_us, struct tcp_sock);
                break;

        case offsetof(struct bpf_sock_ops, bpf_sock_ops_cb_flags):
                SOCK_OPS_GET_FIELD(bpf_sock_ops_cb_flags, bpf_sock_ops_cb_flags,
                                   struct tcp_sock);
                break;

        case offsetof(struct bpf_sock_ops, snd_ssthresh):
                SOCK_OPS_GET_FIELD(snd_ssthresh, snd_ssthresh, struct tcp_sock);
                break;

        case offsetof(struct bpf_sock_ops, rcv_nxt):
                SOCK_OPS_GET_FIELD(rcv_nxt, rcv_nxt, struct tcp_sock);
                break;

        case offsetof(struct bpf_sock_ops, snd_nxt):
                SOCK_OPS_GET_FIELD(snd_nxt, snd_nxt, struct tcp_sock);
                break;

        case offsetof(struct bpf_sock_ops, snd_una):
                SOCK_OPS_GET_FIELD(snd_una, snd_una, struct tcp_sock);
                break;

        case offsetof(struct bpf_sock_ops, mss_cache):
                SOCK_OPS_GET_FIELD(mss_cache, mss_cache, struct tcp_sock);
                break;

        case offsetof(struct bpf_sock_ops, ecn_flags):
                SOCK_OPS_GET_FIELD(ecn_flags, ecn_flags, struct tcp_sock);
                break;

        case offsetof(struct bpf_sock_ops, rate_delivered):
                SOCK_OPS_GET_FIELD(rate_delivered, rate_delivered,
                                   struct tcp_sock);
                break;

        case offsetof(struct bpf_sock_ops, rate_interval_us):
                SOCK_OPS_GET_FIELD(rate_interval_us, rate_interval_us,
                                   struct tcp_sock);
                break;

        case offsetof(struct bpf_sock_ops, packets_out):
                SOCK_OPS_GET_FIELD(packets_out, packets_out, struct tcp_sock);
                break;

        case offsetof(struct bpf_sock_ops, retrans_out):
                SOCK_OPS_GET_FIELD(retrans_out, retrans_out, struct tcp_sock);
                break;

        case offsetof(struct bpf_sock_ops, total_retrans):
                SOCK_OPS_GET_FIELD(total_retrans, total_retrans,
                                   struct tcp_sock);
                break;

        case offsetof(struct bpf_sock_ops, segs_in):
                SOCK_OPS_GET_FIELD(segs_in, segs_in, struct tcp_sock);
                break;

        case offsetof(struct bpf_sock_ops, data_segs_in):
                SOCK_OPS_GET_FIELD(data_segs_in, data_segs_in, struct tcp_sock);
                break;

        case offsetof(struct bpf_sock_ops, segs_out):
                SOCK_OPS_GET_FIELD(segs_out, segs_out, struct tcp_sock);
                break;

        case offsetof(struct bpf_sock_ops, data_segs_out):
                SOCK_OPS_GET_FIELD(data_segs_out, data_segs_out,
                                   struct tcp_sock);
                break;

        case offsetof(struct bpf_sock_ops, lost_out):
                SOCK_OPS_GET_FIELD(lost_out, lost_out, struct tcp_sock);
                break;

        case offsetof(struct bpf_sock_ops, sacked_out):
                SOCK_OPS_GET_FIELD(sacked_out, sacked_out, struct tcp_sock);
                break;

        case offsetof(struct bpf_sock_ops, sk_txhash):
                SOCK_OPS_GET_OR_SET_FIELD(sk_txhash, sk_txhash,
                                          struct sock, type);
                break;

        case offsetof(struct bpf_sock_ops, bytes_received):
                SOCK_OPS_GET_FIELD(bytes_received, bytes_received,
                                   struct tcp_sock);
                break;

        case offsetof(struct bpf_sock_ops, bytes_acked):
                SOCK_OPS_GET_FIELD(bytes_acked, bytes_acked, struct tcp_sock);
                break;
        }
        return insn - insn_buf;
}
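
/* Example (illustrative only): a sockops program sampling congestion state
 *
 *	if (skops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB)
 *		cwnd = skops->snd_cwnd;
 *
 * The snd_cwnd read expands, via SOCK_OPS_GET_FIELD() above, into an
 * is_fullsock test followed by a load from the underlying tcp_sock, so on
 * sockets that are not full sockets the read safely yields 0.
 */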

static u32 sk_skb_convert_ctx_access(enum bpf_access_type type,
                                     const struct bpf_insn *si,
                                     struct bpf_insn *insn_buf,
                                     struct bpf_prog *prog, u32 *target_size)
{
        struct bpf_insn *insn = insn_buf;
        int off;

        switch (si->off) {
        case offsetof(struct __sk_buff, data_end):
                off = si->off;
                off -= offsetof(struct __sk_buff, data_end);
                off += offsetof(struct sk_buff, cb);
                off += offsetof(struct tcp_skb_cb, bpf.data_end);
                *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
                                      si->src_reg, off);
                break;
        default:
                return bpf_convert_ctx_access(type, si, insn_buf, prog,
                                              target_size);
        }

        return insn - insn_buf;
}
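
/* Note: for SK_SKB programs data_end is not kept in the generic
 * bpf_skb_data_end area but is stashed in tcp_skb_cb's bpf area, which the
 * sockmap path populates before the program runs; hence the dedicated
 * conversion here. Every other field falls through to the generic
 * bpf_convert_ctx_access().
 */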

static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
                                     const struct bpf_insn *si,
                                     struct bpf_insn *insn_buf,
                                     struct bpf_prog *prog, u32 *target_size)
{
        struct bpf_insn *insn = insn_buf;

        switch (si->off) {
        case offsetof(struct sk_msg_md, data):
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg_buff, data),
                                      si->dst_reg, si->src_reg,
                                      offsetof(struct sk_msg_buff, data));
                break;
        case offsetof(struct sk_msg_md, data_end):
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg_buff, data_end),
                                      si->dst_reg, si->src_reg,
                                      offsetof(struct sk_msg_buff, data_end));
                break;
        }

        return insn - insn_buf;
}
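
/* Example (illustrative only): an SK_MSG verdict program validating the
 * first bytes of a message
 *
 *	void *data = msg->data;
 *	void *data_end = msg->data_end;
 *
 *	if (data + 4 > data_end)
 *		return SK_DROP;
 *
 * data and data_end are the only converted fields; they map directly onto
 * the message window tracked in struct sk_msg_buff.
 */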

const struct bpf_verifier_ops sk_filter_verifier_ops = {
	.get_func_proto		= sk_filter_func_proto,
	.is_valid_access	= sk_filter_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
};

const struct bpf_prog_ops sk_filter_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops tc_cls_act_verifier_ops = {
	.get_func_proto		= tc_cls_act_func_proto,
	.is_valid_access	= tc_cls_act_is_valid_access,
	.convert_ctx_access	= tc_cls_act_convert_ctx_access,
	.gen_prologue		= tc_cls_act_prologue,
};

const struct bpf_prog_ops tc_cls_act_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops xdp_verifier_ops = {
	.get_func_proto		= xdp_func_proto,
	.is_valid_access	= xdp_is_valid_access,
	.convert_ctx_access	= xdp_convert_ctx_access,
};

const struct bpf_prog_ops xdp_prog_ops = {
	.test_run		= bpf_prog_test_run_xdp,
};

const struct bpf_verifier_ops cg_skb_verifier_ops = {
	.get_func_proto		= sk_filter_func_proto,
	.is_valid_access	= sk_filter_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
};

const struct bpf_prog_ops cg_skb_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops lwt_inout_verifier_ops = {
	.get_func_proto		= lwt_inout_func_proto,
	.is_valid_access	= lwt_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
};

const struct bpf_prog_ops lwt_inout_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops lwt_xmit_verifier_ops = {
	.get_func_proto		= lwt_xmit_func_proto,
	.is_valid_access	= lwt_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
	.gen_prologue		= tc_cls_act_prologue,
};

const struct bpf_prog_ops lwt_xmit_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops cg_sock_verifier_ops = {
	.get_func_proto		= sock_filter_func_proto,
	.is_valid_access	= sock_filter_is_valid_access,
	.convert_ctx_access	= sock_filter_convert_ctx_access,
};

const struct bpf_prog_ops cg_sock_prog_ops = {
};

const struct bpf_verifier_ops cg_sock_addr_verifier_ops = {
	.get_func_proto		= sock_addr_func_proto,
	.is_valid_access	= sock_addr_is_valid_access,
	.convert_ctx_access	= sock_addr_convert_ctx_access,
};

const struct bpf_prog_ops cg_sock_addr_prog_ops = {
};

const struct bpf_verifier_ops sock_ops_verifier_ops = {
	.get_func_proto		= sock_ops_func_proto,
	.is_valid_access	= sock_ops_is_valid_access,
	.convert_ctx_access	= sock_ops_convert_ctx_access,
};

const struct bpf_prog_ops sock_ops_prog_ops = {
};

const struct bpf_verifier_ops sk_skb_verifier_ops = {
	.get_func_proto		= sk_skb_func_proto,
	.is_valid_access	= sk_skb_is_valid_access,
	.convert_ctx_access	= sk_skb_convert_ctx_access,
	.gen_prologue		= sk_skb_prologue,
};

const struct bpf_prog_ops sk_skb_prog_ops = {
};

const struct bpf_verifier_ops sk_msg_verifier_ops = {
	.get_func_proto		= sk_msg_func_proto,
	.is_valid_access	= sk_msg_is_valid_access,
	.convert_ctx_access	= sk_msg_convert_ctx_access,
};

const struct bpf_prog_ops sk_msg_prog_ops = {
};

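/* Each program type above pairs a bpf_verifier_ops (load time: helper lookup
 * via get_func_proto, context access validation and rewriting) with a
 * bpf_prog_ops (run time facilities such as BPF_PROG_TEST_RUN). Types
 * without a test harness simply leave bpf_prog_ops empty.
 */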
int sk_detach_filter(struct sock *sk)
{
        int ret = -ENOENT;
        struct sk_filter *filter;

        if (sock_flag(sk, SOCK_FILTER_LOCKED))
                return -EPERM;

        filter = rcu_dereference_protected(sk->sk_filter,
                                           lockdep_sock_is_held(sk));
        if (filter) {
                RCU_INIT_POINTER(sk->sk_filter, NULL);
                sk_filter_uncharge(sk, filter);
                ret = 0;
        }

        return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);
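
/* Usage sketch (illustrative only): from user space a classic filter is
 * detached with
 *
 *	int val = 0;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER, &val, sizeof(val));
 *
 * which ends up here with the socket lock held; -EPERM is returned if the
 * filter was pinned in place via SO_LOCK_FILTER.
 */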

int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
                  unsigned int len)
{
        struct sock_fprog_kern *fprog;
        struct sk_filter *filter;
        int ret = 0;

        lock_sock(sk);
        filter = rcu_dereference_protected(sk->sk_filter,
                                           lockdep_sock_is_held(sk));
        if (!filter)
                goto out;

        /* We're copying the filter that has been originally attached,
         * so no conversion/decode needed anymore. eBPF programs that
         * have no original program cannot be dumped through this.
         */
        ret = -EACCES;
        fprog = filter->prog->orig_prog;
        if (!fprog)
                goto out;

        ret = fprog->len;
        if (!len)
                /* User space only enquires number of filter blocks. */
                goto out;

        ret = -EINVAL;
        if (len < fprog->len)
                goto out;

        ret = -EFAULT;
        if (copy_to_user(ubuf, fprog->filter, bpf_classic_proglen(fprog)))
                goto out;

        /* Instead of bytes, the API asks to return the number
         * of filter blocks.
         */
        ret = fprog->len;
out:
        release_sock(sk);
        return ret;
}