// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/sock_diag.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/gfp.h>
#include <net/inet_common.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <linux/skmsg.h>
#include <net/sock.h>
#include <net/flow_dissector.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <asm/cmpxchg.h>
#include <linux/filter.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <net/sch_generic.h>
#include <net/cls_cgroup.h>
#include <net/dst_metadata.h>
#include <net/dst.h>
#include <net/sock_reuseport.h>
#include <net/busy_poll.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/udp.h>
#include <linux/bpf_trace.h>
#include <net/xdp_sock.h>
#include <linux/inetdevice.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/ip_fib.h>
#include <net/nexthop.h>
#include <net/flow.h>
#include <net/arp.h>
#include <net/ipv6.h>
#include <net/net_namespace.h>
#include <linux/seg6_local.h>
#include <net/seg6.h>
#include <net/seg6_local.h>
#include <net/lwtunnel.h>
#include <net/ipv6_stubs.h>
#include <net/bpf_sk_storage.h>

/**
 *	sk_filter_trim_cap - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *	@cap: limit on how short the eBPF program may trim the packet
 *
 * Run the eBPF program and then cut skb->data to correct size returned by
 * the program. If pkt_len is 0 we toss packet. If skb->len is smaller
 * than pkt_len we keep whole skb->data. This is the socket level
 * wrapper to BPF_PROG_RUN. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 */
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
{
	int err;
	struct sk_filter *filter;

	/*
	 * If the skb was allocated from pfmemalloc reserves, only
	 * allow SOCK_MEMALLOC sockets to use it as this socket is
	 * helping free memory
	 */
	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_PFMEMALLOCDROP);
		return -ENOMEM;
	}
	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
	if (err)
		return err;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		struct sock *save_sk = skb->sk;
		unsigned int pkt_len;

		skb->sk = sk;
		pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
		skb->sk = save_sk;
		err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
	}
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(sk_filter_trim_cap);

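/* The helpers below back the classic BPF ancillary loads (SKF_AD_*). They
 * are invoked through calls emitted by convert_bpf_extensions() when a
 * classic filter is migrated to eBPF.
 */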
BPF_CALL_1(bpf_skb_get_pay_offset, struct sk_buff *, skb)
{
	return skb_get_poff(skb);
}

BPF_CALL_3(bpf_skb_get_nlattr, struct sk_buff *, skb, u32, a, u32, x)
{
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

BPF_CALL_3(bpf_skb_get_nlattr_nest, struct sk_buff *, skb, u32, a, u32, x)
{
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = (struct nlattr *) &skb->data[a];
	if (nla->nla_len > skb->len - a)
		return 0;

	nla = nla_find_nested(nla, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

BPF_CALL_4(bpf_skb_load_helper_8, const struct sk_buff *, skb, const void *,
	   data, int, headlen, int, offset)
{
	u8 tmp, *ptr;
	const int len = sizeof(tmp);

	if (offset >= 0) {
		if (headlen - offset >= len)
			return *(u8 *)(data + offset);
		if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
			return tmp;
	} else {
		ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
		if (likely(ptr))
			return *(u8 *)ptr;
	}

	return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_8_no_cache, const struct sk_buff *, skb,
	   int, offset)
{
	return ____bpf_skb_load_helper_8(skb, skb->data, skb->len - skb->data_len,
					 offset);
}

BPF_CALL_4(bpf_skb_load_helper_16, const struct sk_buff *, skb, const void *,
	   data, int, headlen, int, offset)
{
	u16 tmp, *ptr;
	const int len = sizeof(tmp);

	if (offset >= 0) {
		if (headlen - offset >= len)
			return get_unaligned_be16(data + offset);
		if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
			return be16_to_cpu(tmp);
	} else {
		ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
		if (likely(ptr))
			return get_unaligned_be16(ptr);
	}

	return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_16_no_cache, const struct sk_buff *, skb,
	   int, offset)
{
	return ____bpf_skb_load_helper_16(skb, skb->data, skb->len - skb->data_len,
					  offset);
}

BPF_CALL_4(bpf_skb_load_helper_32, const struct sk_buff *, skb, const void *,
	   data, int, headlen, int, offset)
{
	u32 tmp, *ptr;
	const int len = sizeof(tmp);

	if (likely(offset >= 0)) {
		if (headlen - offset >= len)
			return get_unaligned_be32(data + offset);
		if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
			return be32_to_cpu(tmp);
	} else {
		ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
		if (likely(ptr))
			return get_unaligned_be32(ptr);
	}

	return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_32_no_cache, const struct sk_buff *, skb,
	   int, offset)
{
	return ____bpf_skb_load_helper_32(skb, skb->data, skb->len - skb->data_len,
					  offset);
}

BPF_CALL_0(bpf_get_raw_cpu_id)
{
	return raw_smp_processor_id();
}

static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
	.func		= bpf_get_raw_cpu_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

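/* convert_skb_access() emits the eBPF loads that implement classic BPF's
 * SKF_AD_* skb field accesses, reading directly from struct sk_buff at a
 * fixed offset through the register given by src_reg.
 */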
static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
			      struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;

	switch (skb_field) {
	case SKF_AD_MARK:
		BUILD_BUG_ON(sizeof_field(struct sk_buff, mark) != 4);

		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
				      offsetof(struct sk_buff, mark));
		break;

	case SKF_AD_PKTTYPE:
		*insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET());
		*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX);
#ifdef __BIG_ENDIAN_BITFIELD
		*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5);
#endif
		break;

	case SKF_AD_QUEUE:
		BUILD_BUG_ON(sizeof_field(struct sk_buff, queue_mapping) != 2);

		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, queue_mapping));
		break;

	case SKF_AD_VLAN_TAG:
		BUILD_BUG_ON(sizeof_field(struct sk_buff, vlan_tci) != 2);

		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, vlan_tci));
		break;
	case SKF_AD_VLAN_TAG_PRESENT:
		*insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_VLAN_PRESENT_OFFSET());
		if (PKT_VLAN_PRESENT_BIT)
			*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, PKT_VLAN_PRESENT_BIT);
		if (PKT_VLAN_PRESENT_BIT < 7)
			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1);
		break;
	}

	return insn - insn_buf;
}

static bool convert_bpf_extensions(struct sock_filter *fp,
				   struct bpf_insn **insnp)
{
	struct bpf_insn *insn = *insnp;
	u32 cnt;

	switch (fp->k) {
	case SKF_AD_OFF + SKF_AD_PROTOCOL:
		BUILD_BUG_ON(sizeof_field(struct sk_buff, protocol) != 2);

		/* A = *(u16 *) (CTX + offsetof(protocol)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, protocol));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
		break;

	case SKF_AD_OFF + SKF_AD_PKTTYPE:
		cnt = convert_skb_access(SKF_AD_PKTTYPE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_IFINDEX:
	case SKF_AD_OFF + SKF_AD_HATYPE:
		BUILD_BUG_ON(sizeof_field(struct net_device, ifindex) != 4);
		BUILD_BUG_ON(sizeof_field(struct net_device, type) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
				      BPF_REG_TMP, BPF_REG_CTX,
				      offsetof(struct sk_buff, dev));
		/* if (tmp != 0) goto pc + 1 */
		*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
		*insn++ = BPF_EXIT_INSN();
		if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX)
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, ifindex));
		else
			*insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, type));
		break;

	case SKF_AD_OFF + SKF_AD_MARK:
		cnt = convert_skb_access(SKF_AD_MARK, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_RXHASH:
		BUILD_BUG_ON(sizeof_field(struct sk_buff, hash) != 4);

		*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
				    offsetof(struct sk_buff, hash));
		break;

	case SKF_AD_OFF + SKF_AD_QUEUE:
		cnt = convert_skb_access(SKF_AD_QUEUE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TPID:
		BUILD_BUG_ON(sizeof_field(struct sk_buff, vlan_proto) != 2);

		/* A = *(u16 *) (CTX + offsetof(vlan_proto)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, vlan_proto));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
		break;

	case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
	case SKF_AD_OFF + SKF_AD_NLATTR:
	case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
	case SKF_AD_OFF + SKF_AD_CPU:
	case SKF_AD_OFF + SKF_AD_RANDOM:
		/* arg1 = CTX */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
		/* arg2 = A */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
		/* arg3 = X */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
		/* Emit call(arg1=CTX, arg2=A, arg3=X) */
		switch (fp->k) {
		case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
			*insn = BPF_EMIT_CALL(bpf_skb_get_pay_offset);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR:
			*insn = BPF_EMIT_CALL(bpf_skb_get_nlattr);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
			*insn = BPF_EMIT_CALL(bpf_skb_get_nlattr_nest);
			break;
		case SKF_AD_OFF + SKF_AD_CPU:
			*insn = BPF_EMIT_CALL(bpf_get_raw_cpu_id);
			break;
		case SKF_AD_OFF + SKF_AD_RANDOM:
			*insn = BPF_EMIT_CALL(bpf_user_rnd_u32);
			bpf_user_rnd_init_once();
			break;
		}
		break;

	case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
		/* A ^= X */
		*insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);
		break;

	default:
		/* This is just a dummy call to avoid letting the compiler
		 * evict __bpf_call_base() as an optimization. Placed here
		 * where no-one bothers.
		 */
		BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0);
		return false;
	}

	*insnp = insn;
	return true;
}

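/* convert_bpf_ld_abs() rewrites a classic BPF LD_ABS/LD_IND into native eBPF
 * loads for the common in-bounds linear case, with a fallback to the
 * bpf_skb_load_helper_{8,16,32}() calls above for everything else.
 */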
static bool convert_bpf_ld_abs(struct sock_filter *fp, struct bpf_insn **insnp)
{
	const bool unaligned_ok = IS_BUILTIN(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS);
	int size = bpf_size_to_bytes(BPF_SIZE(fp->code));
	bool endian = BPF_SIZE(fp->code) == BPF_H ||
		      BPF_SIZE(fp->code) == BPF_W;
	bool indirect = BPF_MODE(fp->code) == BPF_IND;
	const int ip_align = NET_IP_ALIGN;
	struct bpf_insn *insn = *insnp;
	int offset = fp->k;

	if (!indirect &&
	    ((unaligned_ok && offset >= 0) ||
	     (!unaligned_ok && offset >= 0 &&
	      offset + ip_align >= 0 &&
	      offset + ip_align % size == 0))) {
		bool ldx_off_ok = offset <= S16_MAX;

		*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_H);
		if (offset)
			*insn++ = BPF_ALU64_IMM(BPF_SUB, BPF_REG_TMP, offset);
		*insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP,
				      size, 2 + endian + (!ldx_off_ok * 2));
		if (ldx_off_ok) {
			*insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
					      BPF_REG_D, offset);
		} else {
			*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_D);
			*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_TMP, offset);
			*insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
					      BPF_REG_TMP, 0);
		}
		if (endian)
			*insn++ = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, size * 8);
		*insn++ = BPF_JMP_A(8);
	}

	*insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
	*insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_D);
	*insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_H);
	if (!indirect) {
		*insn++ = BPF_MOV64_IMM(BPF_REG_ARG4, offset);
	} else {
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG4, BPF_REG_X);
		if (fp->k)
			*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG4, offset);
	}

	switch (BPF_SIZE(fp->code)) {
	case BPF_B:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8);
		break;
	case BPF_H:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16);
		break;
	case BPF_W:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32);
		break;
	default:
		return false;
	}

	*insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_A, 0, 2);
	*insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
	*insn   = BPF_EXIT_INSN();

	*insnp = insn;
	return true;
}

/**
 *	bpf_convert_filter - convert filter program
 *	@prog: the user passed filter program
 *	@len: the length of the user passed filter program
 *	@new_prog: allocated 'struct bpf_prog' or NULL
 *	@new_len: pointer to store length of converted program
 *	@seen_ld_abs: bool whether we've seen ld_abs/ind
 *
 * Remap 'sock_filter' style classic BPF (cBPF) instruction set to 'bpf_insn'
 * style extended BPF (eBPF).
 * Conversion workflow:
 *
 * 1) First pass for calculating the new program length:
 *   bpf_convert_filter(old_prog, old_len, NULL, &new_len, &seen_ld_abs)
 *
 * 2) 2nd pass to remap in two passes: 1st pass finds new
 *    jump offsets, 2nd pass remapping:
 *   bpf_convert_filter(old_prog, old_len, new_prog, &new_len, &seen_ld_abs)
 */
static int bpf_convert_filter(struct sock_filter *prog, int len,
			      struct bpf_prog *new_prog, int *new_len,
			      bool *seen_ld_abs)
{
	int new_flen = 0, pass = 0, target, i, stack_off;
	struct bpf_insn *new_insn, *first_insn = NULL;
	struct sock_filter *fp;
	int *addrs = NULL;
	u8 bpf_src;

	BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);

	if (len <= 0 || len > BPF_MAXINSNS)
		return -EINVAL;

	if (new_prog) {
		first_insn = new_prog->insnsi;
		addrs = kcalloc(len, sizeof(*addrs),
				GFP_KERNEL | __GFP_NOWARN);
		if (!addrs)
			return -ENOMEM;
	}

do_pass:
	new_insn = first_insn;
	fp = prog;

	/* Classic BPF related prologue emission. */
	if (new_prog) {
		/* Classic BPF expects A and X to be reset first. These need
		 * to be guaranteed to be the first two instructions.
		 */
		*new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
		*new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_X, BPF_REG_X);

		/* All programs must keep CTX in callee saved BPF_REG_CTX.
		 * In eBPF case it's done by the kernel, here we need to
		 * do this ourself. Initial CTX is present in BPF_REG_ARG1.
		 */
		*new_insn++ = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
		if (*seen_ld_abs) {
			/* For packet access use extra regs to cache the
			 * payload pointer and linear header length, i.e.
			 * BPF_REG_D holds skb->data and BPF_REG_H holds
			 * skb->len - skb->data_len, so ld_abs/ld_ind do
			 * not have to re-derive them on every access.
			 */
			*new_insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
						  BPF_REG_D, BPF_REG_CTX,
						  offsetof(struct sk_buff, data));
			*new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_H, BPF_REG_CTX,
						  offsetof(struct sk_buff, len));
			*new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_TMP, BPF_REG_CTX,
						  offsetof(struct sk_buff, data_len));
			*new_insn++ = BPF_ALU32_REG(BPF_SUB, BPF_REG_H, BPF_REG_TMP);
		}
	} else {
		new_insn += 3;
	}

	for (i = 0; i < len; fp++, i++) {
		struct bpf_insn tmp_insns[32] = { };
		struct bpf_insn *insn = tmp_insns;

		if (addrs)
			addrs[i] = new_insn - first_insn;

		switch (fp->code) {
		/* All arithmetic insns and skb loads map as-is. */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_NEG:
		case BPF_LD | BPF_ABS | BPF_W:
		case BPF_LD | BPF_ABS | BPF_H:
		case BPF_LD | BPF_ABS | BPF_B:
		case BPF_LD | BPF_IND | BPF_W:
		case BPF_LD | BPF_IND | BPF_H:
		case BPF_LD | BPF_IND | BPF_B:
			/* Check for overloaded BPF extension and
			 * directly convert it if found, otherwise
			 * just move on with mapping.
			 */
			if (BPF_CLASS(fp->code) == BPF_LD &&
			    BPF_MODE(fp->code) == BPF_ABS &&
			    convert_bpf_extensions(fp, &insn))
				break;
			if (BPF_CLASS(fp->code) == BPF_LD &&
			    convert_bpf_ld_abs(fp, &insn)) {
				*seen_ld_abs = true;
				break;
			}

			if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) ||
			    fp->code == (BPF_ALU | BPF_MOD | BPF_X)) {
				*insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X);
				/* Error with exception code on div/mod by 0.
				 * For cBPF programs, this was always return 0.
				 */
				*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_X, 0, 2);
				*insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
				*insn++ = BPF_EXIT_INSN();
			}

			*insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
			break;

		/* Jump transformation cannot use BPF block macros
		 * everywhere as offset calculation and target updates
		 * require a bit more work than the rest, i.e. jump
		 * opcodes map as-is, but offsets need adjustment.
		 */

#define BPF_EMIT_JMP							\
	do {								\
		const s32 off_min = S16_MIN, off_max = S16_MAX;		\
		s32 off;						\
									\
		if (target >= len || target < 0)			\
			goto err;					\
		off = addrs ? addrs[target] - addrs[i] - 1 : 0;		\
		/* Adjust pc relative offset for 2nd or 3rd insn. */	\
		off -= insn - tmp_insns;				\
		/* Reject anything not fitting into insn->off. */	\
		if (off < off_min || off > off_max)			\
			goto err;					\
		insn->off = off;					\
	} while (0)

		case BPF_JMP | BPF_JA:
			target = i + fp->k + 1;
			insn->code = fp->code;
			BPF_EMIT_JMP;
			break;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
			if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
				/* BPF immediates are signed, zero extend
				 * immediate into tmp register and use it
				 * in compare insn.
				 */
				*insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);

				insn->dst_reg = BPF_REG_A;
				insn->src_reg = BPF_REG_TMP;
				bpf_src = BPF_X;
			} else {
				insn->dst_reg = BPF_REG_A;
				insn->imm = fp->k;
				bpf_src = BPF_SRC(fp->code);
				insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0;
			}

			/* Common case where 'jump_false' is next insn. */
			if (fp->jf == 0) {
				insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
				target = i + fp->jt + 1;
				BPF_EMIT_JMP;
				break;
			}

			/* Convert some jumps when 'jump_true' is next insn. */
			if (fp->jt == 0) {
				switch (BPF_OP(fp->code)) {
				case BPF_JEQ:
					insn->code = BPF_JMP | BPF_JNE | bpf_src;
					break;
				case BPF_JGT:
					insn->code = BPF_JMP | BPF_JLE | bpf_src;
					break;
				case BPF_JGE:
					insn->code = BPF_JMP | BPF_JLT | bpf_src;
					break;
				default:
					goto jmp_rest;
				}

				target = i + fp->jf + 1;
				BPF_EMIT_JMP;
				break;
			}
jmp_rest:
			/* Other jumps are mapped into two insns: Jxx and JA. */
			target = i + fp->jt + 1;
			insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
			BPF_EMIT_JMP;
			insn++;

			insn->code = BPF_JMP | BPF_JA;
			target = i + fp->jf + 1;
			BPF_EMIT_JMP;
			break;

		/* ldxb 4 * ([14] & 0xf) is remaped into 6 insns. */
		case BPF_LDX | BPF_MSH | BPF_B: {
			struct sock_filter tmp = {
				.code	= BPF_LD | BPF_ABS | BPF_B,
				.k	= fp->k,
			};

			*seen_ld_abs = true;

			/* X = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			/* A = BPF_R0 = *(u8 *) (skb->data + K) */
			convert_bpf_ld_abs(&tmp, &insn);
			insn++;
			/* A &= 0xf */
			*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
			/* A <<= 2 */
			*insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
			/* tmp = X */
			*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_X);
			/* X = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			/* A = tmp */
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
			break;
		}

		/* RET_K is remaped into 2 insns. RET_A case doesn't need an
		 * extra mov as BPF_REG_0 is already mapped into BPF_REG_A.
		 */
		case BPF_RET | BPF_A:
		case BPF_RET | BPF_K:
			if (BPF_RVAL(fp->code) == BPF_K)
				*insn++ = BPF_MOV32_RAW(BPF_K, BPF_REG_0,
							0, fp->k);
			*insn = BPF_EXIT_INSN();
			break;

		/* Store to stack. */
		case BPF_ST:
		case BPF_STX:
			stack_off = fp->k * 4 + 4;
			*insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
					    BPF_ST ? BPF_REG_A : BPF_REG_X,
					    -stack_off);
			/* check_load_and_stores() verifies that classic BPF can
			 * load from stack only after write, so tracking
			 * stack_depth for ST|STX insns is enough
			 */
			if (new_prog && new_prog->aux->stack_depth < stack_off)
				new_prog->aux->stack_depth = stack_off;
			break;

		/* Load from stack. */
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			stack_off = fp->k * 4 + 4;
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_FP,
					    -stack_off);
			break;

		/* A = K or X = K */
		case BPF_LD | BPF_IMM:
		case BPF_LDX | BPF_IMM:
			*insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
					      BPF_REG_A : BPF_REG_X, fp->k);
			break;

		/* X = A */
		case BPF_MISC | BPF_TAX:
			*insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			break;

		/* A = X */
		case BPF_MISC | BPF_TXA:
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
			break;

		/* A = skb->len or X = skb->len */
		case BPF_LD | BPF_W | BPF_LEN:
		case BPF_LDX | BPF_W | BPF_LEN:
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
					    offsetof(struct sk_buff, len));
			break;

		/* Access seccomp_data fields. */
		case BPF_LDX | BPF_ABS | BPF_W:
			/* A = *(u32 *) (ctx + K) */
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
			break;

		/* Unknown instruction. */
		default:
			goto err;
		}

		insn++;
		if (new_prog)
			memcpy(new_insn, tmp_insns,
			       sizeof(*insn) * (insn - tmp_insns));
		new_insn += insn - tmp_insns;
	}

	if (!new_prog) {
		/* Only calculating new length. */
		*new_len = new_insn - first_insn;
		if (*seen_ld_abs)
			*new_len += 4; /* Prologue bits. */
		return 0;
	}

	pass++;
	if (new_flen != new_insn - first_insn) {
		new_flen = new_insn - first_insn;
		if (pass > 2)
			goto err;
		goto do_pass;
	}

	kfree(addrs);
	BUG_ON(*new_len != new_flen);
	return 0;
err:
	kfree(addrs);
	return -EINVAL;
}

/* Security:
 *
 * As we dont want to clear mem[] array for each packet going through
 * __bpf_prog_run(), we check that filter loaded by user never try to read
 * a cell if not previously written, and we check all branches to be sure
 * a malicious user doesn't try to abuse us.
 */
static int check_load_and_stores(const struct sock_filter *filter, int flen)
{
	u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
	int pc, ret = 0;

	BUILD_BUG_ON(BPF_MEMWORDS > 16);

	masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return -ENOMEM;

	memset(masks, 0xff, flen * sizeof(*masks));

	for (pc = 0; pc < flen; pc++) {
		memvalid &= masks[pc];

		switch (filter[pc].code) {
		case BPF_ST:
		case BPF_STX:
			memvalid |= (1 << filter[pc].k);
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			if (!(memvalid & (1 << filter[pc].k))) {
				ret = -EINVAL;
				goto error;
			}
			break;
		case BPF_JMP | BPF_JA:
			/* A jump must set masks on target */
			masks[pc + 1 + filter[pc].k] &= memvalid;
			memvalid = ~0;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* A jump must set masks on targets */
			masks[pc + 1 + filter[pc].jt] &= memvalid;
			masks[pc + 1 + filter[pc].jf] &= memvalid;
			memvalid = ~0;
			break;
		}
	}
error:
	kfree(masks);
	return ret;
}

static bool chk_code_allowed(u16 code_to_probe)
{
	static const bool codes[] = {
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_K] = true,
		[BPF_ALU | BPF_ADD | BPF_X] = true,
		[BPF_ALU | BPF_SUB | BPF_K] = true,
		[BPF_ALU | BPF_SUB | BPF_X] = true,
		[BPF_ALU | BPF_MUL | BPF_K] = true,
		[BPF_ALU | BPF_MUL | BPF_X] = true,
		[BPF_ALU | BPF_DIV | BPF_K] = true,
		[BPF_ALU | BPF_DIV | BPF_X] = true,
		[BPF_ALU | BPF_MOD | BPF_K] = true,
		[BPF_ALU | BPF_MOD | BPF_X] = true,
		[BPF_ALU | BPF_AND | BPF_K] = true,
		[BPF_ALU | BPF_AND | BPF_X] = true,
		[BPF_ALU | BPF_OR | BPF_K] = true,
		[BPF_ALU | BPF_OR | BPF_X] = true,
		[BPF_ALU | BPF_XOR | BPF_K] = true,
		[BPF_ALU | BPF_XOR | BPF_X] = true,
		[BPF_ALU | BPF_LSH | BPF_K] = true,
		[BPF_ALU | BPF_LSH | BPF_X] = true,
		[BPF_ALU | BPF_RSH | BPF_K] = true,
		[BPF_ALU | BPF_RSH | BPF_X] = true,
		[BPF_ALU | BPF_NEG] = true,
		/* Load instructions */
		[BPF_LD | BPF_W | BPF_ABS] = true,
		[BPF_LD | BPF_H | BPF_ABS] = true,
		[BPF_LD | BPF_B | BPF_ABS] = true,
		[BPF_LD | BPF_W | BPF_LEN] = true,
		[BPF_LD | BPF_W | BPF_IND] = true,
		[BPF_LD | BPF_H | BPF_IND] = true,
		[BPF_LD | BPF_B | BPF_IND] = true,
		[BPF_LD | BPF_IMM] = true,
		[BPF_LD | BPF_MEM] = true,
		[BPF_LDX | BPF_W | BPF_LEN] = true,
		[BPF_LDX | BPF_B | BPF_MSH] = true,
		[BPF_LDX | BPF_IMM] = true,
		[BPF_LDX | BPF_MEM] = true,
		/* Store instructions */
		[BPF_ST] = true,
		[BPF_STX] = true,
		/* Misc instructions */
		[BPF_MISC | BPF_TAX] = true,
		[BPF_MISC | BPF_TXA] = true,
		/* Return instructions */
		[BPF_RET | BPF_K] = true,
		[BPF_RET | BPF_A] = true,
		/* Jump instructions */
		[BPF_JMP | BPF_JA] = true,
		[BPF_JMP | BPF_JEQ | BPF_K] = true,
		[BPF_JMP | BPF_JEQ | BPF_X] = true,
		[BPF_JMP | BPF_JGE | BPF_K] = true,
		[BPF_JMP | BPF_JGE | BPF_X] = true,
		[BPF_JMP | BPF_JGT | BPF_K] = true,
		[BPF_JMP | BPF_JGT | BPF_X] = true,
		[BPF_JMP | BPF_JSET | BPF_K] = true,
		[BPF_JMP | BPF_JSET | BPF_X] = true,
	};

	if (code_to_probe >= ARRAY_SIZE(codes))
		return false;

	return codes[code_to_probe];
}

static bool bpf_check_basics_ok(const struct sock_filter *filter,
				unsigned int flen)
{
	if (filter == NULL)
		return false;
	if (flen == 0 || flen > BPF_MAXINSNS)
		return false;

	return true;
}

/**
 *	bpf_check_classic - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
static int bpf_check_classic(const struct sock_filter *filter,
			     unsigned int flen)
{
	bool anc_found;
	int pc;

	/* Check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		const struct sock_filter *ftest = &filter[pc];

		/* May we actually operate on this code? */
		if (!chk_code_allowed(ftest->code))
			return -EINVAL;

		/* Some instructions need special checks */
		switch (ftest->code) {
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_K:
			/* Check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
			if (ftest->k >= 32)
				return -EINVAL;
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
		case BPF_ST:
		case BPF_STX:
			/* Check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JA:
			/* Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned int)(flen - pc - 1))
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* Both conditionals must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;
		case BPF_LD | BPF_W | BPF_ABS:
		case BPF_LD | BPF_H | BPF_ABS:
		case BPF_LD | BPF_B | BPF_ABS:
			anc_found = false;
			if (bpf_anc_helper(ftest) & BPF_ANC)
				anc_found = true;
			/* Ancillary operation unknown or unsupported */
			if (anc_found == false && ftest->k >= SKF_AD_OFF)
				return -EINVAL;
		}
	}

	/* Last instruction must be a RET code */
	switch (filter[flen - 1].code) {
	case BPF_RET | BPF_K:
	case BPF_RET | BPF_A:
		return check_load_and_stores(filter, flen);
	}

	return -EINVAL;
}

static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
				      const struct sock_fprog *fprog)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct sock_fprog_kern *fkprog;

	fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
	if (!fp->orig_prog)
		return -ENOMEM;

	fkprog = fp->orig_prog;
	fkprog->len = fprog->len;

	fkprog->filter = kmemdup(fp->insns, fsize,
				 GFP_KERNEL | __GFP_NOWARN);
	if (!fkprog->filter) {
		kfree(fp->orig_prog);
		return -ENOMEM;
	}

	return 0;
}

static void bpf_release_orig_filter(struct bpf_prog *fp)
{
	struct sock_fprog_kern *fprog = fp->orig_prog;

	if (fprog) {
		kfree(fprog->filter);
		kfree(fprog);
	}
}

static void __bpf_prog_release(struct bpf_prog *prog)
{
	if (prog->type == BPF_PROG_TYPE_SOCKET_FILTER) {
		bpf_prog_put(prog);
	} else {
		bpf_release_orig_filter(prog);
		bpf_prog_free(prog);
	}
}

static void __sk_filter_release(struct sk_filter *fp)
{
	__bpf_prog_release(fp->prog);
	kfree(fp);
}

/**
 *	sk_filter_release_rcu - Release a socket filter by rcu_head
 *	@rcu: rcu_head that contains the sk_filter to free
 */
static void sk_filter_release_rcu(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	__sk_filter_release(fp);
}

/**
 *	sk_filter_release - release a socket filter
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */
static void sk_filter_release(struct sk_filter *fp)
{
	if (refcount_dec_and_test(&fp->refcnt))
		call_rcu(&fp->rcu, sk_filter_release_rcu);
}

void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);

	atomic_sub(filter_size, &sk->sk_omem_alloc);
	sk_filter_release(fp);
}

/* try to charge the socket memory if there is space available
 * return true on success
 */
static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);

	/* same check as in sock_kmalloc() */
	if (filter_size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
		atomic_add(filter_size, &sk->sk_omem_alloc);
		return true;
	}
	return false;
}

bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	if (!refcount_inc_not_zero(&fp->refcnt))
		return false;

	if (!__sk_filter_charge(sk, fp)) {
		sk_filter_release(fp);
		return false;
	}
	return true;
}

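/* bpf_migrate_filter() translates an already verified classic BPF program
 * in place into the eBPF instruction set, using bpf_convert_filter() in
 * the two-pass scheme documented above.
 */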
static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
{
	struct sock_filter *old_prog;
	struct bpf_prog *old_fp;
	int err, new_len, old_len = fp->len;
	bool seen_ld_abs = false;

	/* We are free to overwrite insns et al right here as it
	 * won't be used at this point in time anymore internally
	 * after the migration to the internal BPF instruction
	 * representation.
	 */
	BUILD_BUG_ON(sizeof(struct sock_filter) !=
		     sizeof(struct bpf_insn));

	/* Conversion cannot happen on overlapping memory areas,
	 * so we need to keep the user BPF around until the 2nd
	 * pass. At this time, the user BPF is stored in fp->insns.
	 */
	old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
			   GFP_KERNEL | __GFP_NOWARN);
	if (!old_prog) {
		err = -ENOMEM;
		goto out_err;
	}

	/* 1st pass: calculate the new program length. */
	err = bpf_convert_filter(old_prog, old_len, NULL, &new_len,
				 &seen_ld_abs);
	if (err)
		goto out_err_free;

	/* Expand fp for appending the new filter representation. */
	old_fp = fp;
	fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0);
	if (!fp) {
		/* The old_fp is still around in case we couldn't
		 * allocate new memory, so uncharge on that one.
		 */
		fp = old_fp;
		err = -ENOMEM;
		goto out_err_free;
	}

	fp->len = new_len;

	/* 2nd pass: remap sock_filter insns into bpf_insn insns. */
	err = bpf_convert_filter(old_prog, old_len, fp, &new_len,
				 &seen_ld_abs);
	if (err)
		/* 2nd bpf_convert_filter() can fail only if it fails
		 * to allocate memory, remapping must succeed. Note,
		 * that at this time old_fp has already been released
		 * by krealloc().
		 */
		goto out_err_free;

	fp = bpf_prog_select_runtime(fp, &err);
	if (err)
		goto out_err_free;

	kfree(old_prog);
	return fp;

out_err_free:
	kfree(old_prog);
out_err:
	__bpf_prog_release(fp);
	return ERR_PTR(err);
}

static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp,
					   bpf_aux_classic_check_t trans)
{
	int err;

	fp->bpf_func = NULL;
	fp->jited = 0;

	err = bpf_check_classic(fp->insns, fp->len);
	if (err) {
		__bpf_prog_release(fp);
		return ERR_PTR(err);
	}

	/* There might be additional checks and transformations
	 * needed on classic filters, f.e. in case of seccomp.
	 */
	if (trans) {
		err = trans(fp->insns, fp->len);
		if (err) {
			__bpf_prog_release(fp);
			return ERR_PTR(err);
		}
	}

	/* Probe if we can JIT compile the filter and if so, do
	 * the compilation of the filter.
	 */
	bpf_jit_compile(fp);

	/* JIT compiler couldn't process this filter, so do the
	 * internal BPF translation for the optimized interpreter.
	 */
	if (!fp->jited)
		fp = bpf_migrate_filter(fp);

	return fp;
}

/**
 *	bpf_prog_create - create an unattached filter
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *
 * Create a filter independent of any socket. We first run some
 * sanity checks on it to make sure it does not explode on us later.
 * If an error occurs or there is insufficient memory for the filter
 * a negative errno code is returned. On success the return is zero.
 */
int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *fp;

	/* Make sure new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
		return -EINVAL;

	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!fp)
		return -ENOMEM;

	memcpy(fp->insns, fprog->filter, fsize);

	fp->len = fprog->len;
	/* Since unattached filters are not copied back to user
	 * space through sk_get_filter(), we do not need to hold
	 * a copy here, and can spare us the work.
	 */
	fp->orig_prog = NULL;

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	fp = bpf_prepare_filter(fp, NULL);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	*pfp = fp;
	return 0;
}
EXPORT_SYMBOL_GPL(bpf_prog_create);

/**
 *	bpf_prog_create_from_user - create an unattached filter from user buffer
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *	@trans: post-classic verifier transformation handler
 *	@save_orig: save classic BPF program
 *
 * This function effectively does the same as bpf_prog_create(), only
 * that it builds up its insns buffer from user space provided buffer.
 * It also allows for passing a bpf_aux_classic_check_t handler.
 */
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
			      bpf_aux_classic_check_t trans, bool save_orig)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *fp;
	int err;

	/* Make sure new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
		return -EINVAL;

	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!fp)
		return -ENOMEM;

	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		__bpf_prog_free(fp);
		return -EFAULT;
	}

	fp->len = fprog->len;
	fp->orig_prog = NULL;

	if (save_orig) {
		err = bpf_prog_store_orig_filter(fp, fprog);
		if (err) {
			__bpf_prog_free(fp);
			return -ENOMEM;
		}
	}

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	fp = bpf_prepare_filter(fp, trans);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	*pfp = fp;
	return 0;
}
EXPORT_SYMBOL_GPL(bpf_prog_create_from_user);

void bpf_prog_destroy(struct bpf_prog *fp)
{
	__bpf_prog_release(fp);
}
EXPORT_SYMBOL_GPL(bpf_prog_destroy);

static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
{
	struct sk_filter *fp, *old_fp;

	fp = kmalloc(sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;

	fp->prog = prog;

	if (!__sk_filter_charge(sk, fp)) {
		kfree(fp);
		return -ENOMEM;
	}
	refcount_set(&fp->refcnt, 1);

	old_fp = rcu_dereference_protected(sk->sk_filter,
					   lockdep_sock_is_held(sk));
	rcu_assign_pointer(sk->sk_filter, fp);

	if (old_fp)
		sk_filter_uncharge(sk, old_fp);

	return 0;
}

static
struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *prog;
	int err;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return ERR_PTR(-EPERM);

	/* Make sure new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
		return ERR_PTR(-EINVAL);

	prog = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!prog)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(prog->insns, fprog->filter, fsize)) {
		__bpf_prog_free(prog);
		return ERR_PTR(-EFAULT);
	}

	prog->len = fprog->len;

	err = bpf_prog_store_orig_filter(prog, fprog);
	if (err) {
		__bpf_prog_free(prog);
		return ERR_PTR(-ENOMEM);
	}

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	return bpf_prepare_filter(prog, NULL);
}

/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct bpf_prog *prog = __get_filter(fprog, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __sk_attach_prog(prog, sk);
	if (err < 0) {
		__bpf_prog_release(prog);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);

int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct bpf_prog *prog = __get_filter(fprog, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (bpf_prog_size(prog->len) > sysctl_optmem_max)
		err = -ENOMEM;
	else
		err = reuseport_attach_prog(sk, prog);

	if (err)
		__bpf_prog_release(prog);

	return err;
}

static struct bpf_prog *__get_bpf(u32 ufd, struct sock *sk)
{
	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return ERR_PTR(-EPERM);

	return bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);
}

int sk_attach_bpf(u32 ufd, struct sock *sk)
{
	struct bpf_prog *prog = __get_bpf(ufd, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __sk_attach_prog(prog, sk);
	if (err < 0) {
		bpf_prog_put(prog);
		return err;
	}

	return 0;
}

int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk)
{
	struct bpf_prog *prog;
	int err;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);
	if (IS_ERR(prog) && PTR_ERR(prog) == -EINVAL)
		prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SK_REUSEPORT);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT) {
		/* Like other non BPF_PROG_TYPE_SOCKET_FILTER
		 * bpf prog (e.g. sockmap).  It depends on the
		 * limitation imposed by bpf_prog_load().
		 * Hence, sysctl_optmem_max is not checked.
		 */
		if ((sk->sk_type != SOCK_STREAM &&
		     sk->sk_type != SOCK_DGRAM) ||
		    (sk->sk_protocol != IPPROTO_UDP &&
		     sk->sk_protocol != IPPROTO_TCP) ||
		    (sk->sk_family != AF_INET &&
		     sk->sk_family != AF_INET6)) {
			err = -ENOTSUPP;
			goto err_prog_put;
		}
	} else {
		/* BPF_PROG_TYPE_SOCKET_FILTER is charged against optmem. */
		if (bpf_prog_size(prog->len) > sysctl_optmem_max) {
			err = -ENOMEM;
			goto err_prog_put;
		}
	}

	err = reuseport_attach_prog(sk, prog);
err_prog_put:
	if (err)
		bpf_prog_put(prog);

	return err;
}

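/* sk_reuseport_prog_free() drops a reuseport program attached by one of the
 * two paths above: eBPF programs hold a reference that must be put, while
 * migrated classic BPF programs are owned and destroyed outright.
 */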
void sk_reuseport_prog_free(struct bpf_prog *prog)
{
	if (!prog)
		return;

	if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT)
		bpf_prog_put(prog);
	else
		bpf_prog_destroy(prog);
}

struct bpf_scratchpad {
	union {
		__be32 diff[MAX_BPF_STACK / sizeof(__be32)];
		u8     buff[MAX_BPF_STACK];
	};
};

static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);

static inline int __bpf_try_make_writable(struct sk_buff *skb,
					  unsigned int write_len)
{
	return skb_ensure_writable(skb, write_len);
}

static inline int bpf_try_make_writable(struct sk_buff *skb,
					unsigned int write_len)
{
	int err = __bpf_try_make_writable(skb, write_len);

	bpf_compute_data_pointers(skb);
	return err;
}

static int bpf_try_make_head_writable(struct sk_buff *skb)
{
	return bpf_try_make_writable(skb, skb_headlen(skb));
}

static inline void bpf_push_mac_rcsum(struct sk_buff *skb)
{
	if (skb_at_tc_ingress(skb))
		skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
}

static inline void bpf_pull_mac_rcsum(struct sk_buff *skb)
{
	if (skb_at_tc_ingress(skb))
		skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len);
}

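/* BPF helpers for reading and writing packet bytes through the skb. Writes
 * must first make the underlying data private and linear up to the write
 * length, hence the bpf_try_make_writable() calls.
 */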
BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset,
	   const void *, from, u32, len, u64, flags)
{
	void *ptr;

	if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
		return -EINVAL;
	if (unlikely(offset > 0xffff))
		return -EFAULT;
	if (unlikely(bpf_try_make_writable(skb, offset + len)))
		return -EFAULT;

	ptr = skb->data + offset;
	if (flags & BPF_F_RECOMPUTE_CSUM)
		__skb_postpull_rcsum(skb, ptr, len, offset);

	memcpy(ptr, from, len);

	if (flags & BPF_F_RECOMPUTE_CSUM)
		__skb_postpush_rcsum(skb, ptr, len, offset);
	if (flags & BPF_F_INVALIDATE_HASH)
		skb_clear_hash(skb);

	return 0;
}

static const struct bpf_func_proto bpf_skb_store_bytes_proto = {
	.func		= bpf_skb_store_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_MEM,
	.arg4_type	= ARG_CONST_SIZE,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset,
	   void *, to, u32, len)
{
	void *ptr;

	if (unlikely(offset > 0xffff))
		goto err_clear;

	ptr = skb_header_pointer(skb, offset, len, to);
	if (unlikely(!ptr))
		goto err_clear;
	if (ptr != to)
		memcpy(to, ptr, len);

	return 0;
err_clear:
	memset(to, 0, len);
	return -EFAULT;
}

static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
	.func		= bpf_skb_load_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

BPF_CALL_4(bpf_flow_dissector_load_bytes,
	   const struct bpf_flow_dissector *, ctx, u32, offset,
	   void *, to, u32, len)
{
	void *ptr;

	if (unlikely(offset > 0xffff))
		goto err_clear;

	if (unlikely(!ctx->skb))
		goto err_clear;

	ptr = skb_header_pointer(ctx->skb, offset, len, to);
	if (unlikely(!ptr))
		goto err_clear;
	if (ptr != to)
		memcpy(to, ptr, len);

	return 0;
err_clear:
	memset(to, 0, len);
	return -EFAULT;
}

static const struct bpf_func_proto bpf_flow_dissector_load_bytes_proto = {
	.func		= bpf_flow_dissector_load_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb,
	   u32, offset, void *, to, u32, len, u32, start_header)
{
	u8 *end = skb_tail_pointer(skb);
	u8 *net = skb_network_header(skb);
	u8 *mac = skb_mac_header(skb);
	u8 *ptr;

	if (unlikely(offset > 0xffff || len > (end - mac)))
		goto err_clear;

	switch (start_header) {
	case BPF_HDR_START_MAC:
		ptr = mac + offset;
		break;
	case BPF_HDR_START_NET:
		ptr = net + offset;
		break;
	default:
		goto err_clear;
	}

	if (likely(ptr >= mac && ptr + len <= end)) {
		memcpy(to, ptr, len);
		return 0;
	}

err_clear:
	memset(to, 0, len);
	return -EFAULT;
}

static const struct bpf_func_proto bpf_skb_load_bytes_relative_proto = {
	.func		= bpf_skb_load_bytes_relative,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_skb_pull_data, struct sk_buff *, skb, u32, len)
{
	/* Idea is the following: should the needed direct read/write
	 * test fail during runtime, we can pull in more data and redo
	 * again, since implicitly, we invalidate previous checks here.
	 *
	 * Or, since we know how much we need to make read/writeable,
	 * this can be done once at the program beginning for direct
	 * access case. By this we gain nothing, but it may make
	 * consistency for writes from direct packet access.
	 */
	return bpf_try_make_writable(skb, len ? : skb_headlen(skb));
}

static const struct bpf_func_proto bpf_skb_pull_data_proto = {
	.func		= bpf_skb_pull_data,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_sk_fullsock, struct sock *, sk)
{
	return sk_fullsock(sk) ? (unsigned long)sk : (unsigned long)NULL;
}

static const struct bpf_func_proto bpf_sk_fullsock_proto = {
	.func		= bpf_sk_fullsock,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_SOCKET_OR_NULL,
	.arg1_type	= ARG_PTR_TO_SOCK_COMMON,
};

static inline int sk_skb_try_make_writable(struct sk_buff *skb,
					   unsigned int write_len)
{
	int err = __bpf_try_make_writable(skb, write_len);

	bpf_compute_data_end_sk_skb(skb);
	return err;
}

BPF_CALL_2(sk_skb_pull_data, struct sk_buff *, skb, u32, len)
{
	/* Same idea as bpf_skb_pull_data(): make the requested length
	 * readable and writable, invalidating prior direct access
	 * checks, only recomputing the data_end pointer that SK_SKB
	 * programs use.
	 */
	return sk_skb_try_make_writable(skb, len ? : skb_headlen(skb));
}

static const struct bpf_func_proto sk_skb_pull_data_proto = {
	.func		= sk_skb_pull_data,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset,
	   u64, from, u64, to, u64, flags)
{
	__sum16 *ptr;

	if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK)))
		return -EINVAL;
	if (unlikely(offset > 0xffff || offset & 1))
		return -EFAULT;
	if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
		return -EFAULT;

	ptr = (__sum16 *)(skb->data + offset);
	switch (flags & BPF_F_HDR_FIELD_MASK) {
	case 0:
		if (unlikely(from != 0))
			return -EINVAL;

		csum_replace_by_diff(ptr, to);
		break;
	case 2:
		csum_replace2(ptr, from, to);
		break;
	case 4:
		csum_replace4(ptr, from, to);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static const struct bpf_func_proto bpf_l3_csum_replace_proto = {
	.func		= bpf_l3_csum_replace,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset,
	   u64, from, u64, to, u64, flags)
{
	bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
	bool is_mmzero = flags & BPF_F_MARK_MANGLED_0;
	bool do_mforce = flags & BPF_F_MARK_ENFORCE;
	__sum16 *ptr;

	if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_MARK_ENFORCE |
			       BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK)))
		return -EINVAL;
	if (unlikely(offset > 0xffff || offset & 1))
		return -EFAULT;
	if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
		return -EFAULT;

	ptr = (__sum16 *)(skb->data + offset);
	if (is_mmzero && !do_mforce && !*ptr)
		return 0;

	switch (flags & BPF_F_HDR_FIELD_MASK) {
	case 0:
		if (unlikely(from != 0))
			return -EINVAL;

		inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo);
		break;
	case 2:
		inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo);
		break;
	case 4:
		inet_proto_csum_replace4(ptr, skb, from, to, is_pseudo);
		break;
	default:
		return -EINVAL;
	}

	if (is_mmzero && !*ptr)
		*ptr = CSUM_MANGLED_0;
	return 0;
}

static const struct bpf_func_proto bpf_l4_csum_replace_proto = {
	.func		= bpf_l4_csum_replace,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size,
	   __be32 *, to, u32, to_size, __wsum, seed)
{
	struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
	u32 diff_size = from_size + to_size;
	int i, j = 0;

	/* This is quite flexible, some examples:
	 *
	 * from_size == 0, to_size > 0,  seed := csum --> pushing data
	 * from_size > 0,  to_size == 0, seed := csum --> pulling data
	 * from_size > 0,  to_size > 0,  seed := 0    --> diffing data
	 *
	 * Even for diffing, from_size and to_size don't need to be equal.
	 */
	if (unlikely(((from_size | to_size) & (sizeof(__be32) - 1)) ||
		     diff_size > sizeof(sp->diff)))
		return -EINVAL;

	for (i = 0; i < from_size / sizeof(__be32); i++, j++)
		sp->diff[j] = ~from[i];
	for (i = 0; i < to_size / sizeof(__be32); i++, j++)
		sp->diff[j] = to[i];

	return csum_partial(sp->diff, diff_size, seed);
}

static const struct bpf_func_proto bpf_csum_diff_proto = {
	.func		= bpf_csum_diff,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg4_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_csum_update, struct sk_buff *, skb, __wsum, csum)
{
	/* The interface is to be used in combination with bpf_csum_diff()
	 * for direct packet writes. csum rotation for alignment as well
	 * as emulating csum_sub() can be done from the eBPF program.
	 */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		return (skb->csum = csum_add(skb->csum, csum));

	return -ENOTSUPP;
}

static const struct bpf_func_proto bpf_csum_update_proto = {
	.func		= bpf_csum_update,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};

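/* Forwarding path used by bpf_clone_redirect() and skb_do_redirect():
 * BPF_F_INGRESS selects the receive path via netif_rx()/dev_forward_skb(),
 * otherwise the skb goes out through dev_queue_xmit() with recursion
 * protection.
 */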
static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
{
	return dev_forward_skb(dev, skb);
}

static inline int __bpf_rx_skb_no_mac(struct net_device *dev,
				      struct sk_buff *skb)
{
	int ret = ____dev_forward_skb(dev, skb);

	if (likely(!ret)) {
		skb->dev = dev;
		ret = netif_rx(skb);
	}

	return ret;
}

static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
{
	int ret;

	if (dev_xmit_recursion()) {
		net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
		kfree_skb(skb);
		return -ENETDOWN;
	}

	skb->dev = dev;
	skb->tstamp = 0;

	dev_xmit_recursion_inc();
	ret = dev_queue_xmit(skb);
	dev_xmit_recursion_dec();

	return ret;
}

static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
				 u32 flags)
{
	unsigned int mlen = skb_network_offset(skb);

	if (mlen) {
		__skb_pull(skb, mlen);

		/* At ingress, the mac header has already been pulled once.
		 * At egress, skb_postpull_rcsum has to be done in case that
		 * the skb is originated from ingress (i.e. a forwarded skb)
		 * to ensure that rcsum starts at net header.
		 */
		if (!skb_at_tc_ingress(skb))
			skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
	}
	skb_pop_mac_header(skb);
	skb_reset_mac_len(skb);
	return flags & BPF_F_INGRESS ?
	       __bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb);
}

static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev,
				 u32 flags)
{
	/* Verify the mac header was set before using it; a mac header
	 * sitting at or beyond the network header is invalid.
	 */
	if (unlikely(skb->mac_header >= skb->network_header)) {
		kfree_skb(skb);
		return -ERANGE;
	}

	bpf_push_mac_rcsum(skb);
	return flags & BPF_F_INGRESS ?
	       __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
}

static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev,
			  u32 flags)
{
	if (dev_is_mac_header_xmit(dev))
		return __bpf_redirect_common(skb, dev, flags);
	else
		return __bpf_redirect_no_mac(skb, dev, flags);
}

BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
{
	struct net_device *dev;
	struct sk_buff *clone;
	int ret;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return -EINVAL;

	dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex);
	if (unlikely(!dev))
		return -EINVAL;

	clone = skb_clone(skb, GFP_ATOMIC);
	if (unlikely(!clone))
		return -ENOMEM;

	/* For direct write, we need to keep the invariant that the skbs
	 * we're dealing with need to be uncloned. Should uncloning fail
	 * here, we need to free the just generated clone to unclone once
	 * again.
	 */
	ret = bpf_try_make_head_writable(skb);
	if (unlikely(ret)) {
		kfree_skb(clone);
		return -ENOMEM;
	}

	return __bpf_redirect(clone, dev, flags);
}

static const struct bpf_func_proto bpf_clone_redirect_proto = {
	.func		= bpf_clone_redirect,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};

DEFINE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);
EXPORT_PER_CPU_SYMBOL_GPL(bpf_redirect_info);

BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return TC_ACT_SHOT;

	ri->flags = flags;
	ri->tgt_index = ifindex;

	return TC_ACT_REDIRECT;
}

int skb_do_redirect(struct sk_buff *skb)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
	struct net_device *dev;

	dev = dev_get_by_index_rcu(dev_net(skb->dev), ri->tgt_index);
	ri->tgt_index = 0;
	if (unlikely(!dev)) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return __bpf_redirect(skb, dev, ri->flags);
}

static const struct bpf_func_proto bpf_redirect_proto = {
	.func		= bpf_redirect,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_ANYTHING,
};

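/* The sk_msg helpers below operate on the scatterlist ring of a struct
 * sk_msg, as used by sockmap/sockhash BPF_SK_MSG_VERDICT programs.
 */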
BPF_CALL_2(bpf_msg_apply_bytes, struct sk_msg *, msg, u32, bytes)
{
	msg->apply_bytes = bytes;
	return 0;
}

static const struct bpf_func_proto bpf_msg_apply_bytes_proto = {
	.func		= bpf_msg_apply_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_msg_cork_bytes, struct sk_msg *, msg, u32, bytes)
{
	msg->cork_bytes = bytes;
	return 0;
}

static const struct bpf_func_proto bpf_msg_cork_bytes_proto = {
	.func		= bpf_msg_cork_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start,
	   u32, end, u64, flags)
{
	u32 len = 0, offset = 0, copy = 0, poffset = 0, bytes = end - start;
	u32 first_sge, last_sge, i, shift, bytes_sg_total;
	struct scatterlist *sge;
	u8 *raw, *to, *from;
	struct page *page;

	if (unlikely(flags || end <= start))
		return -EINVAL;

	/* First find the starting scatterlist element */
	i = msg->sg.start;
	do {
		offset += len;
		len = sk_msg_elem(msg, i)->length;
		if (start < offset + len)
			break;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);

	if (unlikely(start >= offset + len))
		return -EINVAL;

	first_sge = i;
	/* The start may point into the sg element so we need to also
	 * account for the headroom.
	 */
	bytes_sg_total = start - offset + bytes;
	if (!test_bit(i, &msg->sg.copy) && bytes_sg_total <= len)
		goto out;

	/* At this point we need to linearize multiple scatterlist
	 * elements or a single shared page. Either way we need to
	 * copy into a linear buffer exclusively owned by BPF. Then
	 * place the buffer in the msg and set start/end pointers.
	 *
	 * First walk the ring to find the elements we are going to
	 * linearize and the total number of bytes they hold.
	 */
	do {
		copy += sk_msg_elem(msg, i)->length;
		sk_msg_iter_var_next(i);
		if (bytes_sg_total <= copy)
			break;
	} while (i != msg->sg.end);
	last_sge = i;

	if (unlikely(bytes_sg_total > copy))
		return -EINVAL;

	page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP,
			   get_order(copy));
	if (unlikely(!page))
		return -ENOMEM;

	raw = page_address(page);
	i = first_sge;
	do {
		sge = sk_msg_elem(msg, i);
		from = sg_virt(sge);
		len = sge->length;
		to = raw + poffset;

		memcpy(to, from, len);
		poffset += len;
		sge->length = 0;
		put_page(sg_page(sge));

		sk_msg_iter_var_next(i);
	} while (i != last_sge);

	sg_set_page(&msg->sg.data[first_sge], page, copy, 0);

	/* To repair sg ring we need to shift entries. If we only
	 * had a single entry though we can just replace it and
	 * be done. Otherwise walk the ring and shift the entries.
	 */
	WARN_ON_ONCE(last_sge == first_sge);
	shift = last_sge > first_sge ?
		last_sge - first_sge - 1 :
		NR_MSG_FRAG_IDS - first_sge + last_sge - 1;
	if (!shift)
		goto out;

	i = first_sge;
	sk_msg_iter_var_next(i);
	do {
		u32 move_from;

		if (i + shift >= NR_MSG_FRAG_IDS)
			move_from = i + shift - NR_MSG_FRAG_IDS;
		else
			move_from = i + shift;
		if (move_from == msg->sg.end)
			break;

		msg->sg.data[i] = msg->sg.data[move_from];
		msg->sg.data[move_from].length = 0;
		msg->sg.data[move_from].page_link = 0;
		msg->sg.data[move_from].offset = 0;
		sk_msg_iter_var_next(i);
	} while (1);

	msg->sg.end = msg->sg.end - shift > msg->sg.end ?
		      msg->sg.end - shift + NR_MSG_FRAG_IDS :
		      msg->sg.end - shift;
out:
	msg->data = sg_virt(&msg->sg.data[first_sge]) + start - offset;
	msg->data_end = msg->data + bytes;
	return 0;
}

static const struct bpf_func_proto bpf_msg_pull_data_proto = {
	.func		= bpf_msg_pull_data,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
};

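/* bpf_msg_push_data() opens a gap of @len bytes at @start in the msg
 * payload, either by splitting the containing element in place when ring
 * space allows, or by copying the element around a newly allocated page.
 */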
2345BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start,
2346 u32, len, u64, flags)
2347{
2348 struct scatterlist sge, nsge, nnsge, rsge = {0}, *psge;
2349 u32 new, i = 0, l = 0, space, copy = 0, offset = 0;
2350 u8 *raw, *to, *from;
2351 struct page *page;
2352
2353 if (unlikely(flags))
2354 return -EINVAL;
2355
2356
2357 i = msg->sg.start;
2358 do {
2359 offset += l;
2360 l = sk_msg_elem(msg, i)->length;
2361
2362 if (start < offset + l)
2363 break;
2364 sk_msg_iter_var_next(i);
2365 } while (i != msg->sg.end);
2366
2367 if (start >= offset + l)
2368 return -EINVAL;
2369
2370 space = MAX_MSG_FRAGS - sk_msg_elem_used(msg);
2371
2372
2373
2374
2375
2376
2377
2378
2379 if (!space || (space == 1 && start != offset))
2380 copy = msg->sg.data[i].length;
2381
2382 page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP,
2383 get_order(copy + len));
2384 if (unlikely(!page))
2385 return -ENOMEM;
2386
2387 if (copy) {
2388 int front, back;
2389
2390 raw = page_address(page);
2391
2392 psge = sk_msg_elem(msg, i);
2393 front = start - offset;
2394 back = psge->length - front;
2395 from = sg_virt(psge);
2396
2397 if (front)
2398 memcpy(raw, from, front);
2399
2400 if (back) {
2401 from += front;
2402 to = raw + front + len;
2403
2404 memcpy(to, from, back);
2405 }
2406
2407 put_page(sg_page(psge));
2408 } else if (start - offset) {
2409 psge = sk_msg_elem(msg, i);
2410 rsge = sk_msg_elem_cpy(msg, i);
2411
2412 psge->length = start - offset;
2413 rsge.length -= psge->length;
2414 rsge.offset += start;
2415
2416 sk_msg_iter_var_next(i);
2417 sg_unmark_end(psge);
2418 sg_unmark_end(&rsge);
2419 sk_msg_iter_next(msg, end);
2420 }
2421
2422
2423 new = i;
2424
2425
2426 if (!copy) {
2427 sge = sk_msg_elem_cpy(msg, i);
2428
2429 sk_msg_iter_var_next(i);
2430 sg_unmark_end(&sge);
2431 sk_msg_iter_next(msg, end);
2432
2433 nsge = sk_msg_elem_cpy(msg, i);
2434 if (rsge.length) {
2435 sk_msg_iter_var_next(i);
2436 nnsge = sk_msg_elem_cpy(msg, i);
2437 }
2438
2439 while (i != msg->sg.end) {
2440 msg->sg.data[i] = sge;
2441 sge = nsge;
2442 sk_msg_iter_var_next(i);
2443 if (rsge.length) {
2444 nsge = nnsge;
2445 nnsge = sk_msg_elem_cpy(msg, i);
2446 } else {
2447 nsge = sk_msg_elem_cpy(msg, i);
2448 }
2449 }
2450 }
2451
	/* Charge the pushed bytes to the socket and install the new page */
2453 sk_mem_charge(msg->sk, len);
2454 msg->sg.size += len;
2455 __clear_bit(new, &msg->sg.copy);
2456 sg_set_page(&msg->sg.data[new], page, len + copy, 0);
2457 if (rsge.length) {
2458 get_page(sg_page(&rsge));
2459 sk_msg_iter_var_next(new);
2460 msg->sg.data[new] = rsge;
2461 }
2462
2463 sk_msg_compute_data_pointers(msg);
2464 return 0;
2465}
2466
2467static const struct bpf_func_proto bpf_msg_push_data_proto = {
2468 .func = bpf_msg_push_data,
2469 .gpl_only = false,
2470 .ret_type = RET_INTEGER,
2471 .arg1_type = ARG_PTR_TO_CTX,
2472 .arg2_type = ARG_ANYTHING,
2473 .arg3_type = ARG_ANYTHING,
2474 .arg4_type = ARG_ANYTHING,
2475};
2476
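/* Ring helpers for bpf_msg_pop_data(): shift entries one slot left to
 * drop element i (closing the ring and moving sg.end back), or one
 * slot right starting at i to open up a free slot (after sg.end has
 * been advanced).
 */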
2477static void sk_msg_shift_left(struct sk_msg *msg, int i)
2478{
2479 int prev;
2480
2481 do {
2482 prev = i;
2483 sk_msg_iter_var_next(i);
2484 msg->sg.data[prev] = msg->sg.data[i];
2485 } while (i != msg->sg.end);
2486
2487 sk_msg_iter_prev(msg, end);
2488}
2489
2490static void sk_msg_shift_right(struct sk_msg *msg, int i)
2491{
2492 struct scatterlist tmp, sge;
2493
2494 sk_msg_iter_next(msg, end);
2495 sge = sk_msg_elem_cpy(msg, i);
2496 sk_msg_iter_var_next(i);
2497 tmp = sk_msg_elem_cpy(msg, i);
2498
2499 while (i != msg->sg.end) {
2500 msg->sg.data[i] = sge;
2501 sk_msg_iter_var_next(i);
2502 sge = tmp;
2503 tmp = sk_msg_elem_cpy(msg, i);
2504 }
2505}
2506
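/* bpf_msg_pop_data(msg, start, len, flags): remove len bytes at byte
 * offset start from the message, e.g. to strip a header again. The
 * code below distinguishes a pop that starts inside an element (front
 * part must be preserved) from one that consumes whole elements.
 */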
2507BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
2508 u32, len, u64, flags)
2509{
2510 u32 i = 0, l = 0, space, offset = 0;
2511 u64 last = start + len;
2512 int pop;
2513
2514 if (unlikely(flags))
2515 return -EINVAL;
2516
	/* First find the starting scatterlist element */
2518 i = msg->sg.start;
2519 do {
2520 offset += l;
2521 l = sk_msg_elem(msg, i)->length;
2522
2523 if (start < offset + l)
2524 break;
2525 sk_msg_iter_var_next(i);
2526 } while (i != msg->sg.end);
2527

	/* Bounds checks: start and pop must be inside message */
2529 if (start >= offset + l || last >= msg->sg.size)
2530 return -EINVAL;
2531
2532 space = MAX_MSG_FRAGS - sk_msg_elem_used(msg);
2533
2534 pop = len;
2535
	/* --------------| offset
	 * -| start      |-------- len -------|
	 *
	 *  |----- a ----|-------- pop -------|----- b ----|
	 *  |______________________________________________| length
	 *
	 * a:   region at front of the scatter element to keep
	 * b:   region at back of the scatter element to keep
	 *
	 * When start does not align with the beginning of element i,
	 * region a has to be preserved: either in place when a free
	 * slot is available for b, or by copying a and b together
	 * into a newly allocated page.
	 */
2556 if (start != offset) {
2557 struct scatterlist *nsge, *sge = sk_msg_elem(msg, i);
		int a = start - offset;
2559 int b = sge->length - pop - a;
2560
2561 sk_msg_iter_var_next(i);
2562
2563 if (pop < sge->length - a) {
2564 if (space) {
2565 sge->length = a;
2566 sk_msg_shift_right(msg, i);
2567 nsge = sk_msg_elem(msg, i);
2568 get_page(sg_page(sge));
2569 sg_set_page(nsge,
2570 sg_page(sge),
2571 b, sge->offset + pop + a);
2572 } else {
2573 struct page *page, *orig;
2574 u8 *to, *from;
2575
2576 page = alloc_pages(__GFP_NOWARN |
2577 __GFP_COMP | GFP_ATOMIC,
2578 get_order(a + b));
2579 if (unlikely(!page))
2580 return -ENOMEM;
2581
2582 sge->length = a;
2583 orig = sg_page(sge);
2584 from = sg_virt(sge);
2585 to = page_address(page);
2586 memcpy(to, from, a);
2587 memcpy(to + a, from + a + pop, b);
2588 sg_set_page(sge, page, a + b, 0);
2589 put_page(orig);
2590 }
2591 pop = 0;
2592 } else if (pop >= sge->length - a) {
			pop -= (sge->length - a);
			sge->length = a;
2595 }
2596 }
2597
	/* From the case above, the layout at this point must be:
	 *
	 * -| offset
	 * -| start
	 *
	 *  |---- pop ---|---------------- b ------------|
	 *  |____________________________________________| length
	 *
	 * start and the offset of element i are now equal: the
	 * start != offset case either trimmed the partial element in
	 * place or split it into its own slot. The remainder of the
	 * pop strips bytes from the front of successive elements and
	 * drops every element it consumes completely.
	 */
2615 while (pop) {
2616 struct scatterlist *sge = sk_msg_elem(msg, i);
2617
2618 if (pop < sge->length) {
2619 sge->length -= pop;
2620 sge->offset += pop;
2621 pop = 0;
2622 } else {
2623 pop -= sge->length;
2624 sk_msg_shift_left(msg, i);
2625 }
2626 sk_msg_iter_var_next(i);
2627 }
2628
2629 sk_mem_uncharge(msg->sk, len - pop);
2630 msg->sg.size -= (len - pop);
2631 sk_msg_compute_data_pointers(msg);
2632 return 0;
2633}
2634
2635static const struct bpf_func_proto bpf_msg_pop_data_proto = {
2636 .func = bpf_msg_pop_data,
2637 .gpl_only = false,
2638 .ret_type = RET_INTEGER,
2639 .arg1_type = ARG_PTR_TO_CTX,
2640 .arg2_type = ARG_ANYTHING,
2641 .arg3_type = ARG_ANYTHING,
2642 .arg4_type = ARG_ANYTHING,
2643};
2644
2645BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb)
2646{
2647 return task_get_classid(skb);
2648}
2649
2650static const struct bpf_func_proto bpf_get_cgroup_classid_proto = {
2651 .func = bpf_get_cgroup_classid,
2652 .gpl_only = false,
2653 .ret_type = RET_INTEGER,
2654 .arg1_type = ARG_PTR_TO_CTX,
2655};
2656
2657BPF_CALL_1(bpf_get_route_realm, const struct sk_buff *, skb)
2658{
2659 return dst_tclassid(skb);
2660}
2661
2662static const struct bpf_func_proto bpf_get_route_realm_proto = {
2663 .func = bpf_get_route_realm,
2664 .gpl_only = false,
2665 .ret_type = RET_INTEGER,
2666 .arg1_type = ARG_PTR_TO_CTX,
2667};
2668
2669BPF_CALL_1(bpf_get_hash_recalc, struct sk_buff *, skb)
2670{
	/* If skb_clear_hash() was called due to mangling, we can
	 * trigger SW recalculation here. Later access to hash
	 * can then use the inline skb->hash via context directly
	 * instead of calling this helper again.
	 */
2676 return skb_get_hash(skb);
2677}
2678
2679static const struct bpf_func_proto bpf_get_hash_recalc_proto = {
2680 .func = bpf_get_hash_recalc,
2681 .gpl_only = false,
2682 .ret_type = RET_INTEGER,
2683 .arg1_type = ARG_PTR_TO_CTX,
2684};
2685
2686BPF_CALL_1(bpf_set_hash_invalid, struct sk_buff *, skb)
2687{
	/* After all direct packet write, this can be used once for
	 * triggering a lazy recalc on next skb_get_hash() invocation.
	 */
2691 skb_clear_hash(skb);
2692 return 0;
2693}
2694
2695static const struct bpf_func_proto bpf_set_hash_invalid_proto = {
2696 .func = bpf_set_hash_invalid,
2697 .gpl_only = false,
2698 .ret_type = RET_INTEGER,
2699 .arg1_type = ARG_PTR_TO_CTX,
2700};
2701
2702BPF_CALL_2(bpf_set_hash, struct sk_buff *, skb, u32, hash)
2703{
	/* Set user specified hash as L4(+), so that it gets returned
	 * on skb_get_hash() call unless BPF prog later on triggers a
	 * skb_clear_hash().
	 */
2708 __skb_set_sw_hash(skb, hash, true);
2709 return 0;
2710}
2711
2712static const struct bpf_func_proto bpf_set_hash_proto = {
2713 .func = bpf_set_hash,
2714 .gpl_only = false,
2715 .ret_type = RET_INTEGER,
2716 .arg1_type = ARG_PTR_TO_CTX,
2717 .arg2_type = ARG_ANYTHING,
2718};
2719
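/* skb_vlan_push() edits bytes inside the mac header. With
 * CHECKSUM_COMPLETE, skb->csum only covers data from the network
 * header on, so the bpf_push_mac_rcsum()/bpf_pull_mac_rcsum() pair
 * temporarily folds the mac header into the checksum while it is
 * being rewritten.
 */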
2720BPF_CALL_3(bpf_skb_vlan_push, struct sk_buff *, skb, __be16, vlan_proto,
2721 u16, vlan_tci)
2722{
2723 int ret;
2724
2725 if (unlikely(vlan_proto != htons(ETH_P_8021Q) &&
2726 vlan_proto != htons(ETH_P_8021AD)))
2727 vlan_proto = htons(ETH_P_8021Q);
2728
2729 bpf_push_mac_rcsum(skb);
2730 ret = skb_vlan_push(skb, vlan_proto, vlan_tci);
2731 bpf_pull_mac_rcsum(skb);
2732
2733 bpf_compute_data_pointers(skb);
2734 return ret;
2735}
2736
2737static const struct bpf_func_proto bpf_skb_vlan_push_proto = {
2738 .func = bpf_skb_vlan_push,
2739 .gpl_only = false,
2740 .ret_type = RET_INTEGER,
2741 .arg1_type = ARG_PTR_TO_CTX,
2742 .arg2_type = ARG_ANYTHING,
2743 .arg3_type = ARG_ANYTHING,
2744};
2745
2746BPF_CALL_1(bpf_skb_vlan_pop, struct sk_buff *, skb)
2747{
2748 int ret;
2749
2750 bpf_push_mac_rcsum(skb);
2751 ret = skb_vlan_pop(skb);
2752 bpf_pull_mac_rcsum(skb);
2753
2754 bpf_compute_data_pointers(skb);
2755 return ret;
2756}
2757
2758static const struct bpf_func_proto bpf_skb_vlan_pop_proto = {
2759 .func = bpf_skb_vlan_pop,
2760 .gpl_only = false,
2761 .ret_type = RET_INTEGER,
2762 .arg1_type = ARG_PTR_TO_CTX,
2763};
2764
2765static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len)
2766{
	/* Caller already did skb_cow() with len as headroom,
	 * so no need to do it here.
	 */
2770 skb_push(skb, len);
2771 memmove(skb->data, skb->data + len, off);
2772 memset(skb->data + off, 0, len);
2773
	/* No skb_postpush_rcsum(skb, skb->data + off, len)
	 * needed here as it does not change the skb->csum
	 * result for checksum complete when summing over
	 * zeroed blocks.
	 */
2779 return 0;
2780}
2781
2782static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len)
2783{
	/* skb_ensure_writable() is not needed here, as we're
	 * already working on an uncloned skb.
	 */
2787 if (unlikely(!pskb_may_pull(skb, off + len)))
2788 return -ENOMEM;
2789
2790 skb_postpull_rcsum(skb, skb->data + off, len);
2791 memmove(skb->data + len, skb->data, off);
2792 __skb_pull(skb, len);
2793
2794 return 0;
2795}
2796
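/* Wrappers around the generic push/pop above that also keep the
 * cached mac/network/transport header offsets consistent after
 * header room has been inserted or removed at offset off.
 */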
2797static int bpf_skb_net_hdr_push(struct sk_buff *skb, u32 off, u32 len)
2798{
2799 bool trans_same = skb->transport_header == skb->network_header;
2800 int ret;
2801
	/* There's no need for __skb_push()/__skb_pull() pair to
	 * get to the start of the mac header as we're guaranteed
	 * to always start from here under eBPF.
	 */
2806 ret = bpf_skb_generic_push(skb, off, len);
2807 if (likely(!ret)) {
2808 skb->mac_header -= len;
2809 skb->network_header -= len;
2810 if (trans_same)
2811 skb->transport_header = skb->network_header;
2812 }
2813
2814 return ret;
2815}
2816
2817static int bpf_skb_net_hdr_pop(struct sk_buff *skb, u32 off, u32 len)
2818{
2819 bool trans_same = skb->transport_header == skb->network_header;
2820 int ret;
2821
	/* Same here, __skb_push()/__skb_pull() pair not needed. */
2823 ret = bpf_skb_generic_pop(skb, off, len);
2824 if (likely(!ret)) {
2825 skb->mac_header += len;
2826 skb->network_header += len;
2827 if (trans_same)
2828 skb->transport_header = skb->network_header;
2829 }
2830
2831 return ret;
2832}
2833
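/* IPv4 -> IPv6 header conversion: grow the network header by the
 * 20 byte size difference between the two fixed headers and switch
 * the GSO type over. Only GSO-TCP packets are handled; other GSO
 * types would need fixups of their own.
 */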
2834static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
2835{
2836 const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
2837 u32 off = skb_mac_header_len(skb);
2838 int ret;
2839
2840 if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
2841 return -ENOTSUPP;
2842
2843 ret = skb_cow(skb, len_diff);
2844 if (unlikely(ret < 0))
2845 return ret;
2846
2847 ret = bpf_skb_net_hdr_push(skb, off, len_diff);
2848 if (unlikely(ret < 0))
2849 return ret;
2850
2851 if (skb_is_gso(skb)) {
2852 struct skb_shared_info *shinfo = skb_shinfo(skb);
2853
		/* SKB_GSO_TCPV4 needs to be changed into
		 * SKB_GSO_TCPV6.
		 */
2857 if (shinfo->gso_type & SKB_GSO_TCPV4) {
2858 shinfo->gso_type &= ~SKB_GSO_TCPV4;
2859 shinfo->gso_type |= SKB_GSO_TCPV6;
2860 }
2861
		/* Due to IPv6 header, MSS needs to be downgraded. */
2863 skb_decrease_gso_size(shinfo, len_diff);
		/* Header must be checked, and gso_segs recomputed. */
2865 shinfo->gso_type |= SKB_GSO_DODGY;
2866 shinfo->gso_segs = 0;
2867 }
2868
2869 skb->protocol = htons(ETH_P_IPV6);
2870 skb_clear_hash(skb);
2871
2872 return 0;
2873}
2874
2875static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
2876{
2877 const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
2878 u32 off = skb_mac_header_len(skb);
2879 int ret;
2880
2881 if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
2882 return -ENOTSUPP;
2883
2884 ret = skb_unclone(skb, GFP_ATOMIC);
2885 if (unlikely(ret < 0))
2886 return ret;
2887
2888 ret = bpf_skb_net_hdr_pop(skb, off, len_diff);
2889 if (unlikely(ret < 0))
2890 return ret;
2891
2892 if (skb_is_gso(skb)) {
2893 struct skb_shared_info *shinfo = skb_shinfo(skb);
2894
		/* SKB_GSO_TCPV6 needs to be changed into
		 * SKB_GSO_TCPV4.
		 */
2898 if (shinfo->gso_type & SKB_GSO_TCPV6) {
2899 shinfo->gso_type &= ~SKB_GSO_TCPV6;
2900 shinfo->gso_type |= SKB_GSO_TCPV4;
2901 }
2902
		/* Due to IPv4 header, MSS can be upgraded. */
2904 skb_increase_gso_size(shinfo, len_diff);
		/* Header must be checked, and gso_segs recomputed. */
2906 shinfo->gso_type |= SKB_GSO_DODGY;
2907 shinfo->gso_segs = 0;
2908 }
2909
2910 skb->protocol = htons(ETH_P_IP);
2911 skb_clear_hash(skb);
2912
2913 return 0;
2914}
2915
2916static int bpf_skb_proto_xlat(struct sk_buff *skb, __be16 to_proto)
2917{
2918 __be16 from_proto = skb->protocol;
2919
2920 if (from_proto == htons(ETH_P_IP) &&
2921 to_proto == htons(ETH_P_IPV6))
2922 return bpf_skb_proto_4_to_6(skb);
2923
2924 if (from_proto == htons(ETH_P_IPV6) &&
2925 to_proto == htons(ETH_P_IP))
2926 return bpf_skb_proto_6_to_4(skb);
2927
2928 return -ENOTSUPP;
2929}
2930
2931BPF_CALL_3(bpf_skb_change_proto, struct sk_buff *, skb, __be16, proto,
2932 u64, flags)
2933{
2934 int ret;
2935
2936 if (unlikely(flags))
2937 return -EINVAL;
2938
	/* This helper only does the groundwork of switching the
	 * network header between IPv4 and IPv6: it grows or shrinks
	 * the header room and translates the GSO metadata. The eBPF
	 * program fills in the new header itself afterwards via
	 * bpf_skb_store_bytes() and repairs the checksums via
	 * bpf_l3_csum_replace()/bpf_l4_csum_replace() rather than
	 * passing a raw header buffer here.
	 *
	 * This keeps the helper minimal and free of protocol
	 * specifics; writing packet data stays the job of the store
	 * and checksum helpers.
	 */
2956 ret = bpf_skb_proto_xlat(skb, proto);
2957 bpf_compute_data_pointers(skb);
2958 return ret;
2959}
2960
2961static const struct bpf_func_proto bpf_skb_change_proto_proto = {
2962 .func = bpf_skb_change_proto,
2963 .gpl_only = false,
2964 .ret_type = RET_INTEGER,
2965 .arg1_type = ARG_PTR_TO_CTX,
2966 .arg2_type = ARG_ANYTHING,
2967 .arg3_type = ARG_ANYTHING,
2968};
2969
2970BPF_CALL_2(bpf_skb_change_type, struct sk_buff *, skb, u32, pkt_type)
2971{
2972
2973 if (unlikely(!skb_pkt_type_ok(skb->pkt_type) ||
2974 !skb_pkt_type_ok(pkt_type)))
2975 return -EINVAL;
2976
2977 skb->pkt_type = pkt_type;
2978 return 0;
2979}
2980
2981static const struct bpf_func_proto bpf_skb_change_type_proto = {
2982 .func = bpf_skb_change_type,
2983 .gpl_only = false,
2984 .ret_type = RET_INTEGER,
2985 .arg1_type = ARG_PTR_TO_CTX,
2986 .arg2_type = ARG_ANYTHING,
2987};
2988
2989static u32 bpf_skb_net_base_len(const struct sk_buff *skb)
2990{
2991 switch (skb->protocol) {
2992 case htons(ETH_P_IP):
2993 return sizeof(struct iphdr);
2994 case htons(ETH_P_IPV6):
2995 return sizeof(struct ipv6hdr);
2996 default:
2997 return ~0U;
2998 }
2999}
3000
3001#define BPF_F_ADJ_ROOM_ENCAP_L3_MASK (BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 | \
3002 BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
3003
3004#define BPF_F_ADJ_ROOM_MASK (BPF_F_ADJ_ROOM_FIXED_GSO | \
3005 BPF_F_ADJ_ROOM_ENCAP_L3_MASK | \
3006 BPF_F_ADJ_ROOM_ENCAP_L4_GRE | \
3007 BPF_F_ADJ_ROOM_ENCAP_L4_UDP | \
3008 BPF_F_ADJ_ROOM_ENCAP_L2( \
3009 BPF_ADJ_ROOM_ENCAP_L2_MASK))
3010
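/* Grow len_diff bytes of header room at offset off. The
 * BPF_F_ADJ_ROOM_ENCAP_* flags declare that the new room will hold
 * tunnel headers, in which case the inner header offsets, the inner
 * protocol and the matching GSO tunnel type are set up here so that
 * offloads keep working on the encapsulated packet.
 */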
3011static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff,
3012 u64 flags)
3013{
3014 u8 inner_mac_len = flags >> BPF_ADJ_ROOM_ENCAP_L2_SHIFT;
3015 bool encap = flags & BPF_F_ADJ_ROOM_ENCAP_L3_MASK;
3016 u16 mac_len = 0, inner_net = 0, inner_trans = 0;
3017 unsigned int gso_type = SKB_GSO_DODGY;
3018 int ret;
3019
3020 if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) {
		/* udp gso_size delineates datagrams, only allow if fixed */
3022 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ||
3023 !(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
3024 return -ENOTSUPP;
3025 }
3026
3027 ret = skb_cow_head(skb, len_diff);
3028 if (unlikely(ret < 0))
3029 return ret;
3030
3031 if (encap) {
3032 if (skb->protocol != htons(ETH_P_IP) &&
3033 skb->protocol != htons(ETH_P_IPV6))
3034 return -ENOTSUPP;
3035
3036 if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 &&
3037 flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
3038 return -EINVAL;
3039
3040 if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE &&
3041 flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP)
3042 return -EINVAL;
3043
3044 if (skb->encapsulation)
3045 return -EALREADY;
3046
3047 mac_len = skb->network_header - skb->mac_header;
3048 inner_net = skb->network_header;
3049 if (inner_mac_len > len_diff)
3050 return -EINVAL;
3051 inner_trans = skb->transport_header;
3052 }
3053
3054 ret = bpf_skb_net_hdr_push(skb, off, len_diff);
3055 if (unlikely(ret < 0))
3056 return ret;
3057
3058 if (encap) {
3059 skb->inner_mac_header = inner_net - inner_mac_len;
3060 skb->inner_network_header = inner_net;
3061 skb->inner_transport_header = inner_trans;
3062 skb_set_inner_protocol(skb, skb->protocol);
3063
3064 skb->encapsulation = 1;
3065 skb_set_network_header(skb, mac_len);
3066
3067 if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP)
3068 gso_type |= SKB_GSO_UDP_TUNNEL;
3069 else if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE)
3070 gso_type |= SKB_GSO_GRE;
3071 else if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
3072 gso_type |= SKB_GSO_IPXIP6;
3073 else if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4)
3074 gso_type |= SKB_GSO_IPXIP4;
3075
3076 if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE ||
3077 flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP) {
3078 int nh_len = flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 ?
3079 sizeof(struct ipv6hdr) :
3080 sizeof(struct iphdr);
3081
3082 skb_set_transport_header(skb, mac_len + nh_len);
3083 }
3084
		/* Match skb->protocol to new outer l3 protocol */
3086 if (skb->protocol == htons(ETH_P_IP) &&
3087 flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
3088 skb->protocol = htons(ETH_P_IPV6);
3089 else if (skb->protocol == htons(ETH_P_IPV6) &&
3090 flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4)
3091 skb->protocol = htons(ETH_P_IP);
3092 }
3093
3094 if (skb_is_gso(skb)) {
3095 struct skb_shared_info *shinfo = skb_shinfo(skb);
3096
		/* Due to header grow, MSS needs to be downgraded. */
3098 if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
3099 skb_decrease_gso_size(shinfo, len_diff);
3100
		/* Header must be checked, and gso_segs recomputed. */
3102 shinfo->gso_type |= gso_type;
3103 shinfo->gso_segs = 0;
3104 }
3105
3106 return 0;
3107}
3108
3109static int bpf_skb_net_shrink(struct sk_buff *skb, u32 off, u32 len_diff,
3110 u64 flags)
3111{
3112 int ret;
3113
3114 if (flags & ~BPF_F_ADJ_ROOM_FIXED_GSO)
3115 return -EINVAL;
3116
3117 if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) {
		/* udp gso_size delineates datagrams, only allow if fixed */
3119 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ||
3120 !(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
3121 return -ENOTSUPP;
3122 }
3123
3124 ret = skb_unclone(skb, GFP_ATOMIC);
3125 if (unlikely(ret < 0))
3126 return ret;
3127
3128 ret = bpf_skb_net_hdr_pop(skb, off, len_diff);
3129 if (unlikely(ret < 0))
3130 return ret;
3131
3132 if (skb_is_gso(skb)) {
3133 struct skb_shared_info *shinfo = skb_shinfo(skb);
3134
		/* Due to header shrink, MSS can be upgraded. */
3136 if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
3137 skb_increase_gso_size(shinfo, len_diff);
3138
		/* Header must be checked, and gso_segs recomputed. */
3140 shinfo->gso_type |= SKB_GSO_DODGY;
3141 shinfo->gso_segs = 0;
3142 }
3143
3144 return 0;
3145}
3146
3147static u32 __bpf_skb_max_len(const struct sk_buff *skb)
3148{
3149 return skb->dev ? skb->dev->mtu + skb->dev->hard_header_len :
3150 SKB_MAX_ALLOC;
3151}
3152
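/* bpf_skb_adjust_room(skb, len_diff, mode, flags): grow (len_diff > 0)
 * or shrink (len_diff < 0) header room either just after the mac
 * header (BPF_ADJ_ROOM_MAC) or after the network header
 * (BPF_ADJ_ROOM_NET). Illustrative use for IPIP encapsulation from a
 * TC program:
 *
 *	bpf_skb_adjust_room(skb, sizeof(struct iphdr), BPF_ADJ_ROOM_NET,
 *			    BPF_F_ADJ_ROOM_ENCAP_L3_IPV4);
 *
 * after which the program stores the outer header into the new room.
 */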
3153BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
3154 u32, mode, u64, flags)
3155{
3156 u32 len_cur, len_diff_abs = abs(len_diff);
3157 u32 len_min = bpf_skb_net_base_len(skb);
3158 u32 len_max = __bpf_skb_max_len(skb);
3159 __be16 proto = skb->protocol;
3160 bool shrink = len_diff < 0;
3161 u32 off;
3162 int ret;
3163
3164 if (unlikely(flags & ~BPF_F_ADJ_ROOM_MASK))
3165 return -EINVAL;
3166 if (unlikely(len_diff_abs > 0xfffU))
3167 return -EFAULT;
3168 if (unlikely(proto != htons(ETH_P_IP) &&
3169 proto != htons(ETH_P_IPV6)))
3170 return -ENOTSUPP;
3171
3172 off = skb_mac_header_len(skb);
3173 switch (mode) {
3174 case BPF_ADJ_ROOM_NET:
3175 off += bpf_skb_net_base_len(skb);
3176 break;
3177 case BPF_ADJ_ROOM_MAC:
3178 break;
3179 default:
3180 return -ENOTSUPP;
3181 }
3182
3183 len_cur = skb->len - skb_network_offset(skb);
3184 if ((shrink && (len_diff_abs >= len_cur ||
3185 len_cur - len_diff_abs < len_min)) ||
3186 (!shrink && (skb->len + len_diff_abs > len_max &&
3187 !skb_is_gso(skb))))
3188 return -ENOTSUPP;
3189
3190 ret = shrink ? bpf_skb_net_shrink(skb, off, len_diff_abs, flags) :
3191 bpf_skb_net_grow(skb, off, len_diff_abs, flags);
3192
3193 bpf_compute_data_pointers(skb);
3194 return ret;
3195}
3196
3197static const struct bpf_func_proto bpf_skb_adjust_room_proto = {
3198 .func = bpf_skb_adjust_room,
3199 .gpl_only = false,
3200 .ret_type = RET_INTEGER,
3201 .arg1_type = ARG_PTR_TO_CTX,
3202 .arg2_type = ARG_ANYTHING,
3203 .arg3_type = ARG_ANYTHING,
3204 .arg4_type = ARG_ANYTHING,
3205};
3206
3207static u32 __bpf_skb_min_len(const struct sk_buff *skb)
3208{
3209 u32 min_len = skb_network_offset(skb);
3210
3211 if (skb_transport_header_was_set(skb))
3212 min_len = skb_transport_offset(skb);
3213 if (skb->ip_summed == CHECKSUM_PARTIAL)
3214 min_len = skb_checksum_start_offset(skb) +
3215 skb->csum_offset + sizeof(__sum16);
3216 return min_len;
3217}
3218
3219static int bpf_skb_grow_rcsum(struct sk_buff *skb, unsigned int new_len)
3220{
3221 unsigned int old_len = skb->len;
3222 int ret;
3223
3224 ret = __skb_grow_rcsum(skb, new_len);
3225 if (!ret)
3226 memset(skb->data + old_len, 0, new_len - old_len);
3227 return ret;
3228}
3229
3230static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len)
3231{
3232 return __skb_trim_rcsum(skb, new_len);
3233}
3234
3235static inline int __bpf_skb_change_tail(struct sk_buff *skb, u32 new_len,
3236 u64 flags)
3237{
3238 u32 max_len = __bpf_skb_max_len(skb);
3239 u32 min_len = __bpf_skb_min_len(skb);
3240 int ret;
3241
3242 if (unlikely(flags || new_len > max_len || new_len < min_len))
3243 return -EINVAL;
3244 if (skb->encapsulation)
3245 return -ENOTSUPP;
3246
	/* The basic idea of this helper is that it performs the needed
	 * work to either grow or trim the skb, and the eBPF program
	 * rewrites the rest via helpers like bpf_skb_store_bytes(),
	 * bpf_lX_csum_replace() and others rather than passing a raw
	 * buffer here.
	 *
	 * Like in bpf_skb_change_proto(), we want to keep this minimal
	 * and without protocol specifics, so that concerns stay
	 * separated: bpf_skb_store_bytes() is the one responsible for
	 * writing buffers.
	 *
	 * This is expected to be a slow path operation for replies
	 * with control messages, which is why the skb is implicitly
	 * linearized, uncloned and has its offloads dropped here.
	 */
3263 ret = __bpf_try_make_writable(skb, skb->len);
3264 if (!ret) {
3265 if (new_len > skb->len)
3266 ret = bpf_skb_grow_rcsum(skb, new_len);
3267 else if (new_len < skb->len)
3268 ret = bpf_skb_trim_rcsum(skb, new_len);
3269 if (!ret && skb_is_gso(skb))
3270 skb_gso_reset(skb);
3271 }
3272 return ret;
3273}
3274
3275BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len,
3276 u64, flags)
3277{
3278 int ret = __bpf_skb_change_tail(skb, new_len, flags);
3279
3280 bpf_compute_data_pointers(skb);
3281 return ret;
3282}
3283
3284static const struct bpf_func_proto bpf_skb_change_tail_proto = {
3285 .func = bpf_skb_change_tail,
3286 .gpl_only = false,
3287 .ret_type = RET_INTEGER,
3288 .arg1_type = ARG_PTR_TO_CTX,
3289 .arg2_type = ARG_ANYTHING,
3290 .arg3_type = ARG_ANYTHING,
3291};
3292
3293BPF_CALL_3(sk_skb_change_tail, struct sk_buff *, skb, u32, new_len,
3294 u64, flags)
3295{
3296 int ret = __bpf_skb_change_tail(skb, new_len, flags);
3297
3298 bpf_compute_data_end_sk_skb(skb);
3299 return ret;
3300}
3301
3302static const struct bpf_func_proto sk_skb_change_tail_proto = {
3303 .func = sk_skb_change_tail,
3304 .gpl_only = false,
3305 .ret_type = RET_INTEGER,
3306 .arg1_type = ARG_PTR_TO_CTX,
3307 .arg2_type = ARG_ANYTHING,
3308 .arg3_type = ARG_ANYTHING,
3309};
3310
3311static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room,
3312 u64 flags)
3313{
3314 u32 max_len = __bpf_skb_max_len(skb);
3315 u32 new_len = skb->len + head_room;
3316 int ret;
3317
3318 if (unlikely(flags || (!skb_is_gso(skb) && new_len > max_len) ||
3319 new_len < skb->len))
3320 return -EINVAL;
3321
3322 ret = skb_cow(skb, head_room);
3323 if (likely(!ret)) {
		/* Idea for this helper is that we currently only
		 * allow to expand on the mac header, meaning that
		 * skb->protocol, the network header etc. stay as-is.
		 * Compared to bpf_skb_change_tail(), we're more
		 * flexible due to not needing to linearize or reset
		 * GSO. The intended user is an L3 skb that needs a
		 * mac header pushed for redirection into an L2
		 * device.
		 */
3333 __skb_push(skb, head_room);
3334 memset(skb->data, 0, head_room);
3335 skb_reset_mac_header(skb);
3336 }
3337
3338 return ret;
3339}
3340
3341BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room,
3342 u64, flags)
3343{
3344 int ret = __bpf_skb_change_head(skb, head_room, flags);
3345
3346 bpf_compute_data_pointers(skb);
3347 return ret;
3348}
3349
3350static const struct bpf_func_proto bpf_skb_change_head_proto = {
3351 .func = bpf_skb_change_head,
3352 .gpl_only = false,
3353 .ret_type = RET_INTEGER,
3354 .arg1_type = ARG_PTR_TO_CTX,
3355 .arg2_type = ARG_ANYTHING,
3356 .arg3_type = ARG_ANYTHING,
3357};
3358
3359BPF_CALL_3(sk_skb_change_head, struct sk_buff *, skb, u32, head_room,
3360 u64, flags)
3361{
3362 int ret = __bpf_skb_change_head(skb, head_room, flags);
3363
3364 bpf_compute_data_end_sk_skb(skb);
3365 return ret;
3366}
3367
3368static const struct bpf_func_proto sk_skb_change_head_proto = {
3369 .func = sk_skb_change_head,
3370 .gpl_only = false,
3371 .ret_type = RET_INTEGER,
3372 .arg1_type = ARG_PTR_TO_CTX,
3373 .arg2_type = ARG_ANYTHING,
3374 .arg3_type = ARG_ANYTHING,
3375};

static unsigned long xdp_get_metalen(const struct xdp_buff *xdp)
3377{
3378 return xdp_data_meta_unsupported(xdp) ? 0 :
3379 xdp->data - xdp->data_meta;
3380}
3381
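/* bpf_xdp_adjust_head(xdp, offset): move xdp->data by offset bytes; a
 * negative offset makes room at the front of the packet (e.g. for
 * encapsulation), a positive one trims it. The metadata area, if any,
 * is moved along. Illustrative use from an XDP program:
 *
 *	if (bpf_xdp_adjust_head(xdp, -(int)sizeof(struct iphdr)))
 *		return XDP_DROP;
 */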
3382BPF_CALL_2(bpf_xdp_adjust_head, struct xdp_buff *, xdp, int, offset)
3383{
3384 void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame);
3385 unsigned long metalen = xdp_get_metalen(xdp);
3386 void *data_start = xdp_frame_end + metalen;
3387 void *data = xdp->data + offset;
3388
3389 if (unlikely(data < data_start ||
3390 data > xdp->data_end - ETH_HLEN))
3391 return -EINVAL;
3392
3393 if (metalen)
3394 memmove(xdp->data_meta + offset,
3395 xdp->data_meta, metalen);
3396 xdp->data_meta += offset;
3397 xdp->data = data;
3398
3399 return 0;
3400}
3401
3402static const struct bpf_func_proto bpf_xdp_adjust_head_proto = {
3403 .func = bpf_xdp_adjust_head,
3404 .gpl_only = false,
3405 .ret_type = RET_INTEGER,
3406 .arg1_type = ARG_PTR_TO_CTX,
3407 .arg2_type = ARG_ANYTHING,
3408};
3409
3410BPF_CALL_2(bpf_xdp_adjust_tail, struct xdp_buff *, xdp, int, offset)
3411{
3412 void *data_end = xdp->data_end + offset;
3413
	/* only shrinking is allowed */
3415 if (unlikely(offset >= 0))
3416 return -EINVAL;
3417
3418 if (unlikely(data_end < xdp->data + ETH_HLEN))
3419 return -EINVAL;
3420
3421 xdp->data_end = data_end;
3422
3423 return 0;
3424}
3425
3426static const struct bpf_func_proto bpf_xdp_adjust_tail_proto = {
3427 .func = bpf_xdp_adjust_tail,
3428 .gpl_only = false,
3429 .ret_type = RET_INTEGER,
3430 .arg1_type = ARG_PTR_TO_CTX,
3431 .arg2_type = ARG_ANYTHING,
3432};
3433
3434BPF_CALL_2(bpf_xdp_adjust_meta, struct xdp_buff *, xdp, int, offset)
3435{
3436 void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame);
3437 void *meta = xdp->data_meta + offset;
3438 unsigned long metalen = xdp->data - meta;
3439
3440 if (xdp_data_meta_unsupported(xdp))
3441 return -ENOTSUPP;
3442 if (unlikely(meta < xdp_frame_end ||
3443 meta > xdp->data))
3444 return -EINVAL;
3445 if (unlikely((metalen & (sizeof(__u32) - 1)) ||
3446 (metalen > 32)))
3447 return -EACCES;
3448
3449 xdp->data_meta = meta;
3450
3451 return 0;
3452}
3453
3454static const struct bpf_func_proto bpf_xdp_adjust_meta_proto = {
3455 .func = bpf_xdp_adjust_meta,
3456 .gpl_only = false,
3457 .ret_type = RET_INTEGER,
3458 .arg1_type = ARG_PTR_TO_CTX,
3459 .arg2_type = ARG_ANYTHING,
3460};
3461
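/* Transmit a single frame out of dev via its ndo_xdp_xmit()
 * implementation, after converting the xdp_buff into an xdp_frame.
 */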
3462static int __bpf_tx_xdp(struct net_device *dev,
3463 struct bpf_map *map,
3464 struct xdp_buff *xdp,
3465 u32 index)
3466{
3467 struct xdp_frame *xdpf;
3468 int err, sent;
3469
	if (!dev->netdev_ops->ndo_xdp_xmit)
		return -EOPNOTSUPP;
3473
3474 err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
3475 if (unlikely(err))
3476 return err;
3477
3478 xdpf = convert_to_xdp_frame(xdp);
3479 if (unlikely(!xdpf))
3480 return -EOVERFLOW;
3481
3482 sent = dev->netdev_ops->ndo_xdp_xmit(dev, 1, &xdpf, XDP_XMIT_FLUSH);
3483 if (sent <= 0)
3484 return sent;
3485 return 0;
3486}
3487
3488static noinline int
3489xdp_do_redirect_slow(struct net_device *dev, struct xdp_buff *xdp,
3490 struct bpf_prog *xdp_prog, struct bpf_redirect_info *ri)
3491{
3492 struct net_device *fwd;
3493 u32 index = ri->tgt_index;
3494 int err;
3495
3496 fwd = dev_get_by_index_rcu(dev_net(dev), index);
3497 ri->tgt_index = 0;
3498 if (unlikely(!fwd)) {
3499 err = -EINVAL;
3500 goto err;
3501 }
3502
3503 err = __bpf_tx_xdp(fwd, NULL, xdp, 0);
3504 if (unlikely(err))
3505 goto err;
3506
3507 _trace_xdp_redirect(dev, xdp_prog, index);
3508 return 0;
3509err:
3510 _trace_xdp_redirect_err(dev, xdp_prog, index, err);
3511 return err;
3512}
3513
3514static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
3515 struct bpf_map *map,
3516 struct xdp_buff *xdp,
3517 u32 index)
3518{
3519 int err;
3520
3521 switch (map->map_type) {
3522 case BPF_MAP_TYPE_DEVMAP:
3523 case BPF_MAP_TYPE_DEVMAP_HASH: {
3524 struct bpf_dtab_netdev *dst = fwd;
3525
3526 err = dev_map_enqueue(dst, xdp, dev_rx);
3527 if (unlikely(err))
3528 return err;
3529 break;
3530 }
3531 case BPF_MAP_TYPE_CPUMAP: {
3532 struct bpf_cpu_map_entry *rcpu = fwd;
3533
3534 err = cpu_map_enqueue(rcpu, xdp, dev_rx);
3535 if (unlikely(err))
3536 return err;
3537 break;
3538 }
3539 case BPF_MAP_TYPE_XSKMAP: {
3540 struct xdp_sock *xs = fwd;
3541
3542 err = __xsk_map_redirect(map, xdp, xs);
3543 return err;
3544 }
3545 default:
3546 break;
3547 }
3548 return 0;
3549}
3550
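/* Flush the map that redirected frames were batched on by this CPU.
 * Drivers call this at the end of their NAPI poll so that enqueued
 * frames actually get transmitted.
 */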
3551void xdp_do_flush_map(void)
3552{
3553 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
3554 struct bpf_map *map = ri->map_to_flush;
3555
3556 ri->map_to_flush = NULL;
3557 if (map) {
3558 switch (map->map_type) {
3559 case BPF_MAP_TYPE_DEVMAP:
3560 case BPF_MAP_TYPE_DEVMAP_HASH:
3561 __dev_map_flush(map);
3562 break;
3563 case BPF_MAP_TYPE_CPUMAP:
3564 __cpu_map_flush(map);
3565 break;
3566 case BPF_MAP_TYPE_XSKMAP:
3567 __xsk_map_flush(map);
3568 break;
3569 default:
3570 break;
3571 }
3572 }
3573}
3574EXPORT_SYMBOL_GPL(xdp_do_flush_map);
3575
3576static inline void *__xdp_map_lookup_elem(struct bpf_map *map, u32 index)
3577{
3578 switch (map->map_type) {
3579 case BPF_MAP_TYPE_DEVMAP:
3580 return __dev_map_lookup_elem(map, index);
3581 case BPF_MAP_TYPE_DEVMAP_HASH:
3582 return __dev_map_hash_lookup_elem(map, index);
3583 case BPF_MAP_TYPE_CPUMAP:
3584 return __cpu_map_lookup_elem(map, index);
3585 case BPF_MAP_TYPE_XSKMAP:
3586 return __xsk_map_lookup_elem(map, index);
3587 default:
3588 return NULL;
3589 }
3590}
3591
3592void bpf_clear_redirect_map(struct bpf_map *map)
3593{
3594 struct bpf_redirect_info *ri;
3595 int cpu;
3596
3597 for_each_possible_cpu(cpu) {
3598 ri = per_cpu_ptr(&bpf_redirect_info, cpu);

		/* Avoid polluting remote cachelines due to writes if
		 * not needed. Once we pass this test, we need the
		 * cmpxchg() to make sure it hasn't been changed in
		 * the meantime by a remote CPU.
		 */
3604 if (unlikely(READ_ONCE(ri->map) == map))
3605 cmpxchg(&ri->map, map, NULL);
3606 }
3607}
3608
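/* Redirect through the map entry that the preceding bpf_redirect_map()
 * call looked up. Enqueues are batched per map: a still-pending flush
 * for a different map is forced out before switching over.
 */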
3609static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
3610 struct bpf_prog *xdp_prog, struct bpf_map *map,
3611 struct bpf_redirect_info *ri)
3612{
3613 u32 index = ri->tgt_index;
3614 void *fwd = ri->tgt_value;
3615 int err;
3616
3617 ri->tgt_index = 0;
3618 ri->tgt_value = NULL;
3619 WRITE_ONCE(ri->map, NULL);
3620
3621 if (ri->map_to_flush && unlikely(ri->map_to_flush != map))
3622 xdp_do_flush_map();
3623
3624 err = __bpf_tx_xdp_map(dev, fwd, map, xdp, index);
3625 if (unlikely(err))
3626 goto err;
3627
3628 ri->map_to_flush = map;
3629 _trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index);
3630 return 0;
3631err:
3632 _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err);
3633 return err;
3634}
3635
3636int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
3637 struct bpf_prog *xdp_prog)
3638{
3639 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
3640 struct bpf_map *map = READ_ONCE(ri->map);
3641
3642 if (likely(map))
3643 return xdp_do_redirect_map(dev, xdp, xdp_prog, map, ri);
3644
3645 return xdp_do_redirect_slow(dev, xdp, xdp_prog, ri);
3646}
3647EXPORT_SYMBOL_GPL(xdp_do_redirect);
3648
3649static int xdp_do_generic_redirect_map(struct net_device *dev,
3650 struct sk_buff *skb,
3651 struct xdp_buff *xdp,
3652 struct bpf_prog *xdp_prog,
3653 struct bpf_map *map)
3654{
3655 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
3656 u32 index = ri->tgt_index;
3657 void *fwd = ri->tgt_value;
3658 int err = 0;
3659
3660 ri->tgt_index = 0;
3661 ri->tgt_value = NULL;
3662 WRITE_ONCE(ri->map, NULL);
3663
3664 if (map->map_type == BPF_MAP_TYPE_DEVMAP ||
3665 map->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
3666 struct bpf_dtab_netdev *dst = fwd;
3667
3668 err = dev_map_generic_redirect(dst, skb, xdp_prog);
3669 if (unlikely(err))
3670 goto err;
3671 } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
3672 struct xdp_sock *xs = fwd;
3673
3674 err = xsk_generic_rcv(xs, xdp);
3675 if (err)
3676 goto err;
3677 consume_skb(skb);
3678 } else {
		/* TODO: Handle BPF_MAP_TYPE_CPUMAP */
3680 err = -EBADRQC;
3681 goto err;
3682 }
3683
3684 _trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index);
3685 return 0;
3686err:
3687 _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err);
3688 return err;
3689}
3690
3691int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
3692 struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
3693{
3694 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
3695 struct bpf_map *map = READ_ONCE(ri->map);
3696 u32 index = ri->tgt_index;
3697 struct net_device *fwd;
3698 int err = 0;
3699
3700 if (map)
3701 return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog,
3702 map);
3703 ri->tgt_index = 0;
3704 fwd = dev_get_by_index_rcu(dev_net(dev), index);
3705 if (unlikely(!fwd)) {
3706 err = -EINVAL;
3707 goto err;
3708 }
3709
3710 err = xdp_ok_fwd_dev(fwd, skb->len);
3711 if (unlikely(err))
3712 goto err;
3713
3714 skb->dev = fwd;
3715 _trace_xdp_redirect(dev, xdp_prog, index);
3716 generic_xdp_tx(skb, xdp_prog);
3717 return 0;
3718err:
3719 _trace_xdp_redirect_err(dev, xdp_prog, index, err);
3720 return err;
3721}
3722EXPORT_SYMBOL_GPL(xdp_do_generic_redirect);
3723
3724BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags)
3725{
3726 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
3727
3728 if (unlikely(flags))
3729 return XDP_ABORTED;
3730
3731 ri->flags = flags;
3732 ri->tgt_index = ifindex;
3733 ri->tgt_value = NULL;
3734 WRITE_ONCE(ri->map, NULL);
3735
3736 return XDP_REDIRECT;
3737}
3738
3739static const struct bpf_func_proto bpf_xdp_redirect_proto = {
3740 .func = bpf_xdp_redirect,
3741 .gpl_only = false,
3742 .ret_type = RET_INTEGER,
3743 .arg1_type = ARG_ANYTHING,
3744 .arg2_type = ARG_ANYTHING,
3745};
3746
3747BPF_CALL_3(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex,
3748 u64, flags)
3749{
3750 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
3751
	/* Lower bits of the flags are used as return code on lookup failure */
3753 if (unlikely(flags > XDP_TX))
3754 return XDP_ABORTED;
3755
3756 ri->tgt_value = __xdp_map_lookup_elem(map, ifindex);
3757 if (unlikely(!ri->tgt_value)) {
		/* If the lookup fails we want to clear out the state in
		 * the redirect_info struct completely, so that if an eBPF
		 * program performs multiple lookups, the last one always
		 * takes precedence.
		 */
3763 WRITE_ONCE(ri->map, NULL);
3764 return flags;
3765 }
3766
3767 ri->flags = flags;
3768 ri->tgt_index = ifindex;
3769 WRITE_ONCE(ri->map, map);
3770
3771 return XDP_REDIRECT;
3772}
3773
3774static const struct bpf_func_proto bpf_xdp_redirect_map_proto = {
3775 .func = bpf_xdp_redirect_map,
3776 .gpl_only = false,
3777 .ret_type = RET_INTEGER,
3778 .arg1_type = ARG_CONST_MAP_PTR,
3779 .arg2_type = ARG_ANYTHING,
3780 .arg3_type = ARG_ANYTHING,
3781};
3782
3783static unsigned long bpf_skb_copy(void *dst_buff, const void *skb,
3784 unsigned long off, unsigned long len)
3785{
3786 void *ptr = skb_header_pointer(skb, off, len, dst_buff);
3787
3788 if (unlikely(!ptr))
3789 return len;
3790 if (ptr != dst_buff)
3791 memcpy(dst_buff, ptr, len);
3792
3793 return 0;
3794}
3795
3796BPF_CALL_5(bpf_skb_event_output, struct sk_buff *, skb, struct bpf_map *, map,
3797 u64, flags, void *, meta, u64, meta_size)
3798{
3799 u64 skb_size = (flags & BPF_F_CTXLEN_MASK) >> 32;
3800
3801 if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
3802 return -EINVAL;
3803 if (unlikely(!skb || skb_size > skb->len))
3804 return -EFAULT;
3805
3806 return bpf_event_output(map, flags, meta, meta_size, skb, skb_size,
3807 bpf_skb_copy);
3808}
3809
3810static const struct bpf_func_proto bpf_skb_event_output_proto = {
3811 .func = bpf_skb_event_output,
3812 .gpl_only = true,
3813 .ret_type = RET_INTEGER,
3814 .arg1_type = ARG_PTR_TO_CTX,
3815 .arg2_type = ARG_CONST_MAP_PTR,
3816 .arg3_type = ARG_ANYTHING,
3817 .arg4_type = ARG_PTR_TO_MEM,
3818 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
3819};
3820
3821static int bpf_skb_output_btf_ids[5];
3822const struct bpf_func_proto bpf_skb_output_proto = {
3823 .func = bpf_skb_event_output,
3824 .gpl_only = true,
3825 .ret_type = RET_INTEGER,
3826 .arg1_type = ARG_PTR_TO_BTF_ID,
3827 .arg2_type = ARG_CONST_MAP_PTR,
3828 .arg3_type = ARG_ANYTHING,
3829 .arg4_type = ARG_PTR_TO_MEM,
3830 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
3831 .btf_id = bpf_skb_output_btf_ids,
3832};
3833
3834static unsigned short bpf_tunnel_key_af(u64 flags)
3835{
3836 return flags & BPF_F_TUNINFO_IPV6 ? AF_INET6 : AF_INET;
3837}
3838
3839BPF_CALL_4(bpf_skb_get_tunnel_key, struct sk_buff *, skb, struct bpf_tunnel_key *, to,
3840 u32, size, u64, flags)
3841{
3842 const struct ip_tunnel_info *info = skb_tunnel_info(skb);
3843 u8 compat[sizeof(struct bpf_tunnel_key)];
3844 void *to_orig = to;
3845 int err;
3846
3847 if (unlikely(!info || (flags & ~(BPF_F_TUNINFO_IPV6)))) {
3848 err = -EINVAL;
3849 goto err_clear;
3850 }
3851 if (ip_tunnel_info_af(info) != bpf_tunnel_key_af(flags)) {
3852 err = -EPROTO;
3853 goto err_clear;
3854 }
3855 if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
3856 err = -EINVAL;
3857 switch (size) {
3858 case offsetof(struct bpf_tunnel_key, tunnel_label):
3859 case offsetof(struct bpf_tunnel_key, tunnel_ext):
3860 goto set_compat;
3861 case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
		/* Fixup deprecated structure layouts here, so we have
		 * a common path later on.
		 */
3865 if (ip_tunnel_info_af(info) != AF_INET)
3866 goto err_clear;
3867set_compat:
3868 to = (struct bpf_tunnel_key *)compat;
3869 break;
3870 default:
3871 goto err_clear;
3872 }
3873 }
3874
3875 to->tunnel_id = be64_to_cpu(info->key.tun_id);
3876 to->tunnel_tos = info->key.tos;
3877 to->tunnel_ttl = info->key.ttl;
3878 to->tunnel_ext = 0;
3879
3880 if (flags & BPF_F_TUNINFO_IPV6) {
3881 memcpy(to->remote_ipv6, &info->key.u.ipv6.src,
3882 sizeof(to->remote_ipv6));
3883 to->tunnel_label = be32_to_cpu(info->key.label);
3884 } else {
3885 to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src);
3886 memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3);
3887 to->tunnel_label = 0;
3888 }
3889
3890 if (unlikely(size != sizeof(struct bpf_tunnel_key)))
3891 memcpy(to_orig, to, size);
3892
3893 return 0;
3894err_clear:
3895 memset(to_orig, 0, size);
3896 return err;
3897}
3898
3899static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
3900 .func = bpf_skb_get_tunnel_key,
3901 .gpl_only = false,
3902 .ret_type = RET_INTEGER,
3903 .arg1_type = ARG_PTR_TO_CTX,
3904 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
3905 .arg3_type = ARG_CONST_SIZE,
3906 .arg4_type = ARG_ANYTHING,
3907};
3908
3909BPF_CALL_3(bpf_skb_get_tunnel_opt, struct sk_buff *, skb, u8 *, to, u32, size)
3910{
3911 const struct ip_tunnel_info *info = skb_tunnel_info(skb);
3912 int err;
3913
3914 if (unlikely(!info ||
3915 !(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))) {
3916 err = -ENOENT;
3917 goto err_clear;
3918 }
3919 if (unlikely(size < info->options_len)) {
3920 err = -ENOMEM;
3921 goto err_clear;
3922 }
3923
3924 ip_tunnel_info_opts_get(to, info);
3925 if (size > info->options_len)
3926 memset(to + info->options_len, 0, size - info->options_len);
3927
3928 return info->options_len;
3929err_clear:
3930 memset(to, 0, size);
3931 return err;
3932}
3933
3934static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = {
3935 .func = bpf_skb_get_tunnel_opt,
3936 .gpl_only = false,
3937 .ret_type = RET_INTEGER,
3938 .arg1_type = ARG_PTR_TO_CTX,
3939 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
3940 .arg3_type = ARG_CONST_SIZE,
3941};
3942
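/* Per-cpu metadata dst used by bpf_skb_set_tunnel_key() to attach
 * transmit tunnel parameters to an skb, allocated on first use in
 * bpf_get_skb_set_tunnel_proto() below.
 */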
3943static struct metadata_dst __percpu *md_dst;
3944
3945BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb,
3946 const struct bpf_tunnel_key *, from, u32, size, u64, flags)
3947{
3948 struct metadata_dst *md = this_cpu_ptr(md_dst);
3949 u8 compat[sizeof(struct bpf_tunnel_key)];
3950 struct ip_tunnel_info *info;
3951
3952 if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX |
3953 BPF_F_DONT_FRAGMENT | BPF_F_SEQ_NUMBER)))
3954 return -EINVAL;
3955 if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
3956 switch (size) {
3957 case offsetof(struct bpf_tunnel_key, tunnel_label):
3958 case offsetof(struct bpf_tunnel_key, tunnel_ext):
3959 case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
			/* Fixup deprecated structure layouts here, so we have
			 * a common path later on.
			 */
3963 memcpy(compat, from, size);
3964 memset(compat + size, 0, sizeof(compat) - size);
3965 from = (const struct bpf_tunnel_key *) compat;
3966 break;
3967 default:
3968 return -EINVAL;
3969 }
3970 }
3971 if (unlikely((!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label) ||
3972 from->tunnel_ext))
3973 return -EINVAL;
3974
3975 skb_dst_drop(skb);
3976 dst_hold((struct dst_entry *) md);
3977 skb_dst_set(skb, (struct dst_entry *) md);
3978
3979 info = &md->u.tun_info;
3980 memset(info, 0, sizeof(*info));
3981 info->mode = IP_TUNNEL_INFO_TX;
3982
3983 info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;
3984 if (flags & BPF_F_DONT_FRAGMENT)
3985 info->key.tun_flags |= TUNNEL_DONT_FRAGMENT;
3986 if (flags & BPF_F_ZERO_CSUM_TX)
3987 info->key.tun_flags &= ~TUNNEL_CSUM;
3988 if (flags & BPF_F_SEQ_NUMBER)
3989 info->key.tun_flags |= TUNNEL_SEQ;
3990
3991 info->key.tun_id = cpu_to_be64(from->tunnel_id);
3992 info->key.tos = from->tunnel_tos;
3993 info->key.ttl = from->tunnel_ttl;
3994
3995 if (flags & BPF_F_TUNINFO_IPV6) {
3996 info->mode |= IP_TUNNEL_INFO_IPV6;
3997 memcpy(&info->key.u.ipv6.dst, from->remote_ipv6,
3998 sizeof(from->remote_ipv6));
3999 info->key.label = cpu_to_be32(from->tunnel_label) &
4000 IPV6_FLOWLABEL_MASK;
4001 } else {
4002 info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4);
4003 }
4004
4005 return 0;
4006}
4007
4008static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
4009 .func = bpf_skb_set_tunnel_key,
4010 .gpl_only = false,
4011 .ret_type = RET_INTEGER,
4012 .arg1_type = ARG_PTR_TO_CTX,
4013 .arg2_type = ARG_PTR_TO_MEM,
4014 .arg3_type = ARG_CONST_SIZE,
4015 .arg4_type = ARG_ANYTHING,
4016};
4017
4018BPF_CALL_3(bpf_skb_set_tunnel_opt, struct sk_buff *, skb,
4019 const u8 *, from, u32, size)
4020{
4021 struct ip_tunnel_info *info = skb_tunnel_info(skb);
4022 const struct metadata_dst *md = this_cpu_ptr(md_dst);
4023
4024 if (unlikely(info != &md->u.tun_info || (size & (sizeof(u32) - 1))))
4025 return -EINVAL;
4026 if (unlikely(size > IP_TUNNEL_OPTS_MAX))
4027 return -ENOMEM;
4028
4029 ip_tunnel_info_opts_set(info, from, size, TUNNEL_OPTIONS_PRESENT);
4030
4031 return 0;
4032}
4033
4034static const struct bpf_func_proto bpf_skb_set_tunnel_opt_proto = {
4035 .func = bpf_skb_set_tunnel_opt,
4036 .gpl_only = false,
4037 .ret_type = RET_INTEGER,
4038 .arg1_type = ARG_PTR_TO_CTX,
4039 .arg2_type = ARG_PTR_TO_MEM,
4040 .arg3_type = ARG_CONST_SIZE,
4041};
4042
4043static const struct bpf_func_proto *
4044bpf_get_skb_set_tunnel_proto(enum bpf_func_id which)
4045{
4046 if (!md_dst) {
4047 struct metadata_dst __percpu *tmp;
4048
4049 tmp = metadata_dst_alloc_percpu(IP_TUNNEL_OPTS_MAX,
4050 METADATA_IP_TUNNEL,
4051 GFP_KERNEL);
4052 if (!tmp)
4053 return NULL;
4054 if (cmpxchg(&md_dst, NULL, tmp))
4055 metadata_dst_free_percpu(tmp);
4056 }
4057
4058 switch (which) {
4059 case BPF_FUNC_skb_set_tunnel_key:
4060 return &bpf_skb_set_tunnel_key_proto;
4061 case BPF_FUNC_skb_set_tunnel_opt:
4062 return &bpf_skb_set_tunnel_opt_proto;
4063 default:
4064 return NULL;
4065 }
4066}
4067
4068BPF_CALL_3(bpf_skb_under_cgroup, struct sk_buff *, skb, struct bpf_map *, map,
4069 u32, idx)
4070{
4071 struct bpf_array *array = container_of(map, struct bpf_array, map);
4072 struct cgroup *cgrp;
4073 struct sock *sk;
4074
4075 sk = skb_to_full_sk(skb);
4076 if (!sk || !sk_fullsock(sk))
4077 return -ENOENT;
4078 if (unlikely(idx >= array->map.max_entries))
4079 return -E2BIG;
4080
4081 cgrp = READ_ONCE(array->ptrs[idx]);
4082 if (unlikely(!cgrp))
4083 return -EAGAIN;
4084
4085 return sk_under_cgroup_hierarchy(sk, cgrp);
4086}
4087
4088static const struct bpf_func_proto bpf_skb_under_cgroup_proto = {
4089 .func = bpf_skb_under_cgroup,
4090 .gpl_only = false,
4091 .ret_type = RET_INTEGER,
4092 .arg1_type = ARG_PTR_TO_CTX,
4093 .arg2_type = ARG_CONST_MAP_PTR,
4094 .arg3_type = ARG_ANYTHING,
4095};
4096
4097#ifdef CONFIG_SOCK_CGROUP_DATA
4098BPF_CALL_1(bpf_skb_cgroup_id, const struct sk_buff *, skb)
4099{
4100 struct sock *sk = skb_to_full_sk(skb);
4101 struct cgroup *cgrp;
4102
4103 if (!sk || !sk_fullsock(sk))
4104 return 0;
4105
4106 cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
4107 return cgroup_id(cgrp);
4108}
4109
4110static const struct bpf_func_proto bpf_skb_cgroup_id_proto = {
4111 .func = bpf_skb_cgroup_id,
4112 .gpl_only = false,
4113 .ret_type = RET_INTEGER,
4114 .arg1_type = ARG_PTR_TO_CTX,
4115};
4116
4117BPF_CALL_2(bpf_skb_ancestor_cgroup_id, const struct sk_buff *, skb, int,
4118 ancestor_level)
4119{
4120 struct sock *sk = skb_to_full_sk(skb);
4121 struct cgroup *ancestor;
4122 struct cgroup *cgrp;
4123
4124 if (!sk || !sk_fullsock(sk))
4125 return 0;
4126
4127 cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
4128 ancestor = cgroup_ancestor(cgrp, ancestor_level);
4129 if (!ancestor)
4130 return 0;
4131
4132 return cgroup_id(ancestor);
4133}
4134
4135static const struct bpf_func_proto bpf_skb_ancestor_cgroup_id_proto = {
4136 .func = bpf_skb_ancestor_cgroup_id,
4137 .gpl_only = false,
4138 .ret_type = RET_INTEGER,
4139 .arg1_type = ARG_PTR_TO_CTX,
4140 .arg2_type = ARG_ANYTHING,
4141};
4142#endif
4143
4144static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff,
4145 unsigned long off, unsigned long len)
4146{
4147 memcpy(dst_buff, src_buff + off, len);
4148 return 0;
4149}
4150
4151BPF_CALL_5(bpf_xdp_event_output, struct xdp_buff *, xdp, struct bpf_map *, map,
4152 u64, flags, void *, meta, u64, meta_size)
4153{
4154 u64 xdp_size = (flags & BPF_F_CTXLEN_MASK) >> 32;
4155
4156 if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
4157 return -EINVAL;
4158 if (unlikely(xdp_size > (unsigned long)(xdp->data_end - xdp->data)))
4159 return -EFAULT;
4160
4161 return bpf_event_output(map, flags, meta, meta_size, xdp->data,
4162 xdp_size, bpf_xdp_copy);
4163}
4164
4165static const struct bpf_func_proto bpf_xdp_event_output_proto = {
4166 .func = bpf_xdp_event_output,
4167 .gpl_only = true,
4168 .ret_type = RET_INTEGER,
4169 .arg1_type = ARG_PTR_TO_CTX,
4170 .arg2_type = ARG_CONST_MAP_PTR,
4171 .arg3_type = ARG_ANYTHING,
4172 .arg4_type = ARG_PTR_TO_MEM,
4173 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
4174};
4175
4176BPF_CALL_1(bpf_get_socket_cookie, struct sk_buff *, skb)
4177{
4178 return skb->sk ? sock_gen_cookie(skb->sk) : 0;
4179}
4180
4181static const struct bpf_func_proto bpf_get_socket_cookie_proto = {
4182 .func = bpf_get_socket_cookie,
4183 .gpl_only = false,
4184 .ret_type = RET_INTEGER,
4185 .arg1_type = ARG_PTR_TO_CTX,
4186};
4187
4188BPF_CALL_1(bpf_get_socket_cookie_sock_addr, struct bpf_sock_addr_kern *, ctx)
4189{
4190 return sock_gen_cookie(ctx->sk);
4191}
4192
4193static const struct bpf_func_proto bpf_get_socket_cookie_sock_addr_proto = {
4194 .func = bpf_get_socket_cookie_sock_addr,
4195 .gpl_only = false,
4196 .ret_type = RET_INTEGER,
4197 .arg1_type = ARG_PTR_TO_CTX,
4198};
4199
4200BPF_CALL_1(bpf_get_socket_cookie_sock_ops, struct bpf_sock_ops_kern *, ctx)
4201{
4202 return sock_gen_cookie(ctx->sk);
4203}
4204
4205static const struct bpf_func_proto bpf_get_socket_cookie_sock_ops_proto = {
4206 .func = bpf_get_socket_cookie_sock_ops,
4207 .gpl_only = false,
4208 .ret_type = RET_INTEGER,
4209 .arg1_type = ARG_PTR_TO_CTX,
4210};
4211
4212BPF_CALL_1(bpf_get_socket_uid, struct sk_buff *, skb)
4213{
4214 struct sock *sk = sk_to_full_sk(skb->sk);
4215 kuid_t kuid;
4216
4217 if (!sk || !sk_fullsock(sk))
4218 return overflowuid;
4219 kuid = sock_net_uid(sock_net(sk), sk);
4220 return from_kuid_munged(sock_net(sk)->user_ns, kuid);
4221}
4222
4223static const struct bpf_func_proto bpf_get_socket_uid_proto = {
4224 .func = bpf_get_socket_uid,
4225 .gpl_only = false,
4226 .ret_type = RET_INTEGER,
4227 .arg1_type = ARG_PTR_TO_CTX,
4228};
4229
4230BPF_CALL_5(bpf_sockopt_event_output, struct bpf_sock_ops_kern *, bpf_sock,
4231 struct bpf_map *, map, u64, flags, void *, data, u64, size)
4232{
4233 if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
4234 return -EINVAL;
4235
4236 return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
4237}
4238
4239static const struct bpf_func_proto bpf_sockopt_event_output_proto = {
4240 .func = bpf_sockopt_event_output,
4241 .gpl_only = true,
4242 .ret_type = RET_INTEGER,
4243 .arg1_type = ARG_PTR_TO_CTX,
4244 .arg2_type = ARG_CONST_MAP_PTR,
4245 .arg3_type = ARG_ANYTHING,
4246 .arg4_type = ARG_PTR_TO_MEM,
4247 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
4248};
4249
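/* bpf_setsockopt(): restricted setsockopt() for sock_ops programs.
 * Only a fixed subset of SOL_SOCKET, SOL_IP, SOL_IPV6 and SOL_TCP
 * options can be written, and the fields are updated directly since
 * the socket is already locked by the sock_ops caller. Illustrative
 * use from a BPF_PROG_TYPE_SOCK_OPS program:
 *
 *	int one = 1;
 *	bpf_setsockopt(skops, SOL_TCP, TCP_SAVE_SYN, &one, sizeof(one));
 */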
4250BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
4251 int, level, int, optname, char *, optval, int, optlen)
4252{
4253 struct sock *sk = bpf_sock->sk;
4254 int ret = 0;
4255 int val;
4256
4257 if (!sk_fullsock(sk))
4258 return -EINVAL;
4259
4260 if (level == SOL_SOCKET) {
4261 if (optlen != sizeof(int))
4262 return -EINVAL;
4263 val = *((int *)optval);
4264
		/* Only some socketops are supported */
4266 switch (optname) {
4267 case SO_RCVBUF:
4268 val = min_t(u32, val, sysctl_rmem_max);
4269 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
4270 WRITE_ONCE(sk->sk_rcvbuf,
4271 max_t(int, val * 2, SOCK_MIN_RCVBUF));
4272 break;
4273 case SO_SNDBUF:
4274 val = min_t(u32, val, sysctl_wmem_max);
4275 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
4276 WRITE_ONCE(sk->sk_sndbuf,
4277 max_t(int, val * 2, SOCK_MIN_SNDBUF));
4278 break;
4279 case SO_MAX_PACING_RATE:
4280 if (val != ~0U)
4281 cmpxchg(&sk->sk_pacing_status,
4282 SK_PACING_NONE,
4283 SK_PACING_NEEDED);
4284 sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val;
4285 sk->sk_pacing_rate = min(sk->sk_pacing_rate,
4286 sk->sk_max_pacing_rate);
4287 break;
4288 case SO_PRIORITY:
4289 sk->sk_priority = val;
4290 break;
4291 case SO_RCVLOWAT:
4292 if (val < 0)
4293 val = INT_MAX;
4294 WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
4295 break;
4296 case SO_MARK:
4297 if (sk->sk_mark != val) {
4298 sk->sk_mark = val;
4299 sk_dst_reset(sk);
4300 }
4301 break;
4302 default:
4303 ret = -EINVAL;
4304 }
4305#ifdef CONFIG_INET
4306 } else if (level == SOL_IP) {
4307 if (optlen != sizeof(int) || sk->sk_family != AF_INET)
4308 return -EINVAL;
4309
4310 val = *((int *)optval);
4311
4312 switch (optname) {
4313 case IP_TOS:
4314 if (val < -1 || val > 0xff) {
4315 ret = -EINVAL;
4316 } else {
4317 struct inet_sock *inet = inet_sk(sk);
4318
4319 if (val == -1)
4320 val = 0;
4321 inet->tos = val;
4322 }
4323 break;
4324 default:
4325 ret = -EINVAL;
4326 }
4327#if IS_ENABLED(CONFIG_IPV6)
4328 } else if (level == SOL_IPV6) {
4329 if (optlen != sizeof(int) || sk->sk_family != AF_INET6)
4330 return -EINVAL;
4331
4332 val = *((int *)optval);
4333
4334 switch (optname) {
4335 case IPV6_TCLASS:
4336 if (val < -1 || val > 0xff) {
4337 ret = -EINVAL;
4338 } else {
4339 struct ipv6_pinfo *np = inet6_sk(sk);
4340
4341 if (val == -1)
4342 val = 0;
4343 np->tclass = val;
4344 }
4345 break;
4346 default:
4347 ret = -EINVAL;
4348 }
4349#endif
4350 } else if (level == SOL_TCP &&
4351 sk->sk_prot->setsockopt == tcp_setsockopt) {
4352 if (optname == TCP_CONGESTION) {
4353 char name[TCP_CA_NAME_MAX];
4354 bool reinit = bpf_sock->op > BPF_SOCK_OPS_NEEDS_ECN;
4355
4356 strncpy(name, optval, min_t(long, optlen,
4357 TCP_CA_NAME_MAX-1));
4358 name[TCP_CA_NAME_MAX-1] = 0;
4359 ret = tcp_set_congestion_control(sk, name, false,
4360 reinit, true);
4361 } else {
4362 struct tcp_sock *tp = tcp_sk(sk);
4363
4364 if (optlen != sizeof(int))
4365 return -EINVAL;
4366
4367 val = *((int *)optval);
4368
4369 switch (optname) {
4370 case TCP_BPF_IW:
4371 if (val <= 0 || tp->data_segs_out > tp->syn_data)
4372 ret = -EINVAL;
4373 else
4374 tp->snd_cwnd = val;
4375 break;
4376 case TCP_BPF_SNDCWND_CLAMP:
4377 if (val <= 0) {
4378 ret = -EINVAL;
4379 } else {
4380 tp->snd_cwnd_clamp = val;
4381 tp->snd_ssthresh = val;
4382 }
4383 break;
4384 case TCP_SAVE_SYN:
4385 if (val < 0 || val > 1)
4386 ret = -EINVAL;
4387 else
4388 tp->save_syn = val;
4389 break;
4390 default:
4391 ret = -EINVAL;
4392 }
4393 }
4394#endif
4395 } else {
4396 ret = -EINVAL;
4397 }
4398 return ret;
4399}
4400
4401static const struct bpf_func_proto bpf_setsockopt_proto = {
4402 .func = bpf_setsockopt,
4403 .gpl_only = false,
4404 .ret_type = RET_INTEGER,
4405 .arg1_type = ARG_PTR_TO_CTX,
4406 .arg2_type = ARG_ANYTHING,
4407 .arg3_type = ARG_ANYTHING,
4408 .arg4_type = ARG_PTR_TO_MEM,
4409 .arg5_type = ARG_CONST_SIZE,
4410};
4411
4412BPF_CALL_5(bpf_getsockopt, struct bpf_sock_ops_kern *, bpf_sock,
4413 int, level, int, optname, char *, optval, int, optlen)
4414{
4415 struct sock *sk = bpf_sock->sk;
4416
4417 if (!sk_fullsock(sk))
4418 goto err_clear;
4419#ifdef CONFIG_INET
4420 if (level == SOL_TCP && sk->sk_prot->getsockopt == tcp_getsockopt) {
4421 struct inet_connection_sock *icsk;
4422 struct tcp_sock *tp;
4423
4424 switch (optname) {
4425 case TCP_CONGESTION:
4426 icsk = inet_csk(sk);
4427
4428 if (!icsk->icsk_ca_ops || optlen <= 1)
4429 goto err_clear;
4430 strncpy(optval, icsk->icsk_ca_ops->name, optlen);
4431 optval[optlen - 1] = 0;
4432 break;
4433 case TCP_SAVED_SYN:
4434 tp = tcp_sk(sk);
4435
4436 if (optlen <= 0 || !tp->saved_syn ||
4437 optlen > tp->saved_syn[0])
4438 goto err_clear;
4439 memcpy(optval, tp->saved_syn + 1, optlen);
4440 break;
4441 default:
4442 goto err_clear;
4443 }
4444 } else if (level == SOL_IP) {
4445 struct inet_sock *inet = inet_sk(sk);
4446
4447 if (optlen != sizeof(int) || sk->sk_family != AF_INET)
4448 goto err_clear;
4449
		/* Only some options are supported */
4451 switch (optname) {
4452 case IP_TOS:
4453 *((int *)optval) = (int)inet->tos;
4454 break;
4455 default:
4456 goto err_clear;
4457 }
4458#if IS_ENABLED(CONFIG_IPV6)
4459 } else if (level == SOL_IPV6) {
4460 struct ipv6_pinfo *np = inet6_sk(sk);
4461
4462 if (optlen != sizeof(int) || sk->sk_family != AF_INET6)
4463 goto err_clear;
4464
		/* Only some options are supported */
4466 switch (optname) {
4467 case IPV6_TCLASS:
4468 *((int *)optval) = (int)np->tclass;
4469 break;
4470 default:
4471 goto err_clear;
4472 }
4473#endif
4474 } else {
4475 goto err_clear;
4476 }
4477 return 0;
4478#endif
4479err_clear:
4480 memset(optval, 0, optlen);
4481 return -EINVAL;
4482}
4483
4484static const struct bpf_func_proto bpf_getsockopt_proto = {
4485 .func = bpf_getsockopt,
4486 .gpl_only = false,
4487 .ret_type = RET_INTEGER,
4488 .arg1_type = ARG_PTR_TO_CTX,
4489 .arg2_type = ARG_ANYTHING,
4490 .arg3_type = ARG_ANYTHING,
4491 .arg4_type = ARG_PTR_TO_UNINIT_MEM,
4492 .arg5_type = ARG_CONST_SIZE,
4493};
4494
4495BPF_CALL_2(bpf_sock_ops_cb_flags_set, struct bpf_sock_ops_kern *, bpf_sock,
4496 int, argval)
4497{
4498 struct sock *sk = bpf_sock->sk;
4499 int val = argval & BPF_SOCK_OPS_ALL_CB_FLAGS;
4500
4501 if (!IS_ENABLED(CONFIG_INET) || !sk_fullsock(sk))
4502 return -EINVAL;
4503
4504 tcp_sk(sk)->bpf_sock_ops_cb_flags = val;
4505
4506 return argval & (~BPF_SOCK_OPS_ALL_CB_FLAGS);
4507}
4508
4509static const struct bpf_func_proto bpf_sock_ops_cb_flags_set_proto = {
4510 .func = bpf_sock_ops_cb_flags_set,
4511 .gpl_only = false,
4512 .ret_type = RET_INTEGER,
4513 .arg1_type = ARG_PTR_TO_CTX,
4514 .arg2_type = ARG_ANYTHING,
4515};
4516
4517const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly;
4518EXPORT_SYMBOL_GPL(ipv6_bpf_stub);
4519
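/* bpf_bind(): lets a BPF_CGROUP_INET4/6_CONNECT program bind the
 * socket to a local address before the connect proceeds, e.g. to pin
 * traffic to a specific source IP.
 */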
4520BPF_CALL_3(bpf_bind, struct bpf_sock_addr_kern *, ctx, struct sockaddr *, addr,
4521 int, addr_len)
4522{
4523#ifdef CONFIG_INET
4524 struct sock *sk = ctx->sk;
4525 int err;
4526
	/* Only binding to an IP address is supported; the port has to
	 * be left at 0 so that port selection stays with the kernel.
	 */
4530 err = -EINVAL;
4531 if (addr_len < offsetofend(struct sockaddr, sa_family))
4532 return err;
4533 if (addr->sa_family == AF_INET) {
4534 if (addr_len < sizeof(struct sockaddr_in))
4535 return err;
4536 if (((struct sockaddr_in *)addr)->sin_port != htons(0))
4537 return err;
4538 return __inet_bind(sk, addr, addr_len, true, false);
4539#if IS_ENABLED(CONFIG_IPV6)
4540 } else if (addr->sa_family == AF_INET6) {
4541 if (addr_len < SIN6_LEN_RFC2133)
4542 return err;
4543 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
4544 return err;
		/* Going through the ipv6 stub avoids a hard dependency
		 * of the core on ipv6 when it is built as a module.
		 */
4548 return ipv6_bpf_stub->inet6_bind(sk, addr, addr_len, true, false);
4549#endif
4550 }
4551#endif
4552
4553 return -EAFNOSUPPORT;
4554}
4555
4556static const struct bpf_func_proto bpf_bind_proto = {
4557 .func = bpf_bind,
4558 .gpl_only = false,
4559 .ret_type = RET_INTEGER,
4560 .arg1_type = ARG_PTR_TO_CTX,
4561 .arg2_type = ARG_PTR_TO_MEM,
4562 .arg3_type = ARG_CONST_SIZE,
4563};
4564
4565#ifdef CONFIG_XFRM
4566BPF_CALL_5(bpf_skb_get_xfrm_state, struct sk_buff *, skb, u32, index,
4567 struct bpf_xfrm_state *, to, u32, size, u64, flags)
4568{
4569 const struct sec_path *sp = skb_sec_path(skb);
4570 const struct xfrm_state *x;
4571
4572 if (!sp || unlikely(index >= sp->len || flags))
4573 goto err_clear;
4574
4575 x = sp->xvec[index];
4576
4577 if (unlikely(size != sizeof(struct bpf_xfrm_state)))
4578 goto err_clear;
4579
4580 to->reqid = x->props.reqid;
4581 to->spi = x->id.spi;
4582 to->family = x->props.family;
4583 to->ext = 0;
4584
4585 if (to->family == AF_INET6) {
4586 memcpy(to->remote_ipv6, x->props.saddr.a6,
4587 sizeof(to->remote_ipv6));
4588 } else {
4589 to->remote_ipv4 = x->props.saddr.a4;
4590 memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3);
4591 }
4592
4593 return 0;
4594err_clear:
4595 memset(to, 0, size);
4596 return -EINVAL;
4597}
4598
4599static const struct bpf_func_proto bpf_skb_get_xfrm_state_proto = {
4600 .func = bpf_skb_get_xfrm_state,
4601 .gpl_only = false,
4602 .ret_type = RET_INTEGER,
4603 .arg1_type = ARG_PTR_TO_CTX,
4604 .arg2_type = ARG_ANYTHING,
4605 .arg3_type = ARG_PTR_TO_UNINIT_MEM,
4606 .arg4_type = ARG_CONST_SIZE,
4607 .arg5_type = ARG_ANYTHING,
4608};
4609#endif
4610
4611#if IS_ENABLED(CONFIG_INET) || IS_ENABLED(CONFIG_IPV6)
4612static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params,
4613 const struct neighbour *neigh,
4614 const struct net_device *dev)
4615{
4616 memcpy(params->dmac, neigh->ha, ETH_ALEN);
4617 memcpy(params->smac, dev->dev_addr, ETH_ALEN);
4618 params->h_vlan_TCI = 0;
4619 params->h_vlan_proto = 0;
4620 params->ifindex = dev->ifindex;
4621
4622 return 0;
4623}
4624#endif
4625
4626#if IS_ENABLED(CONFIG_INET)
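/* IPv4 side of bpf_fib_lookup(): do a forwarding lookup (optionally
 * restricted to the device's own table with BPF_FIB_LOOKUP_DIRECT),
 * check the route MTU against tot_len when asked, and resolve the
 * neighbour so dmac/smac can be filled in. Returns one of the
 * BPF_FIB_LKUP_RET_* codes, or a negative errno on bad input.
 */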
4627static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
4628 u32 flags, bool check_mtu)
4629{
4630 struct fib_nh_common *nhc;
4631 struct in_device *in_dev;
4632 struct neighbour *neigh;
4633 struct net_device *dev;
4634 struct fib_result res;
4635 struct flowi4 fl4;
4636 int err;
4637 u32 mtu;
4638
4639 dev = dev_get_by_index_rcu(net, params->ifindex);
4640 if (unlikely(!dev))
4641 return -ENODEV;
4642
	/* verify forwarding is enabled on this interface */
4644 in_dev = __in_dev_get_rcu(dev);
4645 if (unlikely(!in_dev || !IN_DEV_FORWARD(in_dev)))
4646 return BPF_FIB_LKUP_RET_FWD_DISABLED;
4647
4648 if (flags & BPF_FIB_LOOKUP_OUTPUT) {
4649 fl4.flowi4_iif = 1;
4650 fl4.flowi4_oif = params->ifindex;
4651 } else {
4652 fl4.flowi4_iif = params->ifindex;
4653 fl4.flowi4_oif = 0;
4654 }
4655 fl4.flowi4_tos = params->tos & IPTOS_RT_MASK;
4656 fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
4657 fl4.flowi4_flags = 0;
4658
4659 fl4.flowi4_proto = params->l4_protocol;
4660 fl4.daddr = params->ipv4_dst;
4661 fl4.saddr = params->ipv4_src;
4662 fl4.fl4_sport = params->sport;
4663 fl4.fl4_dport = params->dport;
4664
4665 if (flags & BPF_FIB_LOOKUP_DIRECT) {
4666 u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
4667 struct fib_table *tb;
4668
4669 tb = fib_get_table(net, tbid);
4670 if (unlikely(!tb))
4671 return BPF_FIB_LKUP_RET_NOT_FWDED;
4672
4673 err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF);
4674 } else {
4675 fl4.flowi4_mark = 0;
4676 fl4.flowi4_secid = 0;
4677 fl4.flowi4_tun_key.tun_id = 0;
4678 fl4.flowi4_uid = sock_net_uid(net, NULL);
4679
4680 err = fib_lookup(net, &fl4, &res, FIB_LOOKUP_NOREF);
4681 }
4682
4683 if (err) {
4684 /* map fib lookup errors to RTN_ type */
4685 if (err == -EINVAL)
4686 return BPF_FIB_LKUP_RET_BLACKHOLE;
4687 if (err == -EHOSTUNREACH)
4688 return BPF_FIB_LKUP_RET_UNREACHABLE;
4689 if (err == -EACCES)
4690 return BPF_FIB_LKUP_RET_PROHIBIT;
4691
4692 return BPF_FIB_LKUP_RET_NOT_FWDED;
4693 }
4694
4695 if (res.type != RTN_UNICAST)
4696 return BPF_FIB_LKUP_RET_NOT_FWDED;
4697
4698 if (fib_info_num_path(res.fi) > 1)
4699 fib_select_path(net, &res, &fl4, NULL);
4700
4701 if (check_mtu) {
4702 mtu = ip_mtu_from_fib_result(&res, params->ipv4_dst);
4703 if (params->tot_len > mtu)
4704 return BPF_FIB_LKUP_RET_FRAG_NEEDED;
4705 }
4706
4707 nhc = res.nhc;
4708
4709 /* do not handle lwt encaps right now */
4710 if (nhc->nhc_lwtstate)
4711 return BPF_FIB_LKUP_RET_UNSUPP_LWT;
4712
4713 dev = nhc->nhc_dev;
4714
4715 params->rt_metric = res.fi->fib_priority;
4716
4717 /* xdp and cls_bpf programs are run in RCU-bh context, so
4718 * rcu_read_lock_bh is not needed here
4719 */
4720 if (likely(nhc->nhc_gw_family != AF_INET6)) {
4721 if (nhc->nhc_gw_family)
4722 params->ipv4_dst = nhc->nhc_gw.ipv4;
4723
4724 neigh = __ipv4_neigh_lookup_noref(dev,
4725 (__force u32)params->ipv4_dst);
4726 } else {
4727 struct in6_addr *dst = (struct in6_addr *)params->ipv6_dst;
4728
4729 params->family = AF_INET6;
4730 *dst = nhc->nhc_gw.ipv6;
4731 neigh = __ipv6_neigh_lookup_noref_stub(dev, dst);
4732 }
4733
4734 if (!neigh)
4735 return BPF_FIB_LKUP_RET_NO_NEIGH;
4736
4737 return bpf_fib_set_fwd_params(params, neigh, dev);
4738}
4739#endif
4740
4741#if IS_ENABLED(CONFIG_IPV6)
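/* IPv6 counterpart of bpf_ipv4_fib_lookup(). All IPv6-specific calls go
 * through ipv6_stub so this file does not depend on the IPv6 module being
 * built in.
 */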
4742static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
4743 u32 flags, bool check_mtu)
4744{
4745 struct in6_addr *src = (struct in6_addr *) params->ipv6_src;
4746 struct in6_addr *dst = (struct in6_addr *) params->ipv6_dst;
4747 struct fib6_result res = {};
4748 struct neighbour *neigh;
4749 struct net_device *dev;
4750 struct inet6_dev *idev;
4751 struct flowi6 fl6;
4752 int strict = 0;
4753 int oif, err;
4754 u32 mtu;
4755
4756 /* link local addresses are never forwarded */
4757 if (rt6_need_strict(dst) || rt6_need_strict(src))
4758 return BPF_FIB_LKUP_RET_NOT_FWDED;
4759
4760 dev = dev_get_by_index_rcu(net, params->ifindex);
4761 if (unlikely(!dev))
4762 return -ENODEV;
4763
4764 idev = __in6_dev_get_safely(dev);
4765 if (unlikely(!idev || !idev->cnf.forwarding))
4766 return BPF_FIB_LKUP_RET_FWD_DISABLED;
4767
4768 if (flags & BPF_FIB_LOOKUP_OUTPUT) {
4769 fl6.flowi6_iif = 1;
4770 oif = fl6.flowi6_oif = params->ifindex;
4771 } else {
4772 oif = fl6.flowi6_iif = params->ifindex;
4773 fl6.flowi6_oif = 0;
4774 strict = RT6_LOOKUP_F_HAS_SADDR;
4775 }
4776 fl6.flowlabel = params->flowinfo;
4777 fl6.flowi6_scope = 0;
4778 fl6.flowi6_flags = 0;
4779 fl6.mp_hash = 0;
4780
4781 fl6.flowi6_proto = params->l4_protocol;
4782 fl6.daddr = *dst;
4783 fl6.saddr = *src;
4784 fl6.fl6_sport = params->sport;
4785 fl6.fl6_dport = params->dport;
4786
4787 if (flags & BPF_FIB_LOOKUP_DIRECT) {
4788 u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
4789 struct fib6_table *tb;
4790
4791 tb = ipv6_stub->fib6_get_table(net, tbid);
4792 if (unlikely(!tb))
4793 return BPF_FIB_LKUP_RET_NOT_FWDED;
4794
4795 err = ipv6_stub->fib6_table_lookup(net, tb, oif, &fl6, &res,
4796 strict);
4797 } else {
4798 fl6.flowi6_mark = 0;
4799 fl6.flowi6_secid = 0;
4800 fl6.flowi6_tun_key.tun_id = 0;
4801 fl6.flowi6_uid = sock_net_uid(net, NULL);
4802
4803 err = ipv6_stub->fib6_lookup(net, oif, &fl6, &res, strict);
4804 }
4805
4806 if (unlikely(err || IS_ERR_OR_NULL(res.f6i) ||
4807 res.f6i == net->ipv6.fib6_null_entry))
4808 return BPF_FIB_LKUP_RET_NOT_FWDED;
4809
4810 switch (res.fib6_type) {
4811 /* only unicast is forwarded */
4812 case RTN_UNICAST:
4813 break;
4814 case RTN_BLACKHOLE:
4815 return BPF_FIB_LKUP_RET_BLACKHOLE;
4816 case RTN_UNREACHABLE:
4817 return BPF_FIB_LKUP_RET_UNREACHABLE;
4818 case RTN_PROHIBIT:
4819 return BPF_FIB_LKUP_RET_PROHIBIT;
4820 default:
4821 return BPF_FIB_LKUP_RET_NOT_FWDED;
4822 }
4823
4824 ipv6_stub->fib6_select_path(net, &res, &fl6, fl6.flowi6_oif,
4825 fl6.flowi6_oif != 0, NULL, strict);
4826
4827 if (check_mtu) {
4828 mtu = ipv6_stub->ip6_mtu_from_fib6(&res, dst, src);
4829 if (params->tot_len > mtu)
4830 return BPF_FIB_LKUP_RET_FRAG_NEEDED;
4831 }
4832
4833 if (res.nh->fib_nh_lws)
4834 return BPF_FIB_LKUP_RET_UNSUPP_LWT;
4835
4836 if (res.nh->fib_nh_gw_family)
4837 *dst = res.nh->fib_nh_gw6;
4838
4839 dev = res.nh->fib_nh_dev;
4840 params->rt_metric = res.f6i->fib6_metric;
4841
4842 /* xdp and cls_bpf programs are run in RCU-bh context, so
4843 * rcu_read_lock_bh is not needed here
4844 */
4845 neigh = __ipv6_neigh_lookup_noref_stub(dev, dst);
4846 if (!neigh)
4847 return BPF_FIB_LKUP_RET_NO_NEIGH;
4848
4849 return bpf_fib_set_fwd_params(params, neigh, dev);
4850}
4851#endif
4852
4853BPF_CALL_4(bpf_xdp_fib_lookup, struct xdp_buff *, ctx,
4854 struct bpf_fib_lookup *, params, int, plen, u32, flags)
4855{
4856 if (plen < sizeof(*params))
4857 return -EINVAL;
4858
4859 if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT))
4860 return -EINVAL;
4861
4862 switch (params->family) {
4863#if IS_ENABLED(CONFIG_INET)
4864 case AF_INET:
4865 return bpf_ipv4_fib_lookup(dev_net(ctx->rxq->dev), params,
4866 flags, true);
4867#endif
4868#if IS_ENABLED(CONFIG_IPV6)
4869 case AF_INET6:
4870 return bpf_ipv6_fib_lookup(dev_net(ctx->rxq->dev), params,
4871 flags, true);
4872#endif
4873 }
4874 return -EAFNOSUPPORT;
4875}
4876
4877static const struct bpf_func_proto bpf_xdp_fib_lookup_proto = {
4878 .func = bpf_xdp_fib_lookup,
4879 .gpl_only = true,
4880 .ret_type = RET_INTEGER,
4881 .arg1_type = ARG_PTR_TO_CTX,
4882 .arg2_type = ARG_PTR_TO_MEM,
4883 .arg3_type = ARG_CONST_SIZE,
4884 .arg4_type = ARG_ANYTHING,
4885};
4886
4887BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb,
4888 struct bpf_fib_lookup *, params, int, plen, u32, flags)
4889{
4890 struct net *net = dev_net(skb->dev);
4891 int rc = -EAFNOSUPPORT;
4892
4893 if (plen < sizeof(*params))
4894 return -EINVAL;
4895
4896 if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT))
4897 return -EINVAL;
4898
4899 switch (params->family) {
4900#if IS_ENABLED(CONFIG_INET)
4901 case AF_INET:
4902 rc = bpf_ipv4_fib_lookup(net, params, flags, false);
4903 break;
4904#endif
4905#if IS_ENABLED(CONFIG_IPV6)
4906 case AF_INET6:
4907 rc = bpf_ipv6_fib_lookup(net, params, flags, false);
4908 break;
4909#endif
4910 }
4911
4912 if (!rc) {
4913 struct net_device *dev;
4914
4915 dev = dev_get_by_index_rcu(net, params->ifindex);
4916 if (!is_skb_forwardable(dev, skb))
4917 rc = BPF_FIB_LKUP_RET_FRAG_NEEDED;
4918 }
4919
4920 return rc;
4921}
4922
4923static const struct bpf_func_proto bpf_skb_fib_lookup_proto = {
4924 .func = bpf_skb_fib_lookup,
4925 .gpl_only = true,
4926 .ret_type = RET_INTEGER,
4927 .arg1_type = ARG_PTR_TO_CTX,
4928 .arg2_type = ARG_PTR_TO_MEM,
4929 .arg3_type = ARG_CONST_SIZE,
4930 .arg4_type = ARG_ANYTHING,
4931};
4932
4933#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
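/* Validate @hdr as an SRH and apply it to the packet: inline insertion
 * rewrites the existing IPv6 packet in place, full encapsulation adds an
 * outer IPv6 header. On success the packet is re-routed via the new active
 * segment.
 */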
4934static int bpf_push_seg6_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len)
4935{
4936 int err;
4937 struct ipv6_sr_hdr *srh = (struct ipv6_sr_hdr *)hdr;
4938
4939 if (!seg6_validate_srh(srh, len))
4940 return -EINVAL;
4941
4942 switch (type) {
4943 case BPF_LWT_ENCAP_SEG6_INLINE:
4944 if (skb->protocol != htons(ETH_P_IPV6))
4945 return -EBADMSG;
4946
4947 err = seg6_do_srh_inline(skb, srh);
4948 break;
4949 case BPF_LWT_ENCAP_SEG6:
4950 skb_reset_inner_headers(skb);
4951 skb->encapsulation = 1;
4952 err = seg6_do_srh_encap(skb, srh, IPPROTO_IPV6);
4953 break;
4954 default:
4955 return -EINVAL;
4956 }
4957
4958 bpf_compute_data_pointers(skb);
4959 if (err)
4960 return err;
4961
4962 ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
4963 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
4964
4965 return seg6_lookup_nexthop(skb, NULL, 0);
4966}
4967#endif
4968
4969#if IS_ENABLED(CONFIG_LWTUNNEL_BPF)
4970static int bpf_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len,
4971 bool ingress)
4972{
4973 return bpf_lwt_push_ip_encap(skb, hdr, len, ingress);
4974}
4975#endif
4976
4977BPF_CALL_4(bpf_lwt_in_push_encap, struct sk_buff *, skb, u32, type, void *, hdr,
4978 u32, len)
4979{
4980 switch (type) {
4981#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
4982 case BPF_LWT_ENCAP_SEG6:
4983 case BPF_LWT_ENCAP_SEG6_INLINE:
4984 return bpf_push_seg6_encap(skb, type, hdr, len);
4985#endif
4986#if IS_ENABLED(CONFIG_LWTUNNEL_BPF)
4987 case BPF_LWT_ENCAP_IP:
4988 return bpf_push_ip_encap(skb, hdr, len, true /* ingress */);
4989#endif
4990 default:
4991 return -EINVAL;
4992 }
4993}
4994
4995BPF_CALL_4(bpf_lwt_xmit_push_encap, struct sk_buff *, skb, u32, type,
4996 void *, hdr, u32, len)
4997{
4998 switch (type) {
4999#if IS_ENABLED(CONFIG_LWTUNNEL_BPF)
5000 case BPF_LWT_ENCAP_IP:
5001 return bpf_push_ip_encap(skb, hdr, len, false /* egress */);
5002#endif
5003 default:
5004 return -EINVAL;
5005 }
5006}
5007
5008static const struct bpf_func_proto bpf_lwt_in_push_encap_proto = {
5009 .func = bpf_lwt_in_push_encap,
5010 .gpl_only = false,
5011 .ret_type = RET_INTEGER,
5012 .arg1_type = ARG_PTR_TO_CTX,
5013 .arg2_type = ARG_ANYTHING,
5014 .arg3_type = ARG_PTR_TO_MEM,
5015 .arg4_type = ARG_CONST_SIZE
5016};
5017
5018static const struct bpf_func_proto bpf_lwt_xmit_push_encap_proto = {
5019 .func = bpf_lwt_xmit_push_encap,
5020 .gpl_only = false,
5021 .ret_type = RET_INTEGER,
5022 .arg1_type = ARG_PTR_TO_CTX,
5023 .arg2_type = ARG_ANYTHING,
5024 .arg3_type = ARG_PTR_TO_MEM,
5025 .arg4_type = ARG_CONST_SIZE
5026};
5027
5028#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
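/* Write @len bytes from @from into the packet at @offset. Writes inside the
 * SRH TLV area are allowed and mark the cached SRH as needing re-validation;
 * any other write must stay between the flags and segments fields of the
 * SRH, otherwise -EFAULT is returned.
 */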
5029BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset,
5030 const void *, from, u32, len)
5031{
5032 struct seg6_bpf_srh_state *srh_state =
5033 this_cpu_ptr(&seg6_bpf_srh_states);
5034 struct ipv6_sr_hdr *srh = srh_state->srh;
5035 void *srh_tlvs, *srh_end, *ptr;
5036 int srhoff = 0;
5037
5038 if (srh == NULL)
5039 return -EINVAL;
5040
5041 srh_tlvs = (void *)((char *)srh + ((srh->first_segment + 1) << 4));
5042 srh_end = (void *)((char *)srh + sizeof(*srh) + srh_state->hdrlen);
5043
5044 ptr = skb->data + offset;
5045 if (ptr >= srh_tlvs && ptr + len <= srh_end)
5046 srh_state->valid = false;
5047 else if (ptr < (void *)&srh->flags ||
5048 ptr + len > (void *)&srh->segments)
5049 return -EFAULT;
5050
5051 if (unlikely(bpf_try_make_writable(skb, offset + len)))
5052 return -EFAULT;
5053 if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
5054 return -EINVAL;
5055 srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
5056
5057 memcpy(skb->data + offset, from, len);
5058 return 0;
5059}
5060
5061static const struct bpf_func_proto bpf_lwt_seg6_store_bytes_proto = {
5062 .func = bpf_lwt_seg6_store_bytes,
5063 .gpl_only = false,
5064 .ret_type = RET_INTEGER,
5065 .arg1_type = ARG_PTR_TO_CTX,
5066 .arg2_type = ARG_ANYTHING,
5067 .arg3_type = ARG_PTR_TO_MEM,
5068 .arg4_type = ARG_CONST_SIZE
5069};
5070
5071static void bpf_update_srh_state(struct sk_buff *skb)
5072{
5073 struct seg6_bpf_srh_state *srh_state =
5074 this_cpu_ptr(&seg6_bpf_srh_states);
5075 int srhoff = 0;
5076
5077 if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0) {
5078 srh_state->srh = NULL;
5079 } else {
5080 srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
5081 srh_state->hdrlen = srh_state->srh->hdrlen << 3;
5082 srh_state->valid = true;
5083 }
5084}
5085
5086BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb,
5087 u32, action, void *, param, u32, param_len)
5088{
5089 struct seg6_bpf_srh_state *srh_state =
5090 this_cpu_ptr(&seg6_bpf_srh_states);
5091 int hdroff = 0;
5092 int err;
5093
5094 switch (action) {
5095 case SEG6_LOCAL_ACTION_END_X:
5096 if (!seg6_bpf_has_valid_srh(skb))
5097 return -EBADMSG;
5098 if (param_len != sizeof(struct in6_addr))
5099 return -EINVAL;
5100 return seg6_lookup_nexthop(skb, (struct in6_addr *)param, 0);
5101 case SEG6_LOCAL_ACTION_END_T:
5102 if (!seg6_bpf_has_valid_srh(skb))
5103 return -EBADMSG;
5104 if (param_len != sizeof(int))
5105 return -EINVAL;
5106 return seg6_lookup_nexthop(skb, NULL, *(int *)param);
5107 case SEG6_LOCAL_ACTION_END_DT6:
5108 if (!seg6_bpf_has_valid_srh(skb))
5109 return -EBADMSG;
5110 if (param_len != sizeof(int))
5111 return -EINVAL;
5112
5113 if (ipv6_find_hdr(skb, &hdroff, IPPROTO_IPV6, NULL, NULL) < 0)
5114 return -EBADMSG;
5115 if (!pskb_pull(skb, hdroff))
5116 return -EBADMSG;
5117
5118 skb_postpull_rcsum(skb, skb_network_header(skb), hdroff);
5119 skb_reset_network_header(skb);
5120 skb_reset_transport_header(skb);
5121 skb->encapsulation = 0;
5122
5123 bpf_compute_data_pointers(skb);
5124 bpf_update_srh_state(skb);
5125 return seg6_lookup_nexthop(skb, NULL, *(int *)param);
5126 case SEG6_LOCAL_ACTION_END_B6:
5127 if (srh_state->srh && !seg6_bpf_has_valid_srh(skb))
5128 return -EBADMSG;
5129 err = bpf_push_seg6_encap(skb, BPF_LWT_ENCAP_SEG6_INLINE,
5130 param, param_len);
5131 if (!err)
5132 bpf_update_srh_state(skb);
5133
5134 return err;
5135 case SEG6_LOCAL_ACTION_END_B6_ENCAP:
5136 if (srh_state->srh && !seg6_bpf_has_valid_srh(skb))
5137 return -EBADMSG;
5138 err = bpf_push_seg6_encap(skb, BPF_LWT_ENCAP_SEG6,
5139 param, param_len);
5140 if (!err)
5141 bpf_update_srh_state(skb);
5142
5143 return err;
5144 default:
5145 return -EINVAL;
5146 }
5147}
5148
5149static const struct bpf_func_proto bpf_lwt_seg6_action_proto = {
5150 .func = bpf_lwt_seg6_action,
5151 .gpl_only = false,
5152 .ret_type = RET_INTEGER,
5153 .arg1_type = ARG_PTR_TO_CTX,
5154 .arg2_type = ARG_ANYTHING,
5155 .arg3_type = ARG_PTR_TO_MEM,
5156 .arg4_type = ARG_CONST_SIZE
5157};
5158
5159BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset,
5160 s32, len)
5161{
5162 struct seg6_bpf_srh_state *srh_state =
5163 this_cpu_ptr(&seg6_bpf_srh_states);
5164 struct ipv6_sr_hdr *srh = srh_state->srh;
5165 void *srh_end, *srh_tlvs, *ptr;
5166 struct ipv6hdr *hdr;
5167 int srhoff = 0;
5168 int ret;
5169
5170 if (unlikely(srh == NULL))
5171 return -EINVAL;
5172
5173 srh_tlvs = (void *)((unsigned char *)srh + sizeof(*srh) +
5174 ((srh->first_segment + 1) << 4));
5175 srh_end = (void *)((unsigned char *)srh + sizeof(*srh) +
5176 srh_state->hdrlen);
5177 ptr = skb->data + offset;
5178
5179 if (unlikely(ptr < srh_tlvs || ptr > srh_end))
5180 return -EFAULT;
5181 if (unlikely(len < 0 && (void *)((char *)ptr - len) > srh_end))
5182 return -EFAULT;
5183
5184 if (len > 0) {
5185 ret = skb_cow_head(skb, len);
5186 if (unlikely(ret < 0))
5187 return ret;
5188
5189 ret = bpf_skb_net_hdr_push(skb, offset, len);
5190 } else {
5191 ret = bpf_skb_net_hdr_pop(skb, offset, -1 * len);
5192 }
5193
5194 bpf_compute_data_pointers(skb);
5195 if (unlikely(ret < 0))
5196 return ret;
5197
5198 hdr = (struct ipv6hdr *)skb->data;
5199 hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
5200
5201 if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
5202 return -EINVAL;
5203 srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
5204 srh_state->hdrlen += len;
5205 srh_state->valid = false;
5206 return 0;
5207}
5208
5209static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = {
5210 .func = bpf_lwt_seg6_adjust_srh,
5211 .gpl_only = false,
5212 .ret_type = RET_INTEGER,
5213 .arg1_type = ARG_PTR_TO_CTX,
5214 .arg2_type = ARG_ANYTHING,
5215 .arg3_type = ARG_ANYTHING,
5216};
5217#endif /* CONFIG_IPV6_SEG6_BPF */
5218
5219#ifdef CONFIG_INET
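/* Resolve a bpf_sock_tuple to a TCP or UDP socket in @net. A returned socket
 * is either RCU-protected (SOCK_RCU_FREE) or refcounted; anything else
 * indicates a lookup bug and is dropped with a one-time warning.
 */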
5220static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
5221 int dif, int sdif, u8 family, u8 proto)
5222{
5223 bool refcounted = false;
5224 struct sock *sk = NULL;
5225
5226 if (family == AF_INET) {
5227 __be32 src4 = tuple->ipv4.saddr;
5228 __be32 dst4 = tuple->ipv4.daddr;
5229
5230 if (proto == IPPROTO_TCP)
5231 sk = __inet_lookup(net, &tcp_hashinfo, NULL, 0,
5232 src4, tuple->ipv4.sport,
5233 dst4, tuple->ipv4.dport,
5234 dif, sdif, &refcounted);
5235 else
5236 sk = __udp4_lib_lookup(net, src4, tuple->ipv4.sport,
5237 dst4, tuple->ipv4.dport,
5238 dif, sdif, &udp_table, NULL);
5239#if IS_ENABLED(CONFIG_IPV6)
5240 } else {
5241 struct in6_addr *src6 = (struct in6_addr *)&tuple->ipv6.saddr;
5242 struct in6_addr *dst6 = (struct in6_addr *)&tuple->ipv6.daddr;
5243
5244 if (proto == IPPROTO_TCP)
5245 sk = __inet6_lookup(net, &tcp_hashinfo, NULL, 0,
5246 src6, tuple->ipv6.sport,
5247 dst6, ntohs(tuple->ipv6.dport),
5248 dif, sdif, &refcounted);
5249 else if (likely(ipv6_bpf_stub))
5250 sk = ipv6_bpf_stub->udp6_lib_lookup(net,
5251 src6, tuple->ipv6.sport,
5252 dst6, tuple->ipv6.dport,
5253 dif, sdif,
5254 &udp_table, NULL);
5255#endif
5256 }
5257
5258 if (unlikely(sk && !refcounted && !sock_flag(sk, SOCK_RCU_FREE))) {
5259 WARN_ONCE(1, "Found non-RCU, unreferenced socket!");
5260 sk = NULL;
5261 }
5262 return sk;
5263}
5264
5265/* bpf_skc_lookup performs a socket lookup based on the given tuple held in
5266 * bpf_sock_tuple. The flags argument is reserved and must be zero. A negative
5267 * netns_id selects the caller's netns; an id in [0, S32_MAX] names a netns
5268 * relative to it. Returned sockets are either RCU-protected or carry a
5269 * reference that must be dropped with bpf_sk_release(). */
5270static struct sock *
5271__bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
5272 struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id,
5273 u64 flags)
5274{
5275 struct sock *sk = NULL;
5276 u8 family = AF_UNSPEC;
5277 struct net *net;
5278 int sdif;
5279
5280 if (len == sizeof(tuple->ipv4))
5281 family = AF_INET;
5282 else if (len == sizeof(tuple->ipv6))
5283 family = AF_INET6;
5284 else
5285 return NULL;
5286
5287 if (unlikely(family == AF_UNSPEC || flags ||
5288 !((s32)netns_id < 0 || netns_id <= S32_MAX)))
5289 goto out;
5290
5291 if (family == AF_INET)
5292 sdif = inet_sdif(skb);
5293 else
5294 sdif = inet6_sdif(skb);
5295
5296 if ((s32)netns_id < 0) {
5297 net = caller_net;
5298 sk = sk_lookup(net, tuple, ifindex, sdif, family, proto);
5299 } else {
5300 net = get_net_ns_by_id(caller_net, netns_id);
5301 if (unlikely(!net))
5302 goto out;
5303 sk = sk_lookup(net, tuple, ifindex, sdif, family, proto);
5304 put_net(net);
5305 }
5306
5307out:
5308 return sk;
5309}
5310
5311static struct sock *
5312__bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
5313 struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id,
5314 u64 flags)
5315{
5316 struct sock *sk = __bpf_skc_lookup(skb, tuple, len, caller_net,
5317 ifindex, proto, netns_id, flags);
5318
5319 if (sk) {
5320 sk = sk_to_full_sk(sk);
5321 if (!sk_fullsock(sk)) {
5322 sock_gen_put(sk);
5323 return NULL;
5324 }
5325 }
5326
5327 return sk;
5328}
5329
5330static struct sock *
5331bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
5332 u8 proto, u64 netns_id, u64 flags)
5333{
5334 struct net *caller_net;
5335 int ifindex;
5336
5337 if (skb->dev) {
5338 caller_net = dev_net(skb->dev);
5339 ifindex = skb->dev->ifindex;
5340 } else {
5341 caller_net = sock_net(skb->sk);
5342 ifindex = 0;
5343 }
5344
5345 return __bpf_skc_lookup(skb, tuple, len, caller_net, ifindex, proto,
5346 netns_id, flags);
5347}
5348
5349static struct sock *
5350bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
5351 u8 proto, u64 netns_id, u64 flags)
5352{
5353 struct sock *sk = bpf_skc_lookup(skb, tuple, len, proto, netns_id,
5354 flags);
5355
5356 if (sk) {
5357 sk = sk_to_full_sk(sk);
5358 if (!sk_fullsock(sk)) {
5359 sock_gen_put(sk);
5360 return NULL;
5361 }
5362 }
5363
5364 return sk;
5365}
5366
5367BPF_CALL_5(bpf_skc_lookup_tcp, struct sk_buff *, skb,
5368 struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
5369{
5370 return (unsigned long)bpf_skc_lookup(skb, tuple, len, IPPROTO_TCP,
5371 netns_id, flags);
5372}
5373
5374static const struct bpf_func_proto bpf_skc_lookup_tcp_proto = {
5375 .func = bpf_skc_lookup_tcp,
5376 .gpl_only = false,
5377 .pkt_access = true,
5378 .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL,
5379 .arg1_type = ARG_PTR_TO_CTX,
5380 .arg2_type = ARG_PTR_TO_MEM,
5381 .arg3_type = ARG_CONST_SIZE,
5382 .arg4_type = ARG_ANYTHING,
5383 .arg5_type = ARG_ANYTHING,
5384};
5385
5386BPF_CALL_5(bpf_sk_lookup_tcp, struct sk_buff *, skb,
5387 struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
5388{
5389 return (unsigned long)bpf_sk_lookup(skb, tuple, len, IPPROTO_TCP,
5390 netns_id, flags);
5391}
5392
5393static const struct bpf_func_proto bpf_sk_lookup_tcp_proto = {
5394 .func = bpf_sk_lookup_tcp,
5395 .gpl_only = false,
5396 .pkt_access = true,
5397 .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
5398 .arg1_type = ARG_PTR_TO_CTX,
5399 .arg2_type = ARG_PTR_TO_MEM,
5400 .arg3_type = ARG_CONST_SIZE,
5401 .arg4_type = ARG_ANYTHING,
5402 .arg5_type = ARG_ANYTHING,
5403};
5404
5405BPF_CALL_5(bpf_sk_lookup_udp, struct sk_buff *, skb,
5406 struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
5407{
5408 return (unsigned long)bpf_sk_lookup(skb, tuple, len, IPPROTO_UDP,
5409 netns_id, flags);
5410}
5411
5412static const struct bpf_func_proto bpf_sk_lookup_udp_proto = {
5413 .func = bpf_sk_lookup_udp,
5414 .gpl_only = false,
5415 .pkt_access = true,
5416 .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
5417 .arg1_type = ARG_PTR_TO_CTX,
5418 .arg2_type = ARG_PTR_TO_MEM,
5419 .arg3_type = ARG_CONST_SIZE,
5420 .arg4_type = ARG_ANYTHING,
5421 .arg5_type = ARG_ANYTHING,
5422};
5423
5424BPF_CALL_1(bpf_sk_release, struct sock *, sk)
5425{
5426 /* Only full sockets have sk->sk_flags. */
5427 if (!sk_fullsock(sk) || !sock_flag(sk, SOCK_RCU_FREE))
5428 sock_gen_put(sk);
5429 return 0;
5430}
5431
5432static const struct bpf_func_proto bpf_sk_release_proto = {
5433 .func = bpf_sk_release,
5434 .gpl_only = false,
5435 .ret_type = RET_INTEGER,
5436 .arg1_type = ARG_PTR_TO_SOCK_COMMON,
5437};
5438
5439BPF_CALL_5(bpf_xdp_sk_lookup_udp, struct xdp_buff *, ctx,
5440 struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
5441{
5442 struct net *caller_net = dev_net(ctx->rxq->dev);
5443 int ifindex = ctx->rxq->dev->ifindex;
5444
5445 return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, caller_net,
5446 ifindex, IPPROTO_UDP, netns_id,
5447 flags);
5448}
5449
5450static const struct bpf_func_proto bpf_xdp_sk_lookup_udp_proto = {
5451 .func = bpf_xdp_sk_lookup_udp,
5452 .gpl_only = false,
5453 .pkt_access = true,
5454 .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
5455 .arg1_type = ARG_PTR_TO_CTX,
5456 .arg2_type = ARG_PTR_TO_MEM,
5457 .arg3_type = ARG_CONST_SIZE,
5458 .arg4_type = ARG_ANYTHING,
5459 .arg5_type = ARG_ANYTHING,
5460};
5461
5462BPF_CALL_5(bpf_xdp_skc_lookup_tcp, struct xdp_buff *, ctx,
5463 struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
5464{
5465 struct net *caller_net = dev_net(ctx->rxq->dev);
5466 int ifindex = ctx->rxq->dev->ifindex;
5467
5468 return (unsigned long)__bpf_skc_lookup(NULL, tuple, len, caller_net,
5469 ifindex, IPPROTO_TCP, netns_id,
5470 flags);
5471}
5472
5473static const struct bpf_func_proto bpf_xdp_skc_lookup_tcp_proto = {
5474 .func = bpf_xdp_skc_lookup_tcp,
5475 .gpl_only = false,
5476 .pkt_access = true,
5477 .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL,
5478 .arg1_type = ARG_PTR_TO_CTX,
5479 .arg2_type = ARG_PTR_TO_MEM,
5480 .arg3_type = ARG_CONST_SIZE,
5481 .arg4_type = ARG_ANYTHING,
5482 .arg5_type = ARG_ANYTHING,
5483};
5484
5485BPF_CALL_5(bpf_xdp_sk_lookup_tcp, struct xdp_buff *, ctx,
5486 struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
5487{
5488 struct net *caller_net = dev_net(ctx->rxq->dev);
5489 int ifindex = ctx->rxq->dev->ifindex;
5490
5491 return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, caller_net,
5492 ifindex, IPPROTO_TCP, netns_id,
5493 flags);
5494}
5495
5496static const struct bpf_func_proto bpf_xdp_sk_lookup_tcp_proto = {
5497 .func = bpf_xdp_sk_lookup_tcp,
5498 .gpl_only = false,
5499 .pkt_access = true,
5500 .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
5501 .arg1_type = ARG_PTR_TO_CTX,
5502 .arg2_type = ARG_PTR_TO_MEM,
5503 .arg3_type = ARG_CONST_SIZE,
5504 .arg4_type = ARG_ANYTHING,
5505 .arg5_type = ARG_ANYTHING,
5506};
5507
5508BPF_CALL_5(bpf_sock_addr_skc_lookup_tcp, struct bpf_sock_addr_kern *, ctx,
5509 struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
5510{
5511 return (unsigned long)__bpf_skc_lookup(NULL, tuple, len,
5512 sock_net(ctx->sk), 0,
5513 IPPROTO_TCP, netns_id, flags);
5514}
5515
5516static const struct bpf_func_proto bpf_sock_addr_skc_lookup_tcp_proto = {
5517 .func = bpf_sock_addr_skc_lookup_tcp,
5518 .gpl_only = false,
5519 .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL,
5520 .arg1_type = ARG_PTR_TO_CTX,
5521 .arg2_type = ARG_PTR_TO_MEM,
5522 .arg3_type = ARG_CONST_SIZE,
5523 .arg4_type = ARG_ANYTHING,
5524 .arg5_type = ARG_ANYTHING,
5525};
5526
5527BPF_CALL_5(bpf_sock_addr_sk_lookup_tcp, struct bpf_sock_addr_kern *, ctx,
5528 struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
5529{
5530 return (unsigned long)__bpf_sk_lookup(NULL, tuple, len,
5531 sock_net(ctx->sk), 0, IPPROTO_TCP,
5532 netns_id, flags);
5533}
5534
5535static const struct bpf_func_proto bpf_sock_addr_sk_lookup_tcp_proto = {
5536 .func = bpf_sock_addr_sk_lookup_tcp,
5537 .gpl_only = false,
5538 .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
5539 .arg1_type = ARG_PTR_TO_CTX,
5540 .arg2_type = ARG_PTR_TO_MEM,
5541 .arg3_type = ARG_CONST_SIZE,
5542 .arg4_type = ARG_ANYTHING,
5543 .arg5_type = ARG_ANYTHING,
5544};
5545
5546BPF_CALL_5(bpf_sock_addr_sk_lookup_udp, struct bpf_sock_addr_kern *, ctx,
5547 struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
5548{
5549 return (unsigned long)__bpf_sk_lookup(NULL, tuple, len,
5550 sock_net(ctx->sk), 0, IPPROTO_UDP,
5551 netns_id, flags);
5552}
5553
5554static const struct bpf_func_proto bpf_sock_addr_sk_lookup_udp_proto = {
5555 .func = bpf_sock_addr_sk_lookup_udp,
5556 .gpl_only = false,
5557 .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
5558 .arg1_type = ARG_PTR_TO_CTX,
5559 .arg2_type = ARG_PTR_TO_MEM,
5560 .arg3_type = ARG_CONST_SIZE,
5561 .arg4_type = ARG_ANYTHING,
5562 .arg5_type = ARG_ANYTHING,
5563};
5564
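/* Access checks for struct bpf_tcp_sock: the offset must be inside the
 * structure and aligned to the access size; the two 64-bit byte counters
 * must be read with 8-byte loads and every other field with 4-byte loads.
 */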
5565bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
5566 struct bpf_insn_access_aux *info)
5567{
5568 if (off < 0 || off >= offsetofend(struct bpf_tcp_sock,
5569 icsk_retransmits))
5570 return false;
5571
5572 if (off % size != 0)
5573 return false;
5574
5575 switch (off) {
5576 case offsetof(struct bpf_tcp_sock, bytes_received):
5577 case offsetof(struct bpf_tcp_sock, bytes_acked):
5578 return size == sizeof(__u64);
5579 default:
5580 return size == sizeof(__u32);
5581 }
5582}
5583
5584u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
5585 const struct bpf_insn *si,
5586 struct bpf_insn *insn_buf,
5587 struct bpf_prog *prog, u32 *target_size)
5588{
5589 struct bpf_insn *insn = insn_buf;
5590
5591#define BPF_TCP_SOCK_GET_COMMON(FIELD) \
5592 do { \
5593 BUILD_BUG_ON(sizeof_field(struct tcp_sock, FIELD) > \
5594 sizeof_field(struct bpf_tcp_sock, FIELD)); \
5595 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct tcp_sock, FIELD),\
5596 si->dst_reg, si->src_reg, \
5597 offsetof(struct tcp_sock, FIELD)); \
5598 } while (0)
5599
5600#define BPF_INET_SOCK_GET_COMMON(FIELD) \
5601 do { \
5602 BUILD_BUG_ON(sizeof_field(struct inet_connection_sock, \
5603 FIELD) > \
5604 sizeof_field(struct bpf_tcp_sock, FIELD)); \
5605 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
5606 struct inet_connection_sock, \
5607 FIELD), \
5608 si->dst_reg, si->src_reg, \
5609 offsetof( \
5610 struct inet_connection_sock, \
5611 FIELD)); \
5612 } while (0)
5613
5614 if (insn > insn_buf)
5615 return insn - insn_buf;
5616
5617 switch (si->off) {
5618 case offsetof(struct bpf_tcp_sock, rtt_min):
5619 BUILD_BUG_ON(sizeof_field(struct tcp_sock, rtt_min) !=
5620 sizeof(struct minmax));
5621 BUILD_BUG_ON(sizeof(struct minmax) <
5622 sizeof(struct minmax_sample));
5623
5624 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
5625 offsetof(struct tcp_sock, rtt_min) +
5626 offsetof(struct minmax_sample, v));
5627 break;
5628 case offsetof(struct bpf_tcp_sock, snd_cwnd):
5629 BPF_TCP_SOCK_GET_COMMON(snd_cwnd);
5630 break;
5631 case offsetof(struct bpf_tcp_sock, srtt_us):
5632 BPF_TCP_SOCK_GET_COMMON(srtt_us);
5633 break;
5634 case offsetof(struct bpf_tcp_sock, snd_ssthresh):
5635 BPF_TCP_SOCK_GET_COMMON(snd_ssthresh);
5636 break;
5637 case offsetof(struct bpf_tcp_sock, rcv_nxt):
5638 BPF_TCP_SOCK_GET_COMMON(rcv_nxt);
5639 break;
5640 case offsetof(struct bpf_tcp_sock, snd_nxt):
5641 BPF_TCP_SOCK_GET_COMMON(snd_nxt);
5642 break;
5643 case offsetof(struct bpf_tcp_sock, snd_una):
5644 BPF_TCP_SOCK_GET_COMMON(snd_una);
5645 break;
5646 case offsetof(struct bpf_tcp_sock, mss_cache):
5647 BPF_TCP_SOCK_GET_COMMON(mss_cache);
5648 break;
5649 case offsetof(struct bpf_tcp_sock, ecn_flags):
5650 BPF_TCP_SOCK_GET_COMMON(ecn_flags);
5651 break;
5652 case offsetof(struct bpf_tcp_sock, rate_delivered):
5653 BPF_TCP_SOCK_GET_COMMON(rate_delivered);
5654 break;
5655 case offsetof(struct bpf_tcp_sock, rate_interval_us):
5656 BPF_TCP_SOCK_GET_COMMON(rate_interval_us);
5657 break;
5658 case offsetof(struct bpf_tcp_sock, packets_out):
5659 BPF_TCP_SOCK_GET_COMMON(packets_out);
5660 break;
5661 case offsetof(struct bpf_tcp_sock, retrans_out):
5662 BPF_TCP_SOCK_GET_COMMON(retrans_out);
5663 break;
5664 case offsetof(struct bpf_tcp_sock, total_retrans):
5665 BPF_TCP_SOCK_GET_COMMON(total_retrans);
5666 break;
5667 case offsetof(struct bpf_tcp_sock, segs_in):
5668 BPF_TCP_SOCK_GET_COMMON(segs_in);
5669 break;
5670 case offsetof(struct bpf_tcp_sock, data_segs_in):
5671 BPF_TCP_SOCK_GET_COMMON(data_segs_in);
5672 break;
5673 case offsetof(struct bpf_tcp_sock, segs_out):
5674 BPF_TCP_SOCK_GET_COMMON(segs_out);
5675 break;
5676 case offsetof(struct bpf_tcp_sock, data_segs_out):
5677 BPF_TCP_SOCK_GET_COMMON(data_segs_out);
5678 break;
5679 case offsetof(struct bpf_tcp_sock, lost_out):
5680 BPF_TCP_SOCK_GET_COMMON(lost_out);
5681 break;
5682 case offsetof(struct bpf_tcp_sock, sacked_out):
5683 BPF_TCP_SOCK_GET_COMMON(sacked_out);
5684 break;
5685 case offsetof(struct bpf_tcp_sock, bytes_received):
5686 BPF_TCP_SOCK_GET_COMMON(bytes_received);
5687 break;
5688 case offsetof(struct bpf_tcp_sock, bytes_acked):
5689 BPF_TCP_SOCK_GET_COMMON(bytes_acked);
5690 break;
5691 case offsetof(struct bpf_tcp_sock, dsack_dups):
5692 BPF_TCP_SOCK_GET_COMMON(dsack_dups);
5693 break;
5694 case offsetof(struct bpf_tcp_sock, delivered):
5695 BPF_TCP_SOCK_GET_COMMON(delivered);
5696 break;
5697 case offsetof(struct bpf_tcp_sock, delivered_ce):
5698 BPF_TCP_SOCK_GET_COMMON(delivered_ce);
5699 break;
5700 case offsetof(struct bpf_tcp_sock, icsk_retransmits):
5701 BPF_INET_SOCK_GET_COMMON(icsk_retransmits);
5702 break;
5703 }
5704
5705 return insn - insn_buf;
5706}
5707
5708BPF_CALL_1(bpf_tcp_sock, struct sock *, sk)
5709{
5710 if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP)
5711 return (unsigned long)sk;
5712
5713 return (unsigned long)NULL;
5714}
5715
5716const struct bpf_func_proto bpf_tcp_sock_proto = {
5717 .func = bpf_tcp_sock,
5718 .gpl_only = false,
5719 .ret_type = RET_PTR_TO_TCP_SOCK_OR_NULL,
5720 .arg1_type = ARG_PTR_TO_SOCK_COMMON,
5721};
5722
5723BPF_CALL_1(bpf_get_listener_sock, struct sock *, sk)
5724{
5725 sk = sk_to_full_sk(sk);
5726
5727 if (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_RCU_FREE))
5728 return (unsigned long)sk;
5729
5730 return (unsigned long)NULL;
5731}
5732
5733static const struct bpf_func_proto bpf_get_listener_sock_proto = {
5734 .func = bpf_get_listener_sock,
5735 .gpl_only = false,
5736 .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
5737 .arg1_type = ARG_PTR_TO_SOCK_COMMON,
5738};
5739
5740BPF_CALL_1(bpf_skb_ecn_set_ce, struct sk_buff *, skb)
5741{
5742 unsigned int iphdr_len;
5743
5744 if (skb->protocol == cpu_to_be16(ETH_P_IP))
5745 iphdr_len = sizeof(struct iphdr);
5746 else if (skb->protocol == cpu_to_be16(ETH_P_IPV6))
5747 iphdr_len = sizeof(struct ipv6hdr);
5748 else
5749 return 0;
5750
5751 if (skb_headlen(skb) < iphdr_len)
5752 return 0;
5753
5754 if (skb_cloned(skb) && !skb_clone_writable(skb, iphdr_len))
5755 return 0;
5756
5757 return INET_ECN_set_ce(skb);
5758}
5759
5760bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
5761 struct bpf_insn_access_aux *info)
5762{
5763 if (off < 0 || off >= offsetofend(struct bpf_xdp_sock, queue_id))
5764 return false;
5765
5766 if (off % size != 0)
5767 return false;
5768
5769 switch (off) {
5770 default:
5771 return size == sizeof(__u32);
5772 }
5773}
5774
5775u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
5776 const struct bpf_insn *si,
5777 struct bpf_insn *insn_buf,
5778 struct bpf_prog *prog, u32 *target_size)
5779{
5780 struct bpf_insn *insn = insn_buf;
5781
5782#define BPF_XDP_SOCK_GET(FIELD) \
5783 do { \
5784 BUILD_BUG_ON(sizeof_field(struct xdp_sock, FIELD) > \
5785 sizeof_field(struct bpf_xdp_sock, FIELD)); \
5786 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_sock, FIELD),\
5787 si->dst_reg, si->src_reg, \
5788 offsetof(struct xdp_sock, FIELD)); \
5789 } while (0)
5790
5791 switch (si->off) {
5792 case offsetof(struct bpf_xdp_sock, queue_id):
5793 BPF_XDP_SOCK_GET(queue_id);
5794 break;
5795 }
5796
5797 return insn - insn_buf;
5798}
5799
5800static const struct bpf_func_proto bpf_skb_ecn_set_ce_proto = {
5801 .func = bpf_skb_ecn_set_ce,
5802 .gpl_only = false,
5803 .ret_type = RET_INTEGER,
5804 .arg1_type = ARG_PTR_TO_CTX,
5805};
5806
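/* Check whether the ACK in @th carries a valid SYN cookie for the listening
 * socket @sk: 0 on success, -ENOENT if the cookie is absent or stale,
 * -EINVAL on malformed input, -ENOTSUPP without CONFIG_SYN_COOKIES.
 */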
5807BPF_CALL_5(bpf_tcp_check_syncookie, struct sock *, sk, void *, iph, u32, iph_len,
5808 struct tcphdr *, th, u32, th_len)
5809{
5810#ifdef CONFIG_SYN_COOKIES
5811 u32 cookie;
5812 int ret;
5813
5814 if (unlikely(th_len < sizeof(*th)))
5815 return -EINVAL;
5816
5817 /* sk_listener() allows TCP_NEW_SYN_RECV, which makes no sense here */
5818 if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN)
5819 return -EINVAL;
5820
5821 if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies)
5822 return -EINVAL;
5823
5824 if (!th->ack || th->rst || th->syn)
5825 return -ENOENT;
5826
5827 if (tcp_synq_no_recent_overflow(sk))
5828 return -ENOENT;
5829
5830 cookie = ntohl(th->ack_seq) - 1;
5831
5832 switch (sk->sk_family) {
5833 case AF_INET:
5834 if (unlikely(iph_len < sizeof(struct iphdr)))
5835 return -EINVAL;
5836
5837 ret = __cookie_v4_check((struct iphdr *)iph, th, cookie);
5838 break;
5839
5840#if IS_BUILTIN(CONFIG_IPV6)
5841 case AF_INET6:
5842 if (unlikely(iph_len < sizeof(struct ipv6hdr)))
5843 return -EINVAL;
5844
5845 ret = __cookie_v6_check((struct ipv6hdr *)iph, th, cookie);
5846 break;
5847#endif
5848
5849 default:
5850 return -EPROTONOSUPPORT;
5851 }
5852
5853 if (ret > 0)
5854 return 0;
5855
5856 return -ENOENT;
5857#else
5858 return -ENOTSUPP;
5859#endif
5860}
5861
5862static const struct bpf_func_proto bpf_tcp_check_syncookie_proto = {
5863 .func = bpf_tcp_check_syncookie,
5864 .gpl_only = true,
5865 .pkt_access = true,
5866 .ret_type = RET_INTEGER,
5867 .arg1_type = ARG_PTR_TO_SOCK_COMMON,
5868 .arg2_type = ARG_PTR_TO_MEM,
5869 .arg3_type = ARG_CONST_SIZE,
5870 .arg4_type = ARG_PTR_TO_MEM,
5871 .arg5_type = ARG_CONST_SIZE,
5872};
5873
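/* Generate a SYN cookie for the SYN in @th on behalf of listener @sk. On
 * success the low 32 bits of the return value carry the cookie and the high
 * 32 bits the MSS to advertise; negative values are errors.
 */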
5874BPF_CALL_5(bpf_tcp_gen_syncookie, struct sock *, sk, void *, iph, u32, iph_len,
5875 struct tcphdr *, th, u32, th_len)
5876{
5877#ifdef CONFIG_SYN_COOKIES
5878 u32 cookie;
5879 u16 mss;
5880
5881 if (unlikely(th_len < sizeof(*th) || th_len != th->doff * 4))
5882 return -EINVAL;
5883
5884 if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN)
5885 return -EINVAL;
5886
5887 if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies)
5888 return -ENOENT;
5889
5890 if (!th->syn || th->ack || th->fin || th->rst)
5891 return -EINVAL;
5892
5893 if (unlikely(iph_len < sizeof(struct iphdr)))
5894 return -EINVAL;
5895
5896 /* Both struct iphdr and struct ipv6hdr have the version field at the
5897 * same offset so we can cast to the shorter header (struct iphdr).
5898 */
5899 switch (((struct iphdr *)iph)->version) {
5900 case 4:
5901 if (sk->sk_family == AF_INET6 && sk->sk_ipv6only)
5902 return -EINVAL;
5903
5904 mss = tcp_v4_get_syncookie(sk, iph, th, &cookie);
5905 break;
5906
5907#if IS_BUILTIN(CONFIG_IPV6)
5908 case 6:
5909 if (unlikely(iph_len < sizeof(struct ipv6hdr)))
5910 return -EINVAL;
5911
5912 if (sk->sk_family != AF_INET6)
5913 return -EINVAL;
5914
5915 mss = tcp_v6_get_syncookie(sk, iph, th, &cookie);
5916 break;
5917#endif
5918
5919 default:
5920 return -EPROTONOSUPPORT;
5921 }
5922 if (mss == 0)
5923 return -ENOENT;
5924
5925 return cookie | ((u64)mss << 32);
5926#else
5927 return -EOPNOTSUPP;
5928#endif
5929}
5930
5931static const struct bpf_func_proto bpf_tcp_gen_syncookie_proto = {
5932 .func = bpf_tcp_gen_syncookie,
5933 .gpl_only = true,
5934 .pkt_access = true,
5935 .ret_type = RET_INTEGER,
5936 .arg1_type = ARG_PTR_TO_SOCK_COMMON,
5937 .arg2_type = ARG_PTR_TO_MEM,
5938 .arg3_type = ARG_CONST_SIZE,
5939 .arg4_type = ARG_PTR_TO_MEM,
5940 .arg5_type = ARG_CONST_SIZE,
5941};
5942
5943#endif /* CONFIG_INET */
5944
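/* Helpers listed here may move or reallocate packet data. The verifier
 * relies on this list to invalidate all packet pointers a program holds
 * across such a call, forcing them to be re-derived from the context.
 */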
5945bool bpf_helper_changes_pkt_data(void *func)
5946{
5947 if (func == bpf_skb_vlan_push ||
5948 func == bpf_skb_vlan_pop ||
5949 func == bpf_skb_store_bytes ||
5950 func == bpf_skb_change_proto ||
5951 func == bpf_skb_change_head ||
5952 func == sk_skb_change_head ||
5953 func == bpf_skb_change_tail ||
5954 func == sk_skb_change_tail ||
5955 func == bpf_skb_adjust_room ||
5956 func == bpf_skb_pull_data ||
5957 func == sk_skb_pull_data ||
5958 func == bpf_clone_redirect ||
5959 func == bpf_l3_csum_replace ||
5960 func == bpf_l4_csum_replace ||
5961 func == bpf_xdp_adjust_head ||
5962 func == bpf_xdp_adjust_meta ||
5963 func == bpf_msg_pull_data ||
5964 func == bpf_msg_push_data ||
5965 func == bpf_msg_pop_data ||
5966 func == bpf_xdp_adjust_tail ||
5967#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
5968 func == bpf_lwt_seg6_store_bytes ||
5969 func == bpf_lwt_seg6_adjust_srh ||
5970 func == bpf_lwt_seg6_action ||
5971#endif
5972 func == bpf_lwt_in_push_encap ||
5973 func == bpf_lwt_xmit_push_encap)
5974 return true;
5975
5976 return false;
5977}
5978
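/* Helpers available to every program type. Entries in the first switch need
 * no privilege; everything past the capable() check requires CAP_SYS_ADMIN
 * at program load time.
 */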
5979static const struct bpf_func_proto *
5980bpf_base_func_proto(enum bpf_func_id func_id)
5981{
5982 switch (func_id) {
5983 case BPF_FUNC_map_lookup_elem:
5984 return &bpf_map_lookup_elem_proto;
5985 case BPF_FUNC_map_update_elem:
5986 return &bpf_map_update_elem_proto;
5987 case BPF_FUNC_map_delete_elem:
5988 return &bpf_map_delete_elem_proto;
5989 case BPF_FUNC_map_push_elem:
5990 return &bpf_map_push_elem_proto;
5991 case BPF_FUNC_map_pop_elem:
5992 return &bpf_map_pop_elem_proto;
5993 case BPF_FUNC_map_peek_elem:
5994 return &bpf_map_peek_elem_proto;
5995 case BPF_FUNC_get_prandom_u32:
5996 return &bpf_get_prandom_u32_proto;
5997 case BPF_FUNC_get_smp_processor_id:
5998 return &bpf_get_raw_smp_processor_id_proto;
5999 case BPF_FUNC_get_numa_node_id:
6000 return &bpf_get_numa_node_id_proto;
6001 case BPF_FUNC_tail_call:
6002 return &bpf_tail_call_proto;
6003 case BPF_FUNC_ktime_get_ns:
6004 return &bpf_ktime_get_ns_proto;
6005 default:
6006 break;
6007 }
6008
6009 if (!capable(CAP_SYS_ADMIN))
6010 return NULL;
6011
6012 switch (func_id) {
6013 case BPF_FUNC_spin_lock:
6014 return &bpf_spin_lock_proto;
6015 case BPF_FUNC_spin_unlock:
6016 return &bpf_spin_unlock_proto;
6017 case BPF_FUNC_trace_printk:
6018 return bpf_get_trace_printk_proto();
6019 default:
6020 return NULL;
6021 }
6022}
6023
6024static const struct bpf_func_proto *
6025sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6026{
6027 switch (func_id) {
6028 /* inet and inet6 sockets are created in a process
6029 * context so there is always a valid uid/gid
6030 */
6031 case BPF_FUNC_get_current_uid_gid:
6032 return &bpf_get_current_uid_gid_proto;
6033 case BPF_FUNC_get_local_storage:
6034 return &bpf_get_local_storage_proto;
6035 default:
6036 return bpf_base_func_proto(func_id);
6037 }
6038}
6039
6040static const struct bpf_func_proto *
6041sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6042{
6043 switch (func_id) {
6044 /* inet and inet6 sockets are created in a process
6045 * context so there is always a valid uid/gid
6046 */
6047 case BPF_FUNC_get_current_uid_gid:
6048 return &bpf_get_current_uid_gid_proto;
6049 case BPF_FUNC_bind:
6050 switch (prog->expected_attach_type) {
6051 case BPF_CGROUP_INET4_CONNECT:
6052 case BPF_CGROUP_INET6_CONNECT:
6053 return &bpf_bind_proto;
6054 default:
6055 return NULL;
6056 }
6057 case BPF_FUNC_get_socket_cookie:
6058 return &bpf_get_socket_cookie_sock_addr_proto;
6059 case BPF_FUNC_get_local_storage:
6060 return &bpf_get_local_storage_proto;
6061#ifdef CONFIG_INET
6062 case BPF_FUNC_sk_lookup_tcp:
6063 return &bpf_sock_addr_sk_lookup_tcp_proto;
6064 case BPF_FUNC_sk_lookup_udp:
6065 return &bpf_sock_addr_sk_lookup_udp_proto;
6066 case BPF_FUNC_sk_release:
6067 return &bpf_sk_release_proto;
6068 case BPF_FUNC_skc_lookup_tcp:
6069 return &bpf_sock_addr_skc_lookup_tcp_proto;
6070#endif
6071 case BPF_FUNC_sk_storage_get:
6072 return &bpf_sk_storage_get_proto;
6073 case BPF_FUNC_sk_storage_delete:
6074 return &bpf_sk_storage_delete_proto;
6075 default:
6076 return bpf_base_func_proto(func_id);
6077 }
6078}
6079
6080static const struct bpf_func_proto *
6081sk_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6082{
6083 switch (func_id) {
6084 case BPF_FUNC_skb_load_bytes:
6085 return &bpf_skb_load_bytes_proto;
6086 case BPF_FUNC_skb_load_bytes_relative:
6087 return &bpf_skb_load_bytes_relative_proto;
6088 case BPF_FUNC_get_socket_cookie:
6089 return &bpf_get_socket_cookie_proto;
6090 case BPF_FUNC_get_socket_uid:
6091 return &bpf_get_socket_uid_proto;
6092 case BPF_FUNC_perf_event_output:
6093 return &bpf_skb_event_output_proto;
6094 default:
6095 return bpf_base_func_proto(func_id);
6096 }
6097}
6098
6099const struct bpf_func_proto bpf_sk_storage_get_proto __weak;
6100const struct bpf_func_proto bpf_sk_storage_delete_proto __weak;
6101
6102static const struct bpf_func_proto *
6103cg_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6104{
6105 switch (func_id) {
6106 case BPF_FUNC_get_local_storage:
6107 return &bpf_get_local_storage_proto;
6108 case BPF_FUNC_sk_fullsock:
6109 return &bpf_sk_fullsock_proto;
6110 case BPF_FUNC_sk_storage_get:
6111 return &bpf_sk_storage_get_proto;
6112 case BPF_FUNC_sk_storage_delete:
6113 return &bpf_sk_storage_delete_proto;
6114 case BPF_FUNC_perf_event_output:
6115 return &bpf_skb_event_output_proto;
6116#ifdef CONFIG_SOCK_CGROUP_DATA
6117 case BPF_FUNC_skb_cgroup_id:
6118 return &bpf_skb_cgroup_id_proto;
6119#endif
6120#ifdef CONFIG_INET
6121 case BPF_FUNC_tcp_sock:
6122 return &bpf_tcp_sock_proto;
6123 case BPF_FUNC_get_listener_sock:
6124 return &bpf_get_listener_sock_proto;
6125 case BPF_FUNC_skb_ecn_set_ce:
6126 return &bpf_skb_ecn_set_ce_proto;
6127#endif
6128 default:
6129 return sk_filter_func_proto(func_id, prog);
6130 }
6131}
6132
6133static const struct bpf_func_proto *
6134tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6135{
6136 switch (func_id) {
6137 case BPF_FUNC_skb_store_bytes:
6138 return &bpf_skb_store_bytes_proto;
6139 case BPF_FUNC_skb_load_bytes:
6140 return &bpf_skb_load_bytes_proto;
6141 case BPF_FUNC_skb_load_bytes_relative:
6142 return &bpf_skb_load_bytes_relative_proto;
6143 case BPF_FUNC_skb_pull_data:
6144 return &bpf_skb_pull_data_proto;
6145 case BPF_FUNC_csum_diff:
6146 return &bpf_csum_diff_proto;
6147 case BPF_FUNC_csum_update:
6148 return &bpf_csum_update_proto;
6149 case BPF_FUNC_l3_csum_replace:
6150 return &bpf_l3_csum_replace_proto;
6151 case BPF_FUNC_l4_csum_replace:
6152 return &bpf_l4_csum_replace_proto;
6153 case BPF_FUNC_clone_redirect:
6154 return &bpf_clone_redirect_proto;
6155 case BPF_FUNC_get_cgroup_classid:
6156 return &bpf_get_cgroup_classid_proto;
6157 case BPF_FUNC_skb_vlan_push:
6158 return &bpf_skb_vlan_push_proto;
6159 case BPF_FUNC_skb_vlan_pop:
6160 return &bpf_skb_vlan_pop_proto;
6161 case BPF_FUNC_skb_change_proto:
6162 return &bpf_skb_change_proto_proto;
6163 case BPF_FUNC_skb_change_type:
6164 return &bpf_skb_change_type_proto;
6165 case BPF_FUNC_skb_adjust_room:
6166 return &bpf_skb_adjust_room_proto;
6167 case BPF_FUNC_skb_change_tail:
6168 return &bpf_skb_change_tail_proto;
6169 case BPF_FUNC_skb_get_tunnel_key:
6170 return &bpf_skb_get_tunnel_key_proto;
6171 case BPF_FUNC_skb_set_tunnel_key:
6172 return bpf_get_skb_set_tunnel_proto(func_id);
6173 case BPF_FUNC_skb_get_tunnel_opt:
6174 return &bpf_skb_get_tunnel_opt_proto;
6175 case BPF_FUNC_skb_set_tunnel_opt:
6176 return bpf_get_skb_set_tunnel_proto(func_id);
6177 case BPF_FUNC_redirect:
6178 return &bpf_redirect_proto;
6179 case BPF_FUNC_get_route_realm:
6180 return &bpf_get_route_realm_proto;
6181 case BPF_FUNC_get_hash_recalc:
6182 return &bpf_get_hash_recalc_proto;
6183 case BPF_FUNC_set_hash_invalid:
6184 return &bpf_set_hash_invalid_proto;
6185 case BPF_FUNC_set_hash:
6186 return &bpf_set_hash_proto;
6187 case BPF_FUNC_perf_event_output:
6188 return &bpf_skb_event_output_proto;
6189 case BPF_FUNC_get_smp_processor_id:
6190 return &bpf_get_smp_processor_id_proto;
6191 case BPF_FUNC_skb_under_cgroup:
6192 return &bpf_skb_under_cgroup_proto;
6193 case BPF_FUNC_get_socket_cookie:
6194 return &bpf_get_socket_cookie_proto;
6195 case BPF_FUNC_get_socket_uid:
6196 return &bpf_get_socket_uid_proto;
6197 case BPF_FUNC_fib_lookup:
6198 return &bpf_skb_fib_lookup_proto;
6199 case BPF_FUNC_sk_fullsock:
6200 return &bpf_sk_fullsock_proto;
6201 case BPF_FUNC_sk_storage_get:
6202 return &bpf_sk_storage_get_proto;
6203 case BPF_FUNC_sk_storage_delete:
6204 return &bpf_sk_storage_delete_proto;
6205#ifdef CONFIG_XFRM
6206 case BPF_FUNC_skb_get_xfrm_state:
6207 return &bpf_skb_get_xfrm_state_proto;
6208#endif
6209#ifdef CONFIG_SOCK_CGROUP_DATA
6210 case BPF_FUNC_skb_cgroup_id:
6211 return &bpf_skb_cgroup_id_proto;
6212 case BPF_FUNC_skb_ancestor_cgroup_id:
6213 return &bpf_skb_ancestor_cgroup_id_proto;
6214#endif
6215#ifdef CONFIG_INET
6216 case BPF_FUNC_sk_lookup_tcp:
6217 return &bpf_sk_lookup_tcp_proto;
6218 case BPF_FUNC_sk_lookup_udp:
6219 return &bpf_sk_lookup_udp_proto;
6220 case BPF_FUNC_sk_release:
6221 return &bpf_sk_release_proto;
6222 case BPF_FUNC_tcp_sock:
6223 return &bpf_tcp_sock_proto;
6224 case BPF_FUNC_get_listener_sock:
6225 return &bpf_get_listener_sock_proto;
6226 case BPF_FUNC_skc_lookup_tcp:
6227 return &bpf_skc_lookup_tcp_proto;
6228 case BPF_FUNC_tcp_check_syncookie:
6229 return &bpf_tcp_check_syncookie_proto;
6230 case BPF_FUNC_skb_ecn_set_ce:
6231 return &bpf_skb_ecn_set_ce_proto;
6232 case BPF_FUNC_tcp_gen_syncookie:
6233 return &bpf_tcp_gen_syncookie_proto;
6234#endif
6235 default:
6236 return bpf_base_func_proto(func_id);
6237 }
6238}
6239
6240static const struct bpf_func_proto *
6241xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6242{
6243 switch (func_id) {
6244 case BPF_FUNC_perf_event_output:
6245 return &bpf_xdp_event_output_proto;
6246 case BPF_FUNC_get_smp_processor_id:
6247 return &bpf_get_smp_processor_id_proto;
6248 case BPF_FUNC_csum_diff:
6249 return &bpf_csum_diff_proto;
6250 case BPF_FUNC_xdp_adjust_head:
6251 return &bpf_xdp_adjust_head_proto;
6252 case BPF_FUNC_xdp_adjust_meta:
6253 return &bpf_xdp_adjust_meta_proto;
6254 case BPF_FUNC_redirect:
6255 return &bpf_xdp_redirect_proto;
6256 case BPF_FUNC_redirect_map:
6257 return &bpf_xdp_redirect_map_proto;
6258 case BPF_FUNC_xdp_adjust_tail:
6259 return &bpf_xdp_adjust_tail_proto;
6260 case BPF_FUNC_fib_lookup:
6261 return &bpf_xdp_fib_lookup_proto;
6262#ifdef CONFIG_INET
6263 case BPF_FUNC_sk_lookup_udp:
6264 return &bpf_xdp_sk_lookup_udp_proto;
6265 case BPF_FUNC_sk_lookup_tcp:
6266 return &bpf_xdp_sk_lookup_tcp_proto;
6267 case BPF_FUNC_sk_release:
6268 return &bpf_sk_release_proto;
6269 case BPF_FUNC_skc_lookup_tcp:
6270 return &bpf_xdp_skc_lookup_tcp_proto;
6271 case BPF_FUNC_tcp_check_syncookie:
6272 return &bpf_tcp_check_syncookie_proto;
6273 case BPF_FUNC_tcp_gen_syncookie:
6274 return &bpf_tcp_gen_syncookie_proto;
6275#endif
6276 default:
6277 return bpf_base_func_proto(func_id);
6278 }
6279}
6280
6281const struct bpf_func_proto bpf_sock_map_update_proto __weak;
6282const struct bpf_func_proto bpf_sock_hash_update_proto __weak;
6283
6284static const struct bpf_func_proto *
6285sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6286{
6287 switch (func_id) {
6288 case BPF_FUNC_setsockopt:
6289 return &bpf_setsockopt_proto;
6290 case BPF_FUNC_getsockopt:
6291 return &bpf_getsockopt_proto;
6292 case BPF_FUNC_sock_ops_cb_flags_set:
6293 return &bpf_sock_ops_cb_flags_set_proto;
6294 case BPF_FUNC_sock_map_update:
6295 return &bpf_sock_map_update_proto;
6296 case BPF_FUNC_sock_hash_update:
6297 return &bpf_sock_hash_update_proto;
6298 case BPF_FUNC_get_socket_cookie:
6299 return &bpf_get_socket_cookie_sock_ops_proto;
6300 case BPF_FUNC_get_local_storage:
6301 return &bpf_get_local_storage_proto;
6302 case BPF_FUNC_perf_event_output:
6303 return &bpf_sockopt_event_output_proto;
6304 case BPF_FUNC_sk_storage_get:
6305 return &bpf_sk_storage_get_proto;
6306 case BPF_FUNC_sk_storage_delete:
6307 return &bpf_sk_storage_delete_proto;
6308#ifdef CONFIG_INET
6309 case BPF_FUNC_tcp_sock:
6310 return &bpf_tcp_sock_proto;
6311#endif
6312 default:
6313 return bpf_base_func_proto(func_id);
6314 }
6315}
6316
6317const struct bpf_func_proto bpf_msg_redirect_map_proto __weak;
6318const struct bpf_func_proto bpf_msg_redirect_hash_proto __weak;
6319
6320static const struct bpf_func_proto *
6321sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6322{
6323 switch (func_id) {
6324 case BPF_FUNC_msg_redirect_map:
6325 return &bpf_msg_redirect_map_proto;
6326 case BPF_FUNC_msg_redirect_hash:
6327 return &bpf_msg_redirect_hash_proto;
6328 case BPF_FUNC_msg_apply_bytes:
6329 return &bpf_msg_apply_bytes_proto;
6330 case BPF_FUNC_msg_cork_bytes:
6331 return &bpf_msg_cork_bytes_proto;
6332 case BPF_FUNC_msg_pull_data:
6333 return &bpf_msg_pull_data_proto;
6334 case BPF_FUNC_msg_push_data:
6335 return &bpf_msg_push_data_proto;
6336 case BPF_FUNC_msg_pop_data:
6337 return &bpf_msg_pop_data_proto;
6338 default:
6339 return bpf_base_func_proto(func_id);
6340 }
6341}
6342
6343const struct bpf_func_proto bpf_sk_redirect_map_proto __weak;
6344const struct bpf_func_proto bpf_sk_redirect_hash_proto __weak;
6345
6346static const struct bpf_func_proto *
6347sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6348{
6349 switch (func_id) {
6350 case BPF_FUNC_skb_store_bytes:
6351 return &bpf_skb_store_bytes_proto;
6352 case BPF_FUNC_skb_load_bytes:
6353 return &bpf_skb_load_bytes_proto;
6354 case BPF_FUNC_skb_pull_data:
6355 return &sk_skb_pull_data_proto;
6356 case BPF_FUNC_skb_change_tail:
6357 return &sk_skb_change_tail_proto;
6358 case BPF_FUNC_skb_change_head:
6359 return &sk_skb_change_head_proto;
6360 case BPF_FUNC_get_socket_cookie:
6361 return &bpf_get_socket_cookie_proto;
6362 case BPF_FUNC_get_socket_uid:
6363 return &bpf_get_socket_uid_proto;
6364 case BPF_FUNC_sk_redirect_map:
6365 return &bpf_sk_redirect_map_proto;
6366 case BPF_FUNC_sk_redirect_hash:
6367 return &bpf_sk_redirect_hash_proto;
6368 case BPF_FUNC_perf_event_output:
6369 return &bpf_skb_event_output_proto;
6370#ifdef CONFIG_INET
6371 case BPF_FUNC_sk_lookup_tcp:
6372 return &bpf_sk_lookup_tcp_proto;
6373 case BPF_FUNC_sk_lookup_udp:
6374 return &bpf_sk_lookup_udp_proto;
6375 case BPF_FUNC_sk_release:
6376 return &bpf_sk_release_proto;
6377 case BPF_FUNC_skc_lookup_tcp:
6378 return &bpf_skc_lookup_tcp_proto;
6379#endif
6380 default:
6381 return bpf_base_func_proto(func_id);
6382 }
6383}
6384
6385static const struct bpf_func_proto *
6386flow_dissector_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6387{
6388 switch (func_id) {
6389 case BPF_FUNC_skb_load_bytes:
6390 return &bpf_flow_dissector_load_bytes_proto;
6391 default:
6392 return bpf_base_func_proto(func_id);
6393 }
6394}
6395
6396static const struct bpf_func_proto *
6397lwt_out_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6398{
6399 switch (func_id) {
6400 case BPF_FUNC_skb_load_bytes:
6401 return &bpf_skb_load_bytes_proto;
6402 case BPF_FUNC_skb_pull_data:
6403 return &bpf_skb_pull_data_proto;
6404 case BPF_FUNC_csum_diff:
6405 return &bpf_csum_diff_proto;
6406 case BPF_FUNC_get_cgroup_classid:
6407 return &bpf_get_cgroup_classid_proto;
6408 case BPF_FUNC_get_route_realm:
6409 return &bpf_get_route_realm_proto;
6410 case BPF_FUNC_get_hash_recalc:
6411 return &bpf_get_hash_recalc_proto;
6412 case BPF_FUNC_perf_event_output:
6413 return &bpf_skb_event_output_proto;
6414 case BPF_FUNC_get_smp_processor_id:
6415 return &bpf_get_smp_processor_id_proto;
6416 case BPF_FUNC_skb_under_cgroup:
6417 return &bpf_skb_under_cgroup_proto;
6418 default:
6419 return bpf_base_func_proto(func_id);
6420 }
6421}
6422
6423static const struct bpf_func_proto *
6424lwt_in_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6425{
6426 switch (func_id) {
6427 case BPF_FUNC_lwt_push_encap:
6428 return &bpf_lwt_in_push_encap_proto;
6429 default:
6430 return lwt_out_func_proto(func_id, prog);
6431 }
6432}
6433
6434static const struct bpf_func_proto *
6435lwt_xmit_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6436{
6437 switch (func_id) {
6438 case BPF_FUNC_skb_get_tunnel_key:
6439 return &bpf_skb_get_tunnel_key_proto;
6440 case BPF_FUNC_skb_set_tunnel_key:
6441 return bpf_get_skb_set_tunnel_proto(func_id);
6442 case BPF_FUNC_skb_get_tunnel_opt:
6443 return &bpf_skb_get_tunnel_opt_proto;
6444 case BPF_FUNC_skb_set_tunnel_opt:
6445 return bpf_get_skb_set_tunnel_proto(func_id);
6446 case BPF_FUNC_redirect:
6447 return &bpf_redirect_proto;
6448 case BPF_FUNC_clone_redirect:
6449 return &bpf_clone_redirect_proto;
6450 case BPF_FUNC_skb_change_tail:
6451 return &bpf_skb_change_tail_proto;
6452 case BPF_FUNC_skb_change_head:
6453 return &bpf_skb_change_head_proto;
6454 case BPF_FUNC_skb_store_bytes:
6455 return &bpf_skb_store_bytes_proto;
6456 case BPF_FUNC_csum_update:
6457 return &bpf_csum_update_proto;
6458 case BPF_FUNC_l3_csum_replace:
6459 return &bpf_l3_csum_replace_proto;
6460 case BPF_FUNC_l4_csum_replace:
6461 return &bpf_l4_csum_replace_proto;
6462 case BPF_FUNC_set_hash_invalid:
6463 return &bpf_set_hash_invalid_proto;
6464 case BPF_FUNC_lwt_push_encap:
6465 return &bpf_lwt_xmit_push_encap_proto;
6466 default:
6467 return lwt_out_func_proto(func_id, prog);
6468 }
6469}
6470
6471static const struct bpf_func_proto *
6472lwt_seg6local_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6473{
6474 switch (func_id) {
6475#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
6476 case BPF_FUNC_lwt_seg6_store_bytes:
6477 return &bpf_lwt_seg6_store_bytes_proto;
6478 case BPF_FUNC_lwt_seg6_action:
6479 return &bpf_lwt_seg6_action_proto;
6480 case BPF_FUNC_lwt_seg6_adjust_srh:
6481 return &bpf_lwt_seg6_adjust_srh_proto;
6482#endif
6483 default:
6484 return lwt_out_func_proto(func_id, prog);
6485 }
6486}
6487
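/* Common access validation for struct __sk_buff, shared by the socket
 * filter, cgroup-skb and lwt program types below; each of those layers its
 * own restrictions on top before delegating here.
 */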
6488static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type,
6489 const struct bpf_prog *prog,
6490 struct bpf_insn_access_aux *info)
6491{
6492 const int size_default = sizeof(__u32);
6493
6494 if (off < 0 || off >= sizeof(struct __sk_buff))
6495 return false;
6496
6497 /* The verifier guarantees that size > 0. */
6498 if (off % size != 0)
6499 return false;
6500
6501 switch (off) {
6502 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
6503 if (off + size > offsetofend(struct __sk_buff, cb[4]))
6504 return false;
6505 break;
6506 case bpf_ctx_range_till(struct __sk_buff, remote_ip6[0], remote_ip6[3]):
6507 case bpf_ctx_range_till(struct __sk_buff, local_ip6[0], local_ip6[3]):
6508 case bpf_ctx_range_till(struct __sk_buff, remote_ip4, remote_ip4):
6509 case bpf_ctx_range_till(struct __sk_buff, local_ip4, local_ip4):
6510 case bpf_ctx_range(struct __sk_buff, data):
6511 case bpf_ctx_range(struct __sk_buff, data_meta):
6512 case bpf_ctx_range(struct __sk_buff, data_end):
6513 if (size != size_default)
6514 return false;
6515 break;
6516 case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
6517 return false;
6518 case bpf_ctx_range(struct __sk_buff, tstamp):
6519 if (size != sizeof(__u64))
6520 return false;
6521 break;
6522 case offsetof(struct __sk_buff, sk):
6523 if (type == BPF_WRITE || size != sizeof(__u64))
6524 return false;
6525 info->reg_type = PTR_TO_SOCK_COMMON_OR_NULL;
6526 break;
6527 default:
6528 /* Only narrow read access allowed for now. */
6529 if (type == BPF_WRITE) {
6530 if (size != size_default)
6531 return false;
6532 } else {
6533 bpf_ctx_record_field_size(info, size_default);
6534 if (!bpf_ctx_narrow_access_ok(off, size, size_default))
6535 return false;
6536 }
6537 }
6538
6539 return true;
6540}
6541
6542static bool sk_filter_is_valid_access(int off, int size,
6543 enum bpf_access_type type,
6544 const struct bpf_prog *prog,
6545 struct bpf_insn_access_aux *info)
6546{
6547 switch (off) {
6548 case bpf_ctx_range(struct __sk_buff, tc_classid):
6549 case bpf_ctx_range(struct __sk_buff, data):
6550 case bpf_ctx_range(struct __sk_buff, data_meta):
6551 case bpf_ctx_range(struct __sk_buff, data_end):
6552 case bpf_ctx_range_till(struct __sk_buff, family, local_port):
6553 case bpf_ctx_range(struct __sk_buff, tstamp):
6554 case bpf_ctx_range(struct __sk_buff, wire_len):
6555 return false;
6556 }
6557
6558 if (type == BPF_WRITE) {
6559 switch (off) {
6560 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
6561 break;
6562 default:
6563 return false;
6564 }
6565 }
6566
6567 return bpf_skb_is_valid_access(off, size, type, prog, info);
6568}
6569
6570static bool cg_skb_is_valid_access(int off, int size,
6571 enum bpf_access_type type,
6572 const struct bpf_prog *prog,
6573 struct bpf_insn_access_aux *info)
6574{
6575 switch (off) {
6576 case bpf_ctx_range(struct __sk_buff, tc_classid):
6577 case bpf_ctx_range(struct __sk_buff, data_meta):
6578 case bpf_ctx_range(struct __sk_buff, wire_len):
6579 return false;
6580 case bpf_ctx_range(struct __sk_buff, data):
6581 case bpf_ctx_range(struct __sk_buff, data_end):
6582 if (!capable(CAP_SYS_ADMIN))
6583 return false;
6584 break;
6585 }
6586
6587 if (type == BPF_WRITE) {
6588 switch (off) {
6589 case bpf_ctx_range(struct __sk_buff, mark):
6590 case bpf_ctx_range(struct __sk_buff, priority):
6591 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
6592 break;
6593 case bpf_ctx_range(struct __sk_buff, tstamp):
6594 if (!capable(CAP_SYS_ADMIN))
6595 return false;
6596 break;
6597 default:
6598 return false;
6599 }
6600 }
6601
6602 switch (off) {
6603 case bpf_ctx_range(struct __sk_buff, data):
6604 info->reg_type = PTR_TO_PACKET;
6605 break;
6606 case bpf_ctx_range(struct __sk_buff, data_end):
6607 info->reg_type = PTR_TO_PACKET_END;
6608 break;
6609 }
6610
6611 return bpf_skb_is_valid_access(off, size, type, prog, info);
6612}
6613
6614static bool lwt_is_valid_access(int off, int size,
6615 enum bpf_access_type type,
6616 const struct bpf_prog *prog,
6617 struct bpf_insn_access_aux *info)
6618{
6619 switch (off) {
6620 case bpf_ctx_range(struct __sk_buff, tc_classid):
6621 case bpf_ctx_range_till(struct __sk_buff, family, local_port):
6622 case bpf_ctx_range(struct __sk_buff, data_meta):
6623 case bpf_ctx_range(struct __sk_buff, tstamp):
6624 case bpf_ctx_range(struct __sk_buff, wire_len):
6625 return false;
6626 }
6627
6628 if (type == BPF_WRITE) {
6629 switch (off) {
6630 case bpf_ctx_range(struct __sk_buff, mark):
6631 case bpf_ctx_range(struct __sk_buff, priority):
6632 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
6633 break;
6634 default:
6635 return false;
6636 }
6637 }
6638
6639 switch (off) {
6640 case bpf_ctx_range(struct __sk_buff, data):
6641 info->reg_type = PTR_TO_PACKET;
6642 break;
6643 case bpf_ctx_range(struct __sk_buff, data_end):
6644 info->reg_type = PTR_TO_PACKET_END;
6645 break;
6646 }
6647
6648 return bpf_skb_is_valid_access(off, size, type, prog, info);
6649}
6650
/* Attach type specific accesses */
6652static bool __sock_filter_check_attach_type(int off,
6653 enum bpf_access_type access_type,
6654 enum bpf_attach_type attach_type)
6655{
6656 switch (off) {
6657 case offsetof(struct bpf_sock, bound_dev_if):
6658 case offsetof(struct bpf_sock, mark):
6659 case offsetof(struct bpf_sock, priority):
6660 switch (attach_type) {
6661 case BPF_CGROUP_INET_SOCK_CREATE:
6662 goto full_access;
6663 default:
6664 return false;
6665 }
6666 case bpf_ctx_range(struct bpf_sock, src_ip4):
6667 switch (attach_type) {
6668 case BPF_CGROUP_INET4_POST_BIND:
6669 goto read_only;
6670 default:
6671 return false;
6672 }
6673 case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
6674 switch (attach_type) {
6675 case BPF_CGROUP_INET6_POST_BIND:
6676 goto read_only;
6677 default:
6678 return false;
6679 }
6680 case bpf_ctx_range(struct bpf_sock, src_port):
6681 switch (attach_type) {
6682 case BPF_CGROUP_INET4_POST_BIND:
6683 case BPF_CGROUP_INET6_POST_BIND:
6684 goto read_only;
6685 default:
6686 return false;
6687 }
6688 }
6689read_only:
6690 return access_type == BPF_READ;
6691full_access:
6692 return true;
6693}
6694
6695bool bpf_sock_common_is_valid_access(int off, int size,
6696 enum bpf_access_type type,
6697 struct bpf_insn_access_aux *info)
6698{
6699 switch (off) {
6700 case bpf_ctx_range_till(struct bpf_sock, type, priority):
6701 return false;
6702 default:
6703 return bpf_sock_is_valid_access(off, size, type, info);
6704 }
6705}
6706
6707bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
6708 struct bpf_insn_access_aux *info)
6709{
6710 const int size_default = sizeof(__u32);
6711
6712 if (off < 0 || off >= sizeof(struct bpf_sock))
6713 return false;
6714 if (off % size != 0)
6715 return false;
6716
6717 switch (off) {
6718 case offsetof(struct bpf_sock, state):
6719 case offsetof(struct bpf_sock, family):
6720 case offsetof(struct bpf_sock, type):
6721 case offsetof(struct bpf_sock, protocol):
6722 case offsetof(struct bpf_sock, dst_port):
6723 case offsetof(struct bpf_sock, src_port):
6724 case bpf_ctx_range(struct bpf_sock, src_ip4):
6725 case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
6726 case bpf_ctx_range(struct bpf_sock, dst_ip4):
6727 case bpf_ctx_range_till(struct bpf_sock, dst_ip6[0], dst_ip6[3]):
6728 bpf_ctx_record_field_size(info, size_default);
6729 return bpf_ctx_narrow_access_ok(off, size, size_default);
6730 }
6731
6732 return size == size_default;
6733}
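
/* A sketch of what this permits (illustrative only): the fields listed
 * above may be read narrowly, e.g. the first byte of the state word,
 *
 *	__u8 state = *((__u8 *)sk + offsetof(struct bpf_sock, state));
 *
 * while all other accesses must use the 4-byte default size.
 */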
6734
6735static bool sock_filter_is_valid_access(int off, int size,
6736 enum bpf_access_type type,
6737 const struct bpf_prog *prog,
6738 struct bpf_insn_access_aux *info)
6739{
6740 if (!bpf_sock_is_valid_access(off, size, type, info))
6741 return false;
6742 return __sock_filter_check_attach_type(off, type,
6743 prog->expected_attach_type);
6744}
6745
6746static int bpf_noop_prologue(struct bpf_insn *insn_buf, bool direct_write,
6747 const struct bpf_prog *prog)
6748{
	/* Neither direct read nor direct write requires any preliminary
	 * action.
	 */
6752 return 0;
6753}
6754
6755static int bpf_unclone_prologue(struct bpf_insn *insn_buf, bool direct_write,
6756 const struct bpf_prog *prog, int drop_verdict)
6757{
6758 struct bpf_insn *insn = insn_buf;
6759
6760 if (!direct_write)
6761 return 0;
6762
	/* if (!skb->cloned)
	 *       goto start;
	 *
	 * (Fast-path, otherwise approximation that we might be
	 *  a clone, do the rest in helper.)
	 */
6769 *insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_6, BPF_REG_1, CLONED_OFFSET());
6770 *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_6, CLONED_MASK);
6771 *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 7);

	/* ret = bpf_skb_pull_data(skb, 0); */
6774 *insn++ = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
6775 *insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_2, BPF_REG_2);
6776 *insn++ = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6777 BPF_FUNC_skb_pull_data);
	/* if (!ret)
	 *      goto restore;
	 * return drop_verdict;
	 */
6782 *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2);
6783 *insn++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, drop_verdict);
6784 *insn++ = BPF_EXIT_INSN();
6785
	/* restore: */
6787 *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
	/* start: */
6789 *insn++ = prog->insnsi[0];
6790
6791 return insn - insn_buf;
6792}
6793
6794static int bpf_gen_ld_abs(const struct bpf_insn *orig,
6795 struct bpf_insn *insn_buf)
6796{
6797 bool indirect = BPF_MODE(orig->code) == BPF_IND;
6798 struct bpf_insn *insn = insn_buf;
6799
	/* We're guaranteed here that CTX is in R6. */
6801 *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_CTX);
6802 if (!indirect) {
6803 *insn++ = BPF_MOV64_IMM(BPF_REG_2, orig->imm);
6804 } else {
6805 *insn++ = BPF_MOV64_REG(BPF_REG_2, orig->src_reg);
6806 if (orig->imm)
6807 *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, orig->imm);
6808 }
6809
6810 switch (BPF_SIZE(orig->code)) {
6811 case BPF_B:
6812 *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8_no_cache);
6813 break;
6814 case BPF_H:
6815 *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16_no_cache);
6816 break;
6817 case BPF_W:
6818 *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32_no_cache);
6819 break;
6820 }
6821
6822 *insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 2);
6823 *insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_0, BPF_REG_0);
6824 *insn++ = BPF_EXIT_INSN();
6825
6826 return insn - insn_buf;
6827}
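
/* The sequence built above corresponds roughly to this pseudo-C:
 *
 *	R1 = ctx;
 *	R2 = imm32;		// or src_reg + imm32 for BPF_IND
 *	R0 = bpf_skb_load_helper_{8,16,32}_no_cache(R1, R2);
 *	if (R0 < 0) {		// negative return means the load failed
 *		R0 = 0;
 *		return R0;
 *	}
 *
 * which preserves the classic LD_ABS/LD_IND behaviour of terminating the
 * program with return value 0 on an out-of-range access.
 */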
6828
6829static int tc_cls_act_prologue(struct bpf_insn *insn_buf, bool direct_write,
6830 const struct bpf_prog *prog)
6831{
6832 return bpf_unclone_prologue(insn_buf, direct_write, prog, TC_ACT_SHOT);
6833}
6834
6835static bool tc_cls_act_is_valid_access(int off, int size,
6836 enum bpf_access_type type,
6837 const struct bpf_prog *prog,
6838 struct bpf_insn_access_aux *info)
6839{
6840 if (type == BPF_WRITE) {
6841 switch (off) {
6842 case bpf_ctx_range(struct __sk_buff, mark):
6843 case bpf_ctx_range(struct __sk_buff, tc_index):
6844 case bpf_ctx_range(struct __sk_buff, priority):
6845 case bpf_ctx_range(struct __sk_buff, tc_classid):
6846 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
6847 case bpf_ctx_range(struct __sk_buff, tstamp):
6848 case bpf_ctx_range(struct __sk_buff, queue_mapping):
6849 break;
6850 default:
6851 return false;
6852 }
6853 }
6854
6855 switch (off) {
6856 case bpf_ctx_range(struct __sk_buff, data):
6857 info->reg_type = PTR_TO_PACKET;
6858 break;
6859 case bpf_ctx_range(struct __sk_buff, data_meta):
6860 info->reg_type = PTR_TO_PACKET_META;
6861 break;
6862 case bpf_ctx_range(struct __sk_buff, data_end):
6863 info->reg_type = PTR_TO_PACKET_END;
6864 break;
6865 case bpf_ctx_range_till(struct __sk_buff, family, local_port):
6866 return false;
6867 }
6868
6869 return bpf_skb_is_valid_access(off, size, type, prog, info);
6870}
6871
6872static bool __is_valid_xdp_access(int off, int size)
6873{
6874 if (off < 0 || off >= sizeof(struct xdp_md))
6875 return false;
6876 if (off % size != 0)
6877 return false;
6878 if (size != sizeof(__u32))
6879 return false;
6880
6881 return true;
6882}
6883
6884static bool xdp_is_valid_access(int off, int size,
6885 enum bpf_access_type type,
6886 const struct bpf_prog *prog,
6887 struct bpf_insn_access_aux *info)
6888{
6889 if (type == BPF_WRITE) {
6890 if (bpf_prog_is_dev_bound(prog->aux)) {
6891 switch (off) {
6892 case offsetof(struct xdp_md, rx_queue_index):
6893 return __is_valid_xdp_access(off, size);
6894 }
6895 }
6896 return false;
6897 }
6898
6899 switch (off) {
6900 case offsetof(struct xdp_md, data):
6901 info->reg_type = PTR_TO_PACKET;
6902 break;
6903 case offsetof(struct xdp_md, data_meta):
6904 info->reg_type = PTR_TO_PACKET_META;
6905 break;
6906 case offsetof(struct xdp_md, data_end):
6907 info->reg_type = PTR_TO_PACKET_END;
6908 break;
6909 }
6910
6911 return __is_valid_xdp_access(off, size);
6912}
6913
6914void bpf_warn_invalid_xdp_action(u32 act)
6915{
6916 const u32 act_max = XDP_REDIRECT;
6917
6918 WARN_ONCE(1, "%s XDP return value %u, expect packet loss!\n",
6919 act > act_max ? "Illegal" : "Driver unsupported",
6920 act);
6921}
6922EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);
6923
6924static bool sock_addr_is_valid_access(int off, int size,
6925 enum bpf_access_type type,
6926 const struct bpf_prog *prog,
6927 struct bpf_insn_access_aux *info)
6928{
6929 const int size_default = sizeof(__u32);
6930
6931 if (off < 0 || off >= sizeof(struct bpf_sock_addr))
6932 return false;
6933 if (off % size != 0)
6934 return false;
6935
	/* Disallow access to IPv6 fields from an IPv4 context and
	 * vice versa.
	 */
6939 switch (off) {
6940 case bpf_ctx_range(struct bpf_sock_addr, user_ip4):
6941 switch (prog->expected_attach_type) {
6942 case BPF_CGROUP_INET4_BIND:
6943 case BPF_CGROUP_INET4_CONNECT:
6944 case BPF_CGROUP_UDP4_SENDMSG:
6945 case BPF_CGROUP_UDP4_RECVMSG:
6946 break;
6947 default:
6948 return false;
6949 }
6950 break;
6951 case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
6952 switch (prog->expected_attach_type) {
6953 case BPF_CGROUP_INET6_BIND:
6954 case BPF_CGROUP_INET6_CONNECT:
6955 case BPF_CGROUP_UDP6_SENDMSG:
6956 case BPF_CGROUP_UDP6_RECVMSG:
6957 break;
6958 default:
6959 return false;
6960 }
6961 break;
6962 case bpf_ctx_range(struct bpf_sock_addr, msg_src_ip4):
6963 switch (prog->expected_attach_type) {
6964 case BPF_CGROUP_UDP4_SENDMSG:
6965 break;
6966 default:
6967 return false;
6968 }
6969 break;
6970 case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0],
6971 msg_src_ip6[3]):
6972 switch (prog->expected_attach_type) {
6973 case BPF_CGROUP_UDP6_SENDMSG:
6974 break;
6975 default:
6976 return false;
6977 }
6978 break;
6979 }
6980
6981 switch (off) {
6982 case bpf_ctx_range(struct bpf_sock_addr, user_ip4):
6983 case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
6984 case bpf_ctx_range(struct bpf_sock_addr, msg_src_ip4):
6985 case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0],
6986 msg_src_ip6[3]):
6987 if (type == BPF_READ) {
6988 bpf_ctx_record_field_size(info, size_default);
6989
6990 if (bpf_ctx_wide_access_ok(off, size,
6991 struct bpf_sock_addr,
6992 user_ip6))
6993 return true;
6994
6995 if (bpf_ctx_wide_access_ok(off, size,
6996 struct bpf_sock_addr,
6997 msg_src_ip6))
6998 return true;
6999
7000 if (!bpf_ctx_narrow_access_ok(off, size, size_default))
7001 return false;
7002 } else {
7003 if (bpf_ctx_wide_access_ok(off, size,
7004 struct bpf_sock_addr,
7005 user_ip6))
7006 return true;
7007
7008 if (bpf_ctx_wide_access_ok(off, size,
7009 struct bpf_sock_addr,
7010 msg_src_ip6))
7011 return true;
7012
7013 if (size != size_default)
7014 return false;
7015 }
7016 break;
7017 case bpf_ctx_range(struct bpf_sock_addr, user_port):
7018 if (size != size_default)
7019 return false;
7020 break;
7021 case offsetof(struct bpf_sock_addr, sk):
7022 if (type != BPF_READ)
7023 return false;
7024 if (size != sizeof(__u64))
7025 return false;
7026 info->reg_type = PTR_TO_SOCKET;
7027 break;
7028 default:
7029 if (type == BPF_READ) {
7030 if (size != size_default)
7031 return false;
7032 } else {
7033 return false;
7034 }
7035 }
7036
7037 return true;
7038}
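
/* Example of the wide access allowed above (illustrative only): a program
 * may copy an IPv6 user address with two aligned 8-byte loads,
 *
 *	__u64 hi = *(__u64 *)&ctx->user_ip6[0];
 *	__u64 lo = *(__u64 *)&ctx->user_ip6[2];
 *
 * instead of four 4-byte loads, provided the offset is 8-byte aligned as
 * bpf_ctx_wide_access_ok() requires.
 */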
7039
7040static bool sock_ops_is_valid_access(int off, int size,
7041 enum bpf_access_type type,
7042 const struct bpf_prog *prog,
7043 struct bpf_insn_access_aux *info)
7044{
7045 const int size_default = sizeof(__u32);
7046
7047 if (off < 0 || off >= sizeof(struct bpf_sock_ops))
7048 return false;
7049
	/* The verifier guarantees that size > 0. */
7051 if (off % size != 0)
7052 return false;
7053
7054 if (type == BPF_WRITE) {
7055 switch (off) {
7056 case offsetof(struct bpf_sock_ops, reply):
7057 case offsetof(struct bpf_sock_ops, sk_txhash):
7058 if (size != size_default)
7059 return false;
7060 break;
7061 default:
7062 return false;
7063 }
7064 } else {
7065 switch (off) {
7066 case bpf_ctx_range_till(struct bpf_sock_ops, bytes_received,
7067 bytes_acked):
7068 if (size != sizeof(__u64))
7069 return false;
7070 break;
7071 case offsetof(struct bpf_sock_ops, sk):
7072 if (size != sizeof(__u64))
7073 return false;
7074 info->reg_type = PTR_TO_SOCKET_OR_NULL;
7075 break;
7076 default:
7077 if (size != size_default)
7078 return false;
7079 break;
7080 }
7081 }
7082
7083 return true;
7084}
7085
7086static int sk_skb_prologue(struct bpf_insn *insn_buf, bool direct_write,
7087 const struct bpf_prog *prog)
7088{
7089 return bpf_unclone_prologue(insn_buf, direct_write, prog, SK_DROP);
7090}
7091
7092static bool sk_skb_is_valid_access(int off, int size,
7093 enum bpf_access_type type,
7094 const struct bpf_prog *prog,
7095 struct bpf_insn_access_aux *info)
7096{
7097 switch (off) {
7098 case bpf_ctx_range(struct __sk_buff, tc_classid):
7099 case bpf_ctx_range(struct __sk_buff, data_meta):
7100 case bpf_ctx_range(struct __sk_buff, tstamp):
7101 case bpf_ctx_range(struct __sk_buff, wire_len):
7102 return false;
7103 }
7104
7105 if (type == BPF_WRITE) {
7106 switch (off) {
7107 case bpf_ctx_range(struct __sk_buff, tc_index):
7108 case bpf_ctx_range(struct __sk_buff, priority):
7109 break;
7110 default:
7111 return false;
7112 }
7113 }
7114
7115 switch (off) {
7116 case bpf_ctx_range(struct __sk_buff, mark):
7117 return false;
7118 case bpf_ctx_range(struct __sk_buff, data):
7119 info->reg_type = PTR_TO_PACKET;
7120 break;
7121 case bpf_ctx_range(struct __sk_buff, data_end):
7122 info->reg_type = PTR_TO_PACKET_END;
7123 break;
7124 }
7125
7126 return bpf_skb_is_valid_access(off, size, type, prog, info);
7127}
7128
7129static bool sk_msg_is_valid_access(int off, int size,
7130 enum bpf_access_type type,
7131 const struct bpf_prog *prog,
7132 struct bpf_insn_access_aux *info)
7133{
7134 if (type == BPF_WRITE)
7135 return false;
7136
7137 if (off % size != 0)
7138 return false;
7139
7140 switch (off) {
7141 case offsetof(struct sk_msg_md, data):
7142 info->reg_type = PTR_TO_PACKET;
7143 if (size != sizeof(__u64))
7144 return false;
7145 break;
7146 case offsetof(struct sk_msg_md, data_end):
7147 info->reg_type = PTR_TO_PACKET_END;
7148 if (size != sizeof(__u64))
7149 return false;
7150 break;
7151 case bpf_ctx_range(struct sk_msg_md, family):
7152 case bpf_ctx_range(struct sk_msg_md, remote_ip4):
7153 case bpf_ctx_range(struct sk_msg_md, local_ip4):
7154 case bpf_ctx_range_till(struct sk_msg_md, remote_ip6[0], remote_ip6[3]):
7155 case bpf_ctx_range_till(struct sk_msg_md, local_ip6[0], local_ip6[3]):
7156 case bpf_ctx_range(struct sk_msg_md, remote_port):
7157 case bpf_ctx_range(struct sk_msg_md, local_port):
7158 case bpf_ctx_range(struct sk_msg_md, size):
7159 if (size != sizeof(__u32))
7160 return false;
7161 break;
7162 default:
7163 return false;
7164 }
7165 return true;
7166}
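
/* In other words (illustrative only), an sk_msg program must load the two
 * data pointers with full 8-byte reads,
 *
 *	void *data = msg->data;
 *	void *data_end = msg->data_end;
 *
 * while the remaining fields are fixed-size 4-byte, read-only scalars.
 */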
7167
7168static bool flow_dissector_is_valid_access(int off, int size,
7169 enum bpf_access_type type,
7170 const struct bpf_prog *prog,
7171 struct bpf_insn_access_aux *info)
7172{
7173 const int size_default = sizeof(__u32);
7174
7175 if (off < 0 || off >= sizeof(struct __sk_buff))
7176 return false;
7177
7178 if (type == BPF_WRITE)
7179 return false;
7180
7181 switch (off) {
7182 case bpf_ctx_range(struct __sk_buff, data):
7183 if (size != size_default)
7184 return false;
7185 info->reg_type = PTR_TO_PACKET;
7186 return true;
7187 case bpf_ctx_range(struct __sk_buff, data_end):
7188 if (size != size_default)
7189 return false;
7190 info->reg_type = PTR_TO_PACKET_END;
7191 return true;
7192 case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
7193 if (size != sizeof(__u64))
7194 return false;
7195 info->reg_type = PTR_TO_FLOW_KEYS;
7196 return true;
7197 default:
7198 return false;
7199 }
7200}
7201
7202static u32 flow_dissector_convert_ctx_access(enum bpf_access_type type,
7203 const struct bpf_insn *si,
7204 struct bpf_insn *insn_buf,
7205 struct bpf_prog *prog,
7206 u32 *target_size)
7208{
7209 struct bpf_insn *insn = insn_buf;
7210
7211 switch (si->off) {
7212 case offsetof(struct __sk_buff, data):
7213 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, data),
7214 si->dst_reg, si->src_reg,
7215 offsetof(struct bpf_flow_dissector, data));
7216 break;
7217
7218 case offsetof(struct __sk_buff, data_end):
7219 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, data_end),
7220 si->dst_reg, si->src_reg,
7221 offsetof(struct bpf_flow_dissector, data_end));
7222 break;
7223
7224 case offsetof(struct __sk_buff, flow_keys):
7225 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, flow_keys),
7226 si->dst_reg, si->src_reg,
7227 offsetof(struct bpf_flow_dissector, flow_keys));
7228 break;
7229 }
7230
7231 return insn - insn_buf;
7232}
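
/* So for flow dissector programs a context load such as (pseudo-C)
 *
 *	void *data = (void *)(long)skb->data;
 *
 * is rewritten to fetch bpf_flow_dissector::data, since these programs run
 * on a struct bpf_flow_dissector rather than on a real sk_buff.
 */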
7233
7234static u32 bpf_convert_ctx_access(enum bpf_access_type type,
7235 const struct bpf_insn *si,
7236 struct bpf_insn *insn_buf,
7237 struct bpf_prog *prog, u32 *target_size)
7238{
7239 struct bpf_insn *insn = insn_buf;
7240 int off;
7241
7242 switch (si->off) {
7243 case offsetof(struct __sk_buff, len):
7244 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
7245 bpf_target_off(struct sk_buff, len, 4,
7246 target_size));
7247 break;
7248
7249 case offsetof(struct __sk_buff, protocol):
7250 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
7251 bpf_target_off(struct sk_buff, protocol, 2,
7252 target_size));
7253 break;
7254
7255 case offsetof(struct __sk_buff, vlan_proto):
7256 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
7257 bpf_target_off(struct sk_buff, vlan_proto, 2,
7258 target_size));
7259 break;
7260
7261 case offsetof(struct __sk_buff, priority):
7262 if (type == BPF_WRITE)
7263 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
7264 bpf_target_off(struct sk_buff, priority, 4,
7265 target_size));
7266 else
7267 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
7268 bpf_target_off(struct sk_buff, priority, 4,
7269 target_size));
7270 break;
7271
7272 case offsetof(struct __sk_buff, ingress_ifindex):
7273 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
7274 bpf_target_off(struct sk_buff, skb_iif, 4,
7275 target_size));
7276 break;
7277
7278 case offsetof(struct __sk_buff, ifindex):
7279 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
7280 si->dst_reg, si->src_reg,
7281 offsetof(struct sk_buff, dev));
7282 *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
7283 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
7284 bpf_target_off(struct net_device, ifindex, 4,
7285 target_size));
7286 break;
7287
7288 case offsetof(struct __sk_buff, hash):
7289 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
7290 bpf_target_off(struct sk_buff, hash, 4,
7291 target_size));
7292 break;
7293
7294 case offsetof(struct __sk_buff, mark):
7295 if (type == BPF_WRITE)
7296 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
7297 bpf_target_off(struct sk_buff, mark, 4,
7298 target_size));
7299 else
7300 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
7301 bpf_target_off(struct sk_buff, mark, 4,
7302 target_size));
7303 break;
7304
7305 case offsetof(struct __sk_buff, pkt_type):
7306 *target_size = 1;
7307 *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg,
7308 PKT_TYPE_OFFSET());
7309 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, PKT_TYPE_MAX);
7310#ifdef __BIG_ENDIAN_BITFIELD
7311 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, 5);
7312#endif
7313 break;
7314
7315 case offsetof(struct __sk_buff, queue_mapping):
7316 if (type == BPF_WRITE) {
7317 *insn++ = BPF_JMP_IMM(BPF_JGE, si->src_reg, NO_QUEUE_MAPPING, 1);
7318 *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg,
7319 bpf_target_off(struct sk_buff,
7320 queue_mapping,
7321 2, target_size));
7322 } else {
7323 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
7324 bpf_target_off(struct sk_buff,
7325 queue_mapping,
7326 2, target_size));
7327 }
7328 break;
7329
7330 case offsetof(struct __sk_buff, vlan_present):
7331 *target_size = 1;
7332 *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg,
7333 PKT_VLAN_PRESENT_OFFSET());
7334 if (PKT_VLAN_PRESENT_BIT)
7335 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, PKT_VLAN_PRESENT_BIT);
7336 if (PKT_VLAN_PRESENT_BIT < 7)
7337 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, 1);
7338 break;
7339
7340 case offsetof(struct __sk_buff, vlan_tci):
7341 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
7342 bpf_target_off(struct sk_buff, vlan_tci, 2,
7343 target_size));
7344 break;
7345
7346 case offsetof(struct __sk_buff, cb[0]) ...
7347 offsetofend(struct __sk_buff, cb[4]) - 1:
7348 BUILD_BUG_ON(sizeof_field(struct qdisc_skb_cb, data) < 20);
7349 BUILD_BUG_ON((offsetof(struct sk_buff, cb) +
7350 offsetof(struct qdisc_skb_cb, data)) %
7351 sizeof(__u64));
7352
7353 prog->cb_access = 1;
7354 off = si->off;
7355 off -= offsetof(struct __sk_buff, cb[0]);
7356 off += offsetof(struct sk_buff, cb);
7357 off += offsetof(struct qdisc_skb_cb, data);
7358 if (type == BPF_WRITE)
7359 *insn++ = BPF_STX_MEM(BPF_SIZE(si->code), si->dst_reg,
7360 si->src_reg, off);
7361 else
7362 *insn++ = BPF_LDX_MEM(BPF_SIZE(si->code), si->dst_reg,
7363 si->src_reg, off);
7364 break;
7365
7366 case offsetof(struct __sk_buff, tc_classid):
7367 BUILD_BUG_ON(sizeof_field(struct qdisc_skb_cb, tc_classid) != 2);
7368
7369 off = si->off;
7370 off -= offsetof(struct __sk_buff, tc_classid);
7371 off += offsetof(struct sk_buff, cb);
7372 off += offsetof(struct qdisc_skb_cb, tc_classid);
7373 *target_size = 2;
7374 if (type == BPF_WRITE)
7375 *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg,
7376 si->src_reg, off);
7377 else
7378 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg,
7379 si->src_reg, off);
7380 break;
7381
7382 case offsetof(struct __sk_buff, data):
7383 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
7384 si->dst_reg, si->src_reg,
7385 offsetof(struct sk_buff, data));
7386 break;
7387
7388 case offsetof(struct __sk_buff, data_meta):
7389 off = si->off;
7390 off -= offsetof(struct __sk_buff, data_meta);
7391 off += offsetof(struct sk_buff, cb);
7392 off += offsetof(struct bpf_skb_data_end, data_meta);
7393 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
7394 si->src_reg, off);
7395 break;
7396
7397 case offsetof(struct __sk_buff, data_end):
7398 off = si->off;
7399 off -= offsetof(struct __sk_buff, data_end);
7400 off += offsetof(struct sk_buff, cb);
7401 off += offsetof(struct bpf_skb_data_end, data_end);
7402 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
7403 si->src_reg, off);
7404 break;
7405
7406 case offsetof(struct __sk_buff, tc_index):
7407#ifdef CONFIG_NET_SCHED
7408 if (type == BPF_WRITE)
7409 *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg,
7410 bpf_target_off(struct sk_buff, tc_index, 2,
7411 target_size));
7412 else
7413 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
7414 bpf_target_off(struct sk_buff, tc_index, 2,
7415 target_size));
7416#else
7417 *target_size = 2;
7418 if (type == BPF_WRITE)
7419 *insn++ = BPF_MOV64_REG(si->dst_reg, si->dst_reg);
7420 else
7421 *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
7422#endif
7423 break;
7424
7425 case offsetof(struct __sk_buff, napi_id):
7426#if defined(CONFIG_NET_RX_BUSY_POLL)
7427 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
7428 bpf_target_off(struct sk_buff, napi_id, 4,
7429 target_size));
7430 *insn++ = BPF_JMP_IMM(BPF_JGE, si->dst_reg, MIN_NAPI_ID, 1);
7431 *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
7432#else
7433 *target_size = 4;
7434 *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
7435#endif
7436 break;
7437 case offsetof(struct __sk_buff, family):
7438 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_family) != 2);
7439
7440 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
7441 si->dst_reg, si->src_reg,
7442 offsetof(struct sk_buff, sk));
7443 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
7444 bpf_target_off(struct sock_common,
7445 skc_family,
7446 2, target_size));
7447 break;
7448 case offsetof(struct __sk_buff, remote_ip4):
7449 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_daddr) != 4);
7450
7451 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
7452 si->dst_reg, si->src_reg,
7453 offsetof(struct sk_buff, sk));
7454 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
7455 bpf_target_off(struct sock_common,
7456 skc_daddr,
7457 4, target_size));
7458 break;
7459 case offsetof(struct __sk_buff, local_ip4):
7460 BUILD_BUG_ON(sizeof_field(struct sock_common,
7461 skc_rcv_saddr) != 4);
7462
7463 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
7464 si->dst_reg, si->src_reg,
7465 offsetof(struct sk_buff, sk));
7466 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
7467 bpf_target_off(struct sock_common,
7468 skc_rcv_saddr,
7469 4, target_size));
7470 break;
7471 case offsetof(struct __sk_buff, remote_ip6[0]) ...
7472 offsetof(struct __sk_buff, remote_ip6[3]):
7473#if IS_ENABLED(CONFIG_IPV6)
7474 BUILD_BUG_ON(sizeof_field(struct sock_common,
7475 skc_v6_daddr.s6_addr32[0]) != 4);
7476
7477 off = si->off;
7478 off -= offsetof(struct __sk_buff, remote_ip6[0]);
7479
7480 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
7481 si->dst_reg, si->src_reg,
7482 offsetof(struct sk_buff, sk));
7483 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
7484 offsetof(struct sock_common,
7485 skc_v6_daddr.s6_addr32[0]) +
7486 off);
7487#else
7488 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
7489#endif
7490 break;
7491 case offsetof(struct __sk_buff, local_ip6[0]) ...
7492 offsetof(struct __sk_buff, local_ip6[3]):
7493#if IS_ENABLED(CONFIG_IPV6)
7494 BUILD_BUG_ON(sizeof_field(struct sock_common,
7495 skc_v6_rcv_saddr.s6_addr32[0]) != 4);
7496
7497 off = si->off;
7498 off -= offsetof(struct __sk_buff, local_ip6[0]);
7499
7500 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
7501 si->dst_reg, si->src_reg,
7502 offsetof(struct sk_buff, sk));
7503 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
7504 offsetof(struct sock_common,
7505 skc_v6_rcv_saddr.s6_addr32[0]) +
7506 off);
7507#else
7508 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
7509#endif
7510 break;
7511
7512 case offsetof(struct __sk_buff, remote_port):
7513 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_dport) != 2);
7514
7515 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
7516 si->dst_reg, si->src_reg,
7517 offsetof(struct sk_buff, sk));
7518 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
7519 bpf_target_off(struct sock_common,
7520 skc_dport,
7521 2, target_size));
7522#ifndef __BIG_ENDIAN_BITFIELD
7523 *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
7524#endif
7525 break;
7526
7527 case offsetof(struct __sk_buff, local_port):
7528 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_num) != 2);
7529
7530 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
7531 si->dst_reg, si->src_reg,
7532 offsetof(struct sk_buff, sk));
7533 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
7534 bpf_target_off(struct sock_common,
7535 skc_num, 2, target_size));
7536 break;
7537
7538 case offsetof(struct __sk_buff, tstamp):
7539 BUILD_BUG_ON(sizeof_field(struct sk_buff, tstamp) != 8);
7540
7541 if (type == BPF_WRITE)
7542 *insn++ = BPF_STX_MEM(BPF_DW,
7543 si->dst_reg, si->src_reg,
7544 bpf_target_off(struct sk_buff,
7545 tstamp, 8,
7546 target_size));
7547 else
7548 *insn++ = BPF_LDX_MEM(BPF_DW,
7549 si->dst_reg, si->src_reg,
7550 bpf_target_off(struct sk_buff,
7551 tstamp, 8,
7552 target_size));
7553 break;
7554
7555 case offsetof(struct __sk_buff, gso_segs):
		/* si->dst_reg = skb_shinfo(SKB); */
7557#ifdef NET_SKBUFF_DATA_USES_OFFSET
7558 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end),
7559 BPF_REG_AX, si->src_reg,
7560 offsetof(struct sk_buff, end));
7561 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, head),
7562 si->dst_reg, si->src_reg,
7563 offsetof(struct sk_buff, head));
7564 *insn++ = BPF_ALU64_REG(BPF_ADD, si->dst_reg, BPF_REG_AX);
7565#else
7566 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end),
7567 si->dst_reg, si->src_reg,
7568 offsetof(struct sk_buff, end));
7569#endif
7570 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct skb_shared_info, gso_segs),
7571 si->dst_reg, si->dst_reg,
7572 bpf_target_off(struct skb_shared_info,
7573 gso_segs, 2,
7574 target_size));
7575 break;
7576 case offsetof(struct __sk_buff, wire_len):
7577 BUILD_BUG_ON(sizeof_field(struct qdisc_skb_cb, pkt_len) != 4);
7578
7579 off = si->off;
7580 off -= offsetof(struct __sk_buff, wire_len);
7581 off += offsetof(struct sk_buff, cb);
7582 off += offsetof(struct qdisc_skb_cb, pkt_len);
7583 *target_size = 4;
7584 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, off);
7585 break;
7586
7587 case offsetof(struct __sk_buff, sk):
7588 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
7589 si->dst_reg, si->src_reg,
7590 offsetof(struct sk_buff, sk));
7591 break;
7592 }
7593
7594 return insn - insn_buf;
7595}
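
/* As a concrete example of the rewriting above (illustrative only), the
 * program instruction
 *
 *	r0 = *(u32 *)(r1 + offsetof(struct __sk_buff, len))
 *
 * becomes a direct load from the in-kernel socket buffer,
 *
 *	r0 = *(u32 *)(r1 + offsetof(struct sk_buff, len))
 *
 * while "virtual" fields such as pkt_type or ifindex expand into the
 * multi-instruction sequences seen in their case labels.
 */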
7596
7597u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
7598 const struct bpf_insn *si,
7599 struct bpf_insn *insn_buf,
7600 struct bpf_prog *prog, u32 *target_size)
7601{
7602 struct bpf_insn *insn = insn_buf;
7603 int off;
7604
7605 switch (si->off) {
7606 case offsetof(struct bpf_sock, bound_dev_if):
7607 BUILD_BUG_ON(sizeof_field(struct sock, sk_bound_dev_if) != 4);
7608
7609 if (type == BPF_WRITE)
7610 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
7611 offsetof(struct sock, sk_bound_dev_if));
7612 else
7613 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
7614 offsetof(struct sock, sk_bound_dev_if));
7615 break;
7616
7617 case offsetof(struct bpf_sock, mark):
7618 BUILD_BUG_ON(sizeof_field(struct sock, sk_mark) != 4);
7619
7620 if (type == BPF_WRITE)
7621 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
7622 offsetof(struct sock, sk_mark));
7623 else
7624 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
7625 offsetof(struct sock, sk_mark));
7626 break;
7627
7628 case offsetof(struct bpf_sock, priority):
7629 BUILD_BUG_ON(sizeof_field(struct sock, sk_priority) != 4);
7630
7631 if (type == BPF_WRITE)
7632 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
7633 offsetof(struct sock, sk_priority));
7634 else
7635 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
7636 offsetof(struct sock, sk_priority));
7637 break;
7638
7639 case offsetof(struct bpf_sock, family):
7640 *insn++ = BPF_LDX_MEM(
7641 BPF_FIELD_SIZEOF(struct sock_common, skc_family),
7642 si->dst_reg, si->src_reg,
7643 bpf_target_off(struct sock_common,
7644 skc_family,
7645 sizeof_field(struct sock_common,
7646 skc_family),
7647 target_size));
7648 break;
7649
7650 case offsetof(struct bpf_sock, type):
7651 BUILD_BUG_ON(HWEIGHT32(SK_FL_TYPE_MASK) != BITS_PER_BYTE * 2);
7652 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
7653 offsetof(struct sock, __sk_flags_offset));
7654 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK);
7655 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT);
7656 *target_size = 2;
7657 break;
7658
7659 case offsetof(struct bpf_sock, protocol):
7660 BUILD_BUG_ON(HWEIGHT32(SK_FL_PROTO_MASK) != BITS_PER_BYTE);
7661 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
7662 offsetof(struct sock, __sk_flags_offset));
7663 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
7664 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_PROTO_SHIFT);
7665 *target_size = 1;
7666 break;
7667
7668 case offsetof(struct bpf_sock, src_ip4):
7669 *insn++ = BPF_LDX_MEM(
7670 BPF_SIZE(si->code), si->dst_reg, si->src_reg,
7671 bpf_target_off(struct sock_common, skc_rcv_saddr,
7672 sizeof_field(struct sock_common,
7673 skc_rcv_saddr),
7674 target_size));
7675 break;
7676
7677 case offsetof(struct bpf_sock, dst_ip4):
7678 *insn++ = BPF_LDX_MEM(
7679 BPF_SIZE(si->code), si->dst_reg, si->src_reg,
7680 bpf_target_off(struct sock_common, skc_daddr,
7681 sizeof_field(struct sock_common,
7682 skc_daddr),
7683 target_size));
7684 break;
7685
7686 case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
7687#if IS_ENABLED(CONFIG_IPV6)
7688 off = si->off;
7689 off -= offsetof(struct bpf_sock, src_ip6[0]);
7690 *insn++ = BPF_LDX_MEM(
7691 BPF_SIZE(si->code), si->dst_reg, si->src_reg,
7692 bpf_target_off(
7693 struct sock_common,
7694 skc_v6_rcv_saddr.s6_addr32[0],
7695 sizeof_field(struct sock_common,
7696 skc_v6_rcv_saddr.s6_addr32[0]),
7697 target_size) + off);
7698#else
7699 (void)off;
7700 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
7701#endif
7702 break;
7703
7704 case bpf_ctx_range_till(struct bpf_sock, dst_ip6[0], dst_ip6[3]):
7705#if IS_ENABLED(CONFIG_IPV6)
7706 off = si->off;
7707 off -= offsetof(struct bpf_sock, dst_ip6[0]);
7708 *insn++ = BPF_LDX_MEM(
7709 BPF_SIZE(si->code), si->dst_reg, si->src_reg,
7710 bpf_target_off(struct sock_common,
7711 skc_v6_daddr.s6_addr32[0],
7712 sizeof_field(struct sock_common,
7713 skc_v6_daddr.s6_addr32[0]),
7714 target_size) + off);
7715#else
7716 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
7717 *target_size = 4;
7718#endif
7719 break;
7720
7721 case offsetof(struct bpf_sock, src_port):
7722 *insn++ = BPF_LDX_MEM(
7723 BPF_FIELD_SIZEOF(struct sock_common, skc_num),
7724 si->dst_reg, si->src_reg,
7725 bpf_target_off(struct sock_common, skc_num,
7726 sizeof_field(struct sock_common,
7727 skc_num),
7728 target_size));
7729 break;
7730
7731 case offsetof(struct bpf_sock, dst_port):
7732 *insn++ = BPF_LDX_MEM(
7733 BPF_FIELD_SIZEOF(struct sock_common, skc_dport),
7734 si->dst_reg, si->src_reg,
7735 bpf_target_off(struct sock_common, skc_dport,
7736 sizeof_field(struct sock_common,
7737 skc_dport),
7738 target_size));
7739 break;
7740
7741 case offsetof(struct bpf_sock, state):
7742 *insn++ = BPF_LDX_MEM(
7743 BPF_FIELD_SIZEOF(struct sock_common, skc_state),
7744 si->dst_reg, si->src_reg,
7745 bpf_target_off(struct sock_common, skc_state,
7746 sizeof_field(struct sock_common,
7747 skc_state),
7748 target_size));
7749 break;
7750 }
7751
7752 return insn - insn_buf;
7753}
7754
7755static u32 tc_cls_act_convert_ctx_access(enum bpf_access_type type,
7756 const struct bpf_insn *si,
7757 struct bpf_insn *insn_buf,
7758 struct bpf_prog *prog, u32 *target_size)
7759{
7760 struct bpf_insn *insn = insn_buf;
7761
7762 switch (si->off) {
7763 case offsetof(struct __sk_buff, ifindex):
7764 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
7765 si->dst_reg, si->src_reg,
7766 offsetof(struct sk_buff, dev));
7767 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
7768 bpf_target_off(struct net_device, ifindex, 4,
7769 target_size));
7770 break;
7771 default:
7772 return bpf_convert_ctx_access(type, si, insn_buf, prog,
7773 target_size);
7774 }
7775
7776 return insn - insn_buf;
7777}
7778
7779static u32 xdp_convert_ctx_access(enum bpf_access_type type,
7780 const struct bpf_insn *si,
7781 struct bpf_insn *insn_buf,
7782 struct bpf_prog *prog, u32 *target_size)
7783{
7784 struct bpf_insn *insn = insn_buf;
7785
7786 switch (si->off) {
7787 case offsetof(struct xdp_md, data):
7788 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data),
7789 si->dst_reg, si->src_reg,
7790 offsetof(struct xdp_buff, data));
7791 break;
7792 case offsetof(struct xdp_md, data_meta):
7793 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_meta),
7794 si->dst_reg, si->src_reg,
7795 offsetof(struct xdp_buff, data_meta));
7796 break;
7797 case offsetof(struct xdp_md, data_end):
7798 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_end),
7799 si->dst_reg, si->src_reg,
7800 offsetof(struct xdp_buff, data_end));
7801 break;
7802 case offsetof(struct xdp_md, ingress_ifindex):
7803 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
7804 si->dst_reg, si->src_reg,
7805 offsetof(struct xdp_buff, rxq));
7806 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_rxq_info, dev),
7807 si->dst_reg, si->dst_reg,
7808 offsetof(struct xdp_rxq_info, dev));
7809 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
7810 offsetof(struct net_device, ifindex));
7811 break;
7812 case offsetof(struct xdp_md, rx_queue_index):
7813 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
7814 si->dst_reg, si->src_reg,
7815 offsetof(struct xdp_buff, rxq));
7816 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
7817 offsetof(struct xdp_rxq_info,
7818 queue_index));
7819 break;
7820 }
7821
7822 return insn - insn_buf;
7823}
7824
/* SOCK_ADDR_LOAD_NESTED_FIELD() loads Nested Field S.F.NF where S is type of
 * context Structure, F is Field in context structure that contains a pointer
 * to Nested Structure of type NS that has the field NF.
 *
 * SIZE encodes the load size (BPF_B, BPF_H, etc). It's up to caller to make
 * sure that SIZE is not greater than actual size of S.F.NF.
 *
 * If offset OFF is provided, the load happens from that offset relative to
 * offset of NF.
 */
7835#define SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF) \
7836 do { \
7837 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), si->dst_reg, \
7838 si->src_reg, offsetof(S, F)); \
7839 *insn++ = BPF_LDX_MEM( \
7840 SIZE, si->dst_reg, si->dst_reg, \
7841 bpf_target_off(NS, NF, sizeof_field(NS, NF), \
7842 target_size) \
7843 + OFF); \
7844 } while (0)
7845
7846#define SOCK_ADDR_LOAD_NESTED_FIELD(S, NS, F, NF) \
7847 SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, \
7848 BPF_FIELD_SIZEOF(NS, NF), 0)
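
/* For instance, SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
 * struct sockaddr, uaddr, sa_family) emits, in pseudo-C:
 *
 *	dst_reg = ((struct bpf_sock_addr_kern *)src_reg)->uaddr;
 *	dst_reg = ((struct sockaddr *)dst_reg)->sa_family;
 */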
7849
/* SOCK_ADDR_STORE_NESTED_FIELD_OFF() has semantics similar to
 * SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF() but for store operations.
 *
 * In addition it uses Temporary Field TF (member of struct S) as the 3rd
 * "register" since the two registers available in convert_ctx_access are
 * not enough: we can override neither SRC, since it contains the value to
 * store, nor DST, since it contains the pointer to the context that may be
 * used by later instructions. But we need a temporary place to save the
 * pointer to the nested structure whose field we want to store to.
 */
7860#define SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, SIZE, OFF, TF) \
7861 do { \
7862 int tmp_reg = BPF_REG_9; \
7863 if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg) \
7864 --tmp_reg; \
7865 if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg) \
7866 --tmp_reg; \
7867 *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, tmp_reg, \
7868 offsetof(S, TF)); \
7869 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), tmp_reg, \
7870 si->dst_reg, offsetof(S, F)); \
7871 *insn++ = BPF_STX_MEM(SIZE, tmp_reg, si->src_reg, \
7872 bpf_target_off(NS, NF, sizeof_field(NS, NF), \
7873 target_size) \
7874 + OFF); \
7875 *insn++ = BPF_LDX_MEM(BPF_DW, tmp_reg, si->dst_reg, \
7876 offsetof(S, TF)); \
7877 } while (0)
7878
7879#define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF, \
7880 TF) \
7881 do { \
7882 if (type == BPF_WRITE) { \
7883 SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, SIZE, \
7884 OFF, TF); \
7885 } else { \
7886 SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF( \
7887 S, NS, F, NF, SIZE, OFF); \
7888 } \
7889 } while (0)
7890
7891#define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD(S, NS, F, NF, TF) \
7892 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF( \
7893 S, NS, F, NF, BPF_FIELD_SIZEOF(NS, NF), 0, TF)
7894
7895static u32 sock_addr_convert_ctx_access(enum bpf_access_type type,
7896 const struct bpf_insn *si,
7897 struct bpf_insn *insn_buf,
7898 struct bpf_prog *prog, u32 *target_size)
7899{
7900 struct bpf_insn *insn = insn_buf;
7901 int off;
7902
7903 switch (si->off) {
7904 case offsetof(struct bpf_sock_addr, user_family):
7905 SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
7906 struct sockaddr, uaddr, sa_family);
7907 break;
7908
7909 case offsetof(struct bpf_sock_addr, user_ip4):
7910 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
7911 struct bpf_sock_addr_kern, struct sockaddr_in, uaddr,
7912 sin_addr, BPF_SIZE(si->code), 0, tmp_reg);
7913 break;
7914
7915 case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
7916 off = si->off;
7917 off -= offsetof(struct bpf_sock_addr, user_ip6[0]);
7918 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
7919 struct bpf_sock_addr_kern, struct sockaddr_in6, uaddr,
7920 sin6_addr.s6_addr32[0], BPF_SIZE(si->code), off,
7921 tmp_reg);
7922 break;
7923
7924 case offsetof(struct bpf_sock_addr, user_port):
		/* To get port we need to know sa_family first and then treat
		 * sockaddr as either sockaddr_in or sockaddr_in6.
		 * Though we can simplify since port field has same offset and
		 * size in both structures.
		 * Here we check this invariant and use just one of the
		 * structures if it's true.
		 */
7932 BUILD_BUG_ON(offsetof(struct sockaddr_in, sin_port) !=
7933 offsetof(struct sockaddr_in6, sin6_port));
7934 BUILD_BUG_ON(sizeof_field(struct sockaddr_in, sin_port) !=
7935 sizeof_field(struct sockaddr_in6, sin6_port));
7936 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD(struct bpf_sock_addr_kern,
7937 struct sockaddr_in6, uaddr,
7938 sin6_port, tmp_reg);
7939 break;
7940
7941 case offsetof(struct bpf_sock_addr, family):
7942 SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
7943 struct sock, sk, sk_family);
7944 break;
7945
7946 case offsetof(struct bpf_sock_addr, type):
7947 SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(
7948 struct bpf_sock_addr_kern, struct sock, sk,
7949 __sk_flags_offset, BPF_W, 0);
7950 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK);
7951 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT);
7952 break;
7953
7954 case offsetof(struct bpf_sock_addr, protocol):
7955 SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(
7956 struct bpf_sock_addr_kern, struct sock, sk,
7957 __sk_flags_offset, BPF_W, 0);
7958 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
7959 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg,
7960 SK_FL_PROTO_SHIFT);
7961 break;
7962
7963 case offsetof(struct bpf_sock_addr, msg_src_ip4):
		/* Treat t_ctx as struct in_addr for msg_src_ip4. */
7965 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
7966 struct bpf_sock_addr_kern, struct in_addr, t_ctx,
7967 s_addr, BPF_SIZE(si->code), 0, tmp_reg);
7968 break;
7969
7970 case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0],
7971 msg_src_ip6[3]):
7972 off = si->off;
7973 off -= offsetof(struct bpf_sock_addr, msg_src_ip6[0]);
		/* Treat t_ctx as struct in6_addr for msg_src_ip6. */
7975 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
7976 struct bpf_sock_addr_kern, struct in6_addr, t_ctx,
7977 s6_addr32[0], BPF_SIZE(si->code), off, tmp_reg);
7978 break;
7979 case offsetof(struct bpf_sock_addr, sk):
7980 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_addr_kern, sk),
7981 si->dst_reg, si->src_reg,
7982 offsetof(struct bpf_sock_addr_kern, sk));
7983 break;
7984 }
7985
7986 return insn - insn_buf;
7987}
7988
7989static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
7990 const struct bpf_insn *si,
7991 struct bpf_insn *insn_buf,
7992 struct bpf_prog *prog,
7993 u32 *target_size)
7994{
7995 struct bpf_insn *insn = insn_buf;
7996 int off;
7997
/* Helper macro for adding read access to tcp_sock or sock fields. */
7999#define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \
8000 do { \
8001 BUILD_BUG_ON(sizeof_field(OBJ, OBJ_FIELD) > \
8002 sizeof_field(struct bpf_sock_ops, BPF_FIELD)); \
8003 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
8004 struct bpf_sock_ops_kern, \
8005 is_fullsock), \
8006 si->dst_reg, si->src_reg, \
8007 offsetof(struct bpf_sock_ops_kern, \
8008 is_fullsock)); \
8009 *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 2); \
8010 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
8011 struct bpf_sock_ops_kern, sk),\
8012 si->dst_reg, si->src_reg, \
8013 offsetof(struct bpf_sock_ops_kern, sk));\
8014 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(OBJ, \
8015 OBJ_FIELD), \
8016 si->dst_reg, si->dst_reg, \
8017 offsetof(OBJ, OBJ_FIELD)); \
8018 } while (0)
8019
8020#define SOCK_OPS_GET_TCP_SOCK_FIELD(FIELD) \
8021 SOCK_OPS_GET_FIELD(FIELD, FIELD, struct tcp_sock)
8022
/* Helper macro for adding write access to tcp_sock or sock fields.
 * The macro is called with two registers, dst_reg which contains a pointer
 * to ctx (context) and src_reg which contains the value that should be
 * stored. However, we need an additional register since we cannot overwrite
 * dst_reg because it may be used later in the program.
 * Instead we "borrow" one of the other registers. We first save its value
 * into bpf_sock_ops_kern::temp, use it, and then restore its old value at
 * the end of the macro.
 */
8032#define SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \
8033 do { \
8034 int reg = BPF_REG_9; \
8035 BUILD_BUG_ON(sizeof_field(OBJ, OBJ_FIELD) > \
8036 sizeof_field(struct bpf_sock_ops, BPF_FIELD)); \
8037 if (si->dst_reg == reg || si->src_reg == reg) \
8038 reg--; \
8039 if (si->dst_reg == reg || si->src_reg == reg) \
8040 reg--; \
8041 *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, reg, \
8042 offsetof(struct bpf_sock_ops_kern, \
8043 temp)); \
8044 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
8045 struct bpf_sock_ops_kern, \
8046 is_fullsock), \
8047 reg, si->dst_reg, \
8048 offsetof(struct bpf_sock_ops_kern, \
8049 is_fullsock)); \
8050 *insn++ = BPF_JMP_IMM(BPF_JEQ, reg, 0, 2); \
8051 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
8052 struct bpf_sock_ops_kern, sk),\
8053 reg, si->dst_reg, \
8054 offsetof(struct bpf_sock_ops_kern, sk));\
8055 *insn++ = BPF_STX_MEM(BPF_FIELD_SIZEOF(OBJ, OBJ_FIELD), \
8056 reg, si->src_reg, \
8057 offsetof(OBJ, OBJ_FIELD)); \
8058 *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->dst_reg, \
8059 offsetof(struct bpf_sock_ops_kern, \
8060 temp)); \
8061 } while (0)
8062
8063#define SOCK_OPS_GET_OR_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ, TYPE) \
8064 do { \
8065 if (TYPE == BPF_WRITE) \
8066 SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ); \
8067 else \
8068 SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ); \
8069 } while (0)
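
/* SOCK_OPS_SET_FIELD() above thus emits, in pseudo-C:
 *
 *	ctx->temp = reg;	// spill the borrowed register
 *	reg = ctx->is_fullsock;
 *	if (reg != 0) {
 *		reg = ctx->sk;
 *		((OBJ *)reg)->OBJ_FIELD = src_reg;
 *	}
 *	reg = ctx->temp;	// restore the borrowed register
 *
 * where ctx is the struct bpf_sock_ops_kern pointer held in dst_reg.
 */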
8070
8071 if (insn > insn_buf)
8072 return insn - insn_buf;
8073
8074 switch (si->off) {
8075 case offsetof(struct bpf_sock_ops, op) ...
8076 offsetof(struct bpf_sock_ops, replylong[3]):
8077 BUILD_BUG_ON(sizeof_field(struct bpf_sock_ops, op) !=
8078 sizeof_field(struct bpf_sock_ops_kern, op));
8079 BUILD_BUG_ON(sizeof_field(struct bpf_sock_ops, reply) !=
8080 sizeof_field(struct bpf_sock_ops_kern, reply));
8081 BUILD_BUG_ON(sizeof_field(struct bpf_sock_ops, replylong) !=
8082 sizeof_field(struct bpf_sock_ops_kern, replylong));
8083 off = si->off;
8084 off -= offsetof(struct bpf_sock_ops, op);
8085 off += offsetof(struct bpf_sock_ops_kern, op);
8086 if (type == BPF_WRITE)
8087 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
8088 off);
8089 else
8090 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
8091 off);
8092 break;
8093
8094 case offsetof(struct bpf_sock_ops, family):
8095 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_family) != 2);
8096
8097 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8098 struct bpf_sock_ops_kern, sk),
8099 si->dst_reg, si->src_reg,
8100 offsetof(struct bpf_sock_ops_kern, sk));
8101 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
8102 offsetof(struct sock_common, skc_family));
8103 break;
8104
8105 case offsetof(struct bpf_sock_ops, remote_ip4):
8106 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_daddr) != 4);
8107
8108 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8109 struct bpf_sock_ops_kern, sk),
8110 si->dst_reg, si->src_reg,
8111 offsetof(struct bpf_sock_ops_kern, sk));
8112 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
8113 offsetof(struct sock_common, skc_daddr));
8114 break;
8115
8116 case offsetof(struct bpf_sock_ops, local_ip4):
8117 BUILD_BUG_ON(sizeof_field(struct sock_common,
8118 skc_rcv_saddr) != 4);
8119
8120 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8121 struct bpf_sock_ops_kern, sk),
8122 si->dst_reg, si->src_reg,
8123 offsetof(struct bpf_sock_ops_kern, sk));
8124 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
8125 offsetof(struct sock_common,
8126 skc_rcv_saddr));
8127 break;
8128
8129 case offsetof(struct bpf_sock_ops, remote_ip6[0]) ...
8130 offsetof(struct bpf_sock_ops, remote_ip6[3]):
8131#if IS_ENABLED(CONFIG_IPV6)
8132 BUILD_BUG_ON(sizeof_field(struct sock_common,
8133 skc_v6_daddr.s6_addr32[0]) != 4);
8134
8135 off = si->off;
8136 off -= offsetof(struct bpf_sock_ops, remote_ip6[0]);
8137 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8138 struct bpf_sock_ops_kern, sk),
8139 si->dst_reg, si->src_reg,
8140 offsetof(struct bpf_sock_ops_kern, sk));
8141 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
8142 offsetof(struct sock_common,
8143 skc_v6_daddr.s6_addr32[0]) +
8144 off);
8145#else
8146 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
8147#endif
8148 break;
8149
8150 case offsetof(struct bpf_sock_ops, local_ip6[0]) ...
8151 offsetof(struct bpf_sock_ops, local_ip6[3]):
8152#if IS_ENABLED(CONFIG_IPV6)
8153 BUILD_BUG_ON(sizeof_field(struct sock_common,
8154 skc_v6_rcv_saddr.s6_addr32[0]) != 4);
8155
8156 off = si->off;
8157 off -= offsetof(struct bpf_sock_ops, local_ip6[0]);
8158 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8159 struct bpf_sock_ops_kern, sk),
8160 si->dst_reg, si->src_reg,
8161 offsetof(struct bpf_sock_ops_kern, sk));
8162 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
8163 offsetof(struct sock_common,
8164 skc_v6_rcv_saddr.s6_addr32[0]) +
8165 off);
8166#else
8167 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
8168#endif
8169 break;
8170
8171 case offsetof(struct bpf_sock_ops, remote_port):
8172 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_dport) != 2);
8173
8174 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8175 struct bpf_sock_ops_kern, sk),
8176 si->dst_reg, si->src_reg,
8177 offsetof(struct bpf_sock_ops_kern, sk));
8178 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
8179 offsetof(struct sock_common, skc_dport));
8180#ifndef __BIG_ENDIAN_BITFIELD
8181 *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
8182#endif
8183 break;
8184
8185 case offsetof(struct bpf_sock_ops, local_port):
8186 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_num) != 2);
8187
8188 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8189 struct bpf_sock_ops_kern, sk),
8190 si->dst_reg, si->src_reg,
8191 offsetof(struct bpf_sock_ops_kern, sk));
8192 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
8193 offsetof(struct sock_common, skc_num));
8194 break;
8195
8196 case offsetof(struct bpf_sock_ops, is_fullsock):
8197 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8198 struct bpf_sock_ops_kern,
8199 is_fullsock),
8200 si->dst_reg, si->src_reg,
8201 offsetof(struct bpf_sock_ops_kern,
8202 is_fullsock));
8203 break;
8204
8205 case offsetof(struct bpf_sock_ops, state):
8206 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_state) != 1);
8207
8208 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8209 struct bpf_sock_ops_kern, sk),
8210 si->dst_reg, si->src_reg,
8211 offsetof(struct bpf_sock_ops_kern, sk));
8212 *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->dst_reg,
8213 offsetof(struct sock_common, skc_state));
8214 break;
8215
8216 case offsetof(struct bpf_sock_ops, rtt_min):
8217 BUILD_BUG_ON(sizeof_field(struct tcp_sock, rtt_min) !=
8218 sizeof(struct minmax));
8219 BUILD_BUG_ON(sizeof(struct minmax) <
8220 sizeof(struct minmax_sample));
8221
8222 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8223 struct bpf_sock_ops_kern, sk),
8224 si->dst_reg, si->src_reg,
8225 offsetof(struct bpf_sock_ops_kern, sk));
8226 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
8227 offsetof(struct tcp_sock, rtt_min) +
8228 sizeof_field(struct minmax_sample, t));
8229 break;
8230
8231 case offsetof(struct bpf_sock_ops, bpf_sock_ops_cb_flags):
8232 SOCK_OPS_GET_FIELD(bpf_sock_ops_cb_flags, bpf_sock_ops_cb_flags,
8233 struct tcp_sock);
8234 break;
8235
8236 case offsetof(struct bpf_sock_ops, sk_txhash):
8237 SOCK_OPS_GET_OR_SET_FIELD(sk_txhash, sk_txhash,
8238 struct sock, type);
8239 break;
8240 case offsetof(struct bpf_sock_ops, snd_cwnd):
8241 SOCK_OPS_GET_TCP_SOCK_FIELD(snd_cwnd);
8242 break;
8243 case offsetof(struct bpf_sock_ops, srtt_us):
8244 SOCK_OPS_GET_TCP_SOCK_FIELD(srtt_us);
8245 break;
8246 case offsetof(struct bpf_sock_ops, snd_ssthresh):
8247 SOCK_OPS_GET_TCP_SOCK_FIELD(snd_ssthresh);
8248 break;
8249 case offsetof(struct bpf_sock_ops, rcv_nxt):
8250 SOCK_OPS_GET_TCP_SOCK_FIELD(rcv_nxt);
8251 break;
8252 case offsetof(struct bpf_sock_ops, snd_nxt):
8253 SOCK_OPS_GET_TCP_SOCK_FIELD(snd_nxt);
8254 break;
8255 case offsetof(struct bpf_sock_ops, snd_una):
8256 SOCK_OPS_GET_TCP_SOCK_FIELD(snd_una);
8257 break;
8258 case offsetof(struct bpf_sock_ops, mss_cache):
8259 SOCK_OPS_GET_TCP_SOCK_FIELD(mss_cache);
8260 break;
8261 case offsetof(struct bpf_sock_ops, ecn_flags):
8262 SOCK_OPS_GET_TCP_SOCK_FIELD(ecn_flags);
8263 break;
8264 case offsetof(struct bpf_sock_ops, rate_delivered):
8265 SOCK_OPS_GET_TCP_SOCK_FIELD(rate_delivered);
8266 break;
8267 case offsetof(struct bpf_sock_ops, rate_interval_us):
8268 SOCK_OPS_GET_TCP_SOCK_FIELD(rate_interval_us);
8269 break;
8270 case offsetof(struct bpf_sock_ops, packets_out):
8271 SOCK_OPS_GET_TCP_SOCK_FIELD(packets_out);
8272 break;
8273 case offsetof(struct bpf_sock_ops, retrans_out):
8274 SOCK_OPS_GET_TCP_SOCK_FIELD(retrans_out);
8275 break;
8276 case offsetof(struct bpf_sock_ops, total_retrans):
8277 SOCK_OPS_GET_TCP_SOCK_FIELD(total_retrans);
8278 break;
8279 case offsetof(struct bpf_sock_ops, segs_in):
8280 SOCK_OPS_GET_TCP_SOCK_FIELD(segs_in);
8281 break;
8282 case offsetof(struct bpf_sock_ops, data_segs_in):
8283 SOCK_OPS_GET_TCP_SOCK_FIELD(data_segs_in);
8284 break;
8285 case offsetof(struct bpf_sock_ops, segs_out):
8286 SOCK_OPS_GET_TCP_SOCK_FIELD(segs_out);
8287 break;
8288 case offsetof(struct bpf_sock_ops, data_segs_out):
8289 SOCK_OPS_GET_TCP_SOCK_FIELD(data_segs_out);
8290 break;
8291 case offsetof(struct bpf_sock_ops, lost_out):
8292 SOCK_OPS_GET_TCP_SOCK_FIELD(lost_out);
8293 break;
8294 case offsetof(struct bpf_sock_ops, sacked_out):
8295 SOCK_OPS_GET_TCP_SOCK_FIELD(sacked_out);
8296 break;
8297 case offsetof(struct bpf_sock_ops, bytes_received):
8298 SOCK_OPS_GET_TCP_SOCK_FIELD(bytes_received);
8299 break;
8300 case offsetof(struct bpf_sock_ops, bytes_acked):
8301 SOCK_OPS_GET_TCP_SOCK_FIELD(bytes_acked);
8302 break;
8303 case offsetof(struct bpf_sock_ops, sk):
8304 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8305 struct bpf_sock_ops_kern,
8306 is_fullsock),
8307 si->dst_reg, si->src_reg,
8308 offsetof(struct bpf_sock_ops_kern,
8309 is_fullsock));
8310 *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
8311 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8312 struct bpf_sock_ops_kern, sk),
8313 si->dst_reg, si->src_reg,
8314 offsetof(struct bpf_sock_ops_kern, sk));
8315 break;
8316 }
8317 return insn - insn_buf;
8318}
8319
8320static u32 sk_skb_convert_ctx_access(enum bpf_access_type type,
8321 const struct bpf_insn *si,
8322 struct bpf_insn *insn_buf,
8323 struct bpf_prog *prog, u32 *target_size)
8324{
8325 struct bpf_insn *insn = insn_buf;
8326 int off;
8327
8328 switch (si->off) {
8329 case offsetof(struct __sk_buff, data_end):
8330 off = si->off;
8331 off -= offsetof(struct __sk_buff, data_end);
8332 off += offsetof(struct sk_buff, cb);
8333 off += offsetof(struct tcp_skb_cb, bpf.data_end);
8334 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
8335 si->src_reg, off);
8336 break;
8337 default:
8338 return bpf_convert_ctx_access(type, si, insn_buf, prog,
8339 target_size);
8340 }
8341
8342 return insn - insn_buf;
8343}
8344
8345static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
8346 const struct bpf_insn *si,
8347 struct bpf_insn *insn_buf,
8348 struct bpf_prog *prog, u32 *target_size)
8349{
8350 struct bpf_insn *insn = insn_buf;
8351#if IS_ENABLED(CONFIG_IPV6)
8352 int off;
8353#endif
8354
	/* convert ctx uses the fact that the sg element is first in struct sk_msg */
8356 BUILD_BUG_ON(offsetof(struct sk_msg, sg) != 0);

	switch (si->off) {
	case offsetof(struct sk_msg_md, data):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, data),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, data));
		break;
	case offsetof(struct sk_msg_md, data_end):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, data_end),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, data_end));
		break;
	case offsetof(struct sk_msg_md, family):
		BUILD_BUG_ON(sizeof_field(struct sock_common, skc_family) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
					      struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_family));
		break;

	case offsetof(struct sk_msg_md, remote_ip4):
		BUILD_BUG_ON(sizeof_field(struct sock_common, skc_daddr) != 4);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_daddr));
		break;

	case offsetof(struct sk_msg_md, local_ip4):
		BUILD_BUG_ON(sizeof_field(struct sock_common,
					  skc_rcv_saddr) != 4);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
					      struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common,
					       skc_rcv_saddr));
		break;

	case offsetof(struct sk_msg_md, remote_ip6[0]) ...
	     offsetof(struct sk_msg_md, remote_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
		BUILD_BUG_ON(sizeof_field(struct sock_common,
					  skc_v6_daddr.s6_addr32[0]) != 4);

		off = si->off;
		off -= offsetof(struct sk_msg_md, remote_ip6[0]);
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common,
					       skc_v6_daddr.s6_addr32[0]) +
				      off);
#else
		*insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
#endif
		break;

	case offsetof(struct sk_msg_md, local_ip6[0]) ...
	     offsetof(struct sk_msg_md, local_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
		BUILD_BUG_ON(sizeof_field(struct sock_common,
					  skc_v6_rcv_saddr.s6_addr32[0]) != 4);

		off = si->off;
		off -= offsetof(struct sk_msg_md, local_ip6[0]);
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common,
					       skc_v6_rcv_saddr.s6_addr32[0]) +
				      off);
#else
		*insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
#endif
		break;

	case offsetof(struct sk_msg_md, remote_port):
		BUILD_BUG_ON(sizeof_field(struct sock_common, skc_dport) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_dport));
#ifndef __BIG_ENDIAN_BITFIELD
		*insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
#endif
		break;

	case offsetof(struct sk_msg_md, local_port):
		BUILD_BUG_ON(sizeof_field(struct sock_common, skc_num) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_num));
		break;

	case offsetof(struct sk_msg_md, size):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg_sg, size),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg_sg, size));
		break;
	}

	return insn - insn_buf;
}
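
/* Illustrative sketch (not part of this file): an sk_msg program using the
 * fields converted above.  Per the rewriting, remote_port is exposed in
 * network byte order (hence bpf_ntohl()) while local_port is host byte
 * order; the section name and include paths follow libbpf conventions and
 * are assumptions.
 *
 *	#include <linux/bpf.h>
 *	#include <linux/socket.h>
 *	#include <bpf/bpf_helpers.h>
 *	#include <bpf/bpf_endian.h>
 *
 *	SEC("sk_msg")
 *	int _msg_verdict(struct sk_msg_md *msg)
 *	{
 *		if (msg->family == AF_INET &&
 *		    bpf_ntohl(msg->remote_port) == 80)
 *			return SK_DROP;
 *		return SK_PASS;
 *	}
 */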

const struct bpf_verifier_ops sk_filter_verifier_ops = {
	.get_func_proto		= sk_filter_func_proto,
	.is_valid_access	= sk_filter_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
	.gen_ld_abs		= bpf_gen_ld_abs,
};

const struct bpf_prog_ops sk_filter_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops tc_cls_act_verifier_ops = {
	.get_func_proto		= tc_cls_act_func_proto,
	.is_valid_access	= tc_cls_act_is_valid_access,
	.convert_ctx_access	= tc_cls_act_convert_ctx_access,
	.gen_prologue		= tc_cls_act_prologue,
	.gen_ld_abs		= bpf_gen_ld_abs,
};

const struct bpf_prog_ops tc_cls_act_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops xdp_verifier_ops = {
	.get_func_proto		= xdp_func_proto,
	.is_valid_access	= xdp_is_valid_access,
	.convert_ctx_access	= xdp_convert_ctx_access,
	.gen_prologue		= bpf_noop_prologue,
};

const struct bpf_prog_ops xdp_prog_ops = {
	.test_run		= bpf_prog_test_run_xdp,
};

const struct bpf_verifier_ops cg_skb_verifier_ops = {
	.get_func_proto		= cg_skb_func_proto,
	.is_valid_access	= cg_skb_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
};

const struct bpf_prog_ops cg_skb_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops lwt_in_verifier_ops = {
	.get_func_proto		= lwt_in_func_proto,
	.is_valid_access	= lwt_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
};

const struct bpf_prog_ops lwt_in_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops lwt_out_verifier_ops = {
	.get_func_proto		= lwt_out_func_proto,
	.is_valid_access	= lwt_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
};

const struct bpf_prog_ops lwt_out_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops lwt_xmit_verifier_ops = {
	.get_func_proto		= lwt_xmit_func_proto,
	.is_valid_access	= lwt_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
	.gen_prologue		= tc_cls_act_prologue,
};

const struct bpf_prog_ops lwt_xmit_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops lwt_seg6local_verifier_ops = {
	.get_func_proto		= lwt_seg6local_func_proto,
	.is_valid_access	= lwt_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
};

const struct bpf_prog_ops lwt_seg6local_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops cg_sock_verifier_ops = {
	.get_func_proto		= sock_filter_func_proto,
	.is_valid_access	= sock_filter_is_valid_access,
	.convert_ctx_access	= bpf_sock_convert_ctx_access,
};

const struct bpf_prog_ops cg_sock_prog_ops = {
};

const struct bpf_verifier_ops cg_sock_addr_verifier_ops = {
	.get_func_proto		= sock_addr_func_proto,
	.is_valid_access	= sock_addr_is_valid_access,
	.convert_ctx_access	= sock_addr_convert_ctx_access,
};

const struct bpf_prog_ops cg_sock_addr_prog_ops = {
};

const struct bpf_verifier_ops sock_ops_verifier_ops = {
	.get_func_proto		= sock_ops_func_proto,
	.is_valid_access	= sock_ops_is_valid_access,
	.convert_ctx_access	= sock_ops_convert_ctx_access,
};

const struct bpf_prog_ops sock_ops_prog_ops = {
};

const struct bpf_verifier_ops sk_skb_verifier_ops = {
	.get_func_proto		= sk_skb_func_proto,
	.is_valid_access	= sk_skb_is_valid_access,
	.convert_ctx_access	= sk_skb_convert_ctx_access,
	.gen_prologue		= sk_skb_prologue,
};

const struct bpf_prog_ops sk_skb_prog_ops = {
};

const struct bpf_verifier_ops sk_msg_verifier_ops = {
	.get_func_proto		= sk_msg_func_proto,
	.is_valid_access	= sk_msg_is_valid_access,
	.convert_ctx_access	= sk_msg_convert_ctx_access,
	.gen_prologue		= bpf_noop_prologue,
};

const struct bpf_prog_ops sk_msg_prog_ops = {
};

const struct bpf_verifier_ops flow_dissector_verifier_ops = {
	.get_func_proto		= flow_dissector_func_proto,
	.is_valid_access	= flow_dissector_is_valid_access,
	.convert_ctx_access	= flow_dissector_convert_ctx_access,
};

const struct bpf_prog_ops flow_dissector_prog_ops = {
	.test_run		= bpf_prog_test_run_flow_dissector,
};

int sk_detach_filter(struct sock *sk)
{
	int ret = -ENOENT;
	struct sk_filter *filter;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	filter = rcu_dereference_protected(sk->sk_filter,
					   lockdep_sock_is_held(sk));
	if (filter) {
		RCU_INIT_POINTER(sk->sk_filter, NULL);
		sk_filter_uncharge(sk, filter);
		ret = 0;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);
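
/* Illustrative user-space sketch (not part of this file): sk_detach_filter()
 * is reached via setsockopt(SO_DETACH_FILTER).  The generic setsockopt path
 * still requires an int-sized, readable optval, hence the dummy value; the
 * helper name is hypothetical.
 *
 *	#include <sys/socket.h>
 *	#include <errno.h>
 *
 *	static int detach_cbpf(int fd)
 *	{
 *		int dummy = 0;
 *
 *		if (setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER,
 *			       &dummy, sizeof(dummy))) {
 *			if (errno == ENOENT)	// no filter was attached
 *				return 0;
 *			return -1;		// e.g. EPERM if SO_LOCK_FILTER is set
 *		}
 *		return 0;
 *	}
 */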

int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
		  unsigned int len)
{
	struct sock_fprog_kern *fprog;
	struct sk_filter *filter;
	int ret = 0;

	lock_sock(sk);
	filter = rcu_dereference_protected(sk->sk_filter,
					   lockdep_sock_is_held(sk));
	if (!filter)
		goto out;

	/* We're copying the filter that has been originally attached,
	 * so no conversion/decode needed anymore. eBPF programs that
	 * have no original program cannot be dumped through this.
	 */
	ret = -EACCES;
	fprog = filter->prog->orig_prog;
	if (!fprog)
		goto out;

	ret = fprog->len;
	if (!len)
		/* User space only wants the filter length. */
		goto out;

	ret = -EINVAL;
	if (len < fprog->len)
		goto out;

	ret = -EFAULT;
	if (copy_to_user(ubuf, fprog->filter, bpf_classic_proglen(fprog)))
		goto out;

	/* Instead of bytes, the API requests to return the number
	 * of filter blocks.
	 */
	ret = fprog->len;
out:
	release_sock(sk);
	return ret;
}
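
/* Illustrative user-space sketch (not part of this file): SO_GET_FILTER
 * counts in filter blocks, not bytes, so the usual pattern is one call with
 * optlen == 0 to learn the length and a second call to fetch the program.
 * The helper name is hypothetical; error handling is minimal.
 *
 *	#include <sys/socket.h>
 *	#include <linux/filter.h>
 *	#include <stdlib.h>
 *
 *	static struct sock_filter *dump_cbpf(int fd, socklen_t *cnt)
 *	{
 *		struct sock_filter *insns;
 *		socklen_t len = 0;
 *
 *		if (getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, NULL, &len))
 *			return NULL;	// EACCES for eBPF without orig_prog
 *		insns = calloc(len, sizeof(*insns));
 *		if (!insns || getsockopt(fd, SOL_SOCKET, SO_GET_FILTER,
 *					 insns, &len)) {
 *			free(insns);
 *			return NULL;
 *		}
 *		*cnt = len;	// number of blocks, as returned by the kernel
 *		return insns;
 *	}
 */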

#ifdef CONFIG_INET
static void bpf_init_reuseport_kern(struct sk_reuseport_kern *reuse_kern,
				    struct sock_reuseport *reuse,
				    struct sock *sk, struct sk_buff *skb,
				    u32 hash)
{
	reuse_kern->skb = skb;
	reuse_kern->sk = sk;
	reuse_kern->selected_sk = NULL;
	reuse_kern->data_end = skb->data + skb_headlen(skb);
	reuse_kern->hash = hash;
	reuse_kern->reuseport_id = reuse->reuseport_id;
	reuse_kern->bind_inany = reuse->bind_inany;
}

struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
				  struct bpf_prog *prog, struct sk_buff *skb,
				  u32 hash)
{
	struct sk_reuseport_kern reuse_kern;
	enum sk_action action;

	bpf_init_reuseport_kern(&reuse_kern, reuse, sk, skb, hash);
	action = BPF_PROG_RUN(prog, &reuse_kern);

	if (action == SK_PASS)
		return reuse_kern.selected_sk;
	else
		return ERR_PTR(-ECONNREFUSED);
}
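
/* Return contract (editor's summary): SK_PASS yields the socket the program
 * selected via bpf_sk_select_reuseport(), or NULL if it selected none, in
 * which case the caller falls back to its default hash-based selection; any
 * other verdict becomes ERR_PTR(-ECONNREFUSED), which the caller treats as
 * a rejected lookup.
 */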

BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern,
	   struct bpf_map *, map, void *, key, u32, flags)
{
	struct sock_reuseport *reuse;
	struct sock *selected_sk;

	selected_sk = map->ops->map_lookup_elem(map, key);
	if (!selected_sk)
		return -ENOENT;

	reuse = rcu_dereference(selected_sk->sk_reuseport_cb);
	if (!reuse)
		/* selected_sk was unhashed (e.g. by close()) after the
		 * map_lookup_elem() above; treat it as if it had already
		 * been removed from the map.
		 */
		return -ENOENT;

	if (unlikely(reuse->reuseport_id != reuse_kern->reuseport_id)) {
		struct sock *sk;

		if (unlikely(!reuse_kern->reuseport_id))
			/* There is a small race between adding the
			 * sk to the map and setting
			 * reuse_kern->reuseport_id.
			 * Treat it as if the sk has not been added to
			 * the bpf map yet.
			 */
			return -ENOENT;

		sk = reuse_kern->sk;
		if (sk->sk_protocol != selected_sk->sk_protocol)
			return -EPROTOTYPE;
		else if (sk->sk_family != selected_sk->sk_family)
			return -EAFNOSUPPORT;

		/* The selected sk belongs to a different reuseport group. */
		return -EBADFD;
	}

	reuse_kern->selected_sk = selected_sk;

	return 0;
}
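
/* Illustrative sketch (not part of this file): a BPF_PROG_TYPE_SK_REUSEPORT
 * program calling this helper.  Map and function names are hypothetical;
 * include paths and map definition style follow libbpf conventions.
 *
 *	#include <linux/bpf.h>
 *	#include <bpf/bpf_helpers.h>
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
 *		__uint(max_entries, 16);
 *		__type(key, __u32);
 *		__type(value, __u64);
 *	} socks SEC(".maps");
 *
 *	SEC("sk_reuseport")
 *	int select_sock(struct sk_reuseport_md *md)
 *	{
 *		__u32 key = md->hash % 16;
 *
 *		if (bpf_sk_select_reuseport(md, &socks, &key, 0))
 *			return SK_DROP;		// e.g. -ENOENT for an empty slot
 *		return SK_PASS;
 *	}
 */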

static const struct bpf_func_proto sk_select_reuseport_proto = {
	.func		= sk_select_reuseport,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_PTR_TO_MAP_KEY,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_4(sk_reuseport_load_bytes,
	   const struct sk_reuseport_kern *, reuse_kern, u32, offset,
	   void *, to, u32, len)
{
	return ____bpf_skb_load_bytes(reuse_kern->skb, offset, to, len);
}

static const struct bpf_func_proto sk_reuseport_load_bytes_proto = {
	.func		= sk_reuseport_load_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

BPF_CALL_5(sk_reuseport_load_bytes_relative,
	   const struct sk_reuseport_kern *, reuse_kern, u32, offset,
	   void *, to, u32, len, u32, start_header)
{
	return ____bpf_skb_load_bytes_relative(reuse_kern->skb, offset, to,
					       len, start_header);
}

static const struct bpf_func_proto sk_reuseport_load_bytes_relative_proto = {
	.func		= sk_reuseport_load_bytes_relative,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
	.arg5_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *
sk_reuseport_func_proto(enum bpf_func_id func_id,
			const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_sk_select_reuseport:
		return &sk_select_reuseport_proto;
	case BPF_FUNC_skb_load_bytes:
		return &sk_reuseport_load_bytes_proto;
	case BPF_FUNC_skb_load_bytes_relative:
		return &sk_reuseport_load_bytes_relative_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static bool
sk_reuseport_is_valid_access(int off, int size,
			     enum bpf_access_type type,
			     const struct bpf_prog *prog,
			     struct bpf_insn_access_aux *info)
{
	const u32 size_default = sizeof(__u32);

	if (off < 0 || off >= sizeof(struct sk_reuseport_md) ||
	    off % size || type != BPF_READ)
		return false;

	switch (off) {
	case offsetof(struct sk_reuseport_md, data):
		info->reg_type = PTR_TO_PACKET;
		return size == sizeof(__u64);

	case offsetof(struct sk_reuseport_md, data_end):
		info->reg_type = PTR_TO_PACKET_END;
		return size == sizeof(__u64);

	case offsetof(struct sk_reuseport_md, hash):
		return size == size_default;

	/* Fields that allow narrowing */
	case bpf_ctx_range(struct sk_reuseport_md, eth_protocol):
		if (size < sizeof_field(struct sk_buff, protocol))
			return false;
		/* fall through */
	case bpf_ctx_range(struct sk_reuseport_md, ip_protocol):
	case bpf_ctx_range(struct sk_reuseport_md, bind_inany):
	case bpf_ctx_range(struct sk_reuseport_md, len):
		bpf_ctx_record_field_size(info, size_default);
		return bpf_ctx_narrow_access_ok(off, size, size_default);

	default:
		return false;
	}
}

#define SK_REUSEPORT_LOAD_FIELD(F) ({					\
	*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_reuseport_kern, F), \
			      si->dst_reg, si->src_reg,			\
			      bpf_target_off(struct sk_reuseport_kern, F, \
					     sizeof_field(struct sk_reuseport_kern, F), \
					     target_size));		\
	})

#define SK_REUSEPORT_LOAD_SKB_FIELD(SKB_FIELD)				\
	SOCK_ADDR_LOAD_NESTED_FIELD(struct sk_reuseport_kern,		\
				    struct sk_buff,			\
				    skb,				\
				    SKB_FIELD)

#define SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(SK_FIELD, BPF_SIZE, EXTRA_OFF) \
	SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(struct sk_reuseport_kern,	\
					     struct sock,		\
					     sk,			\
					     SK_FIELD, BPF_SIZE, EXTRA_OFF)
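
/* Conceptually, SK_REUSEPORT_LOAD_SKB_FIELD(len) expands to two loads
 * (illustrative pseudo-assembly):
 *
 *	dst = ((struct sk_reuseport_kern *)ctx)->skb;
 *	dst = ((struct sk_buff *)dst)->len;
 *
 * i.e. the nested pointer is dereferenced first, then the requested field
 * is loaded, with the field width recorded for narrow-load fixups.
 */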

static u32 sk_reuseport_convert_ctx_access(enum bpf_access_type type,
					   const struct bpf_insn *si,
					   struct bpf_insn *insn_buf,
					   struct bpf_prog *prog,
					   u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct sk_reuseport_md, data):
		SK_REUSEPORT_LOAD_SKB_FIELD(data);
		break;

	case offsetof(struct sk_reuseport_md, len):
		SK_REUSEPORT_LOAD_SKB_FIELD(len);
		break;

	case offsetof(struct sk_reuseport_md, eth_protocol):
		SK_REUSEPORT_LOAD_SKB_FIELD(protocol);
		break;

	case offsetof(struct sk_reuseport_md, ip_protocol):
		BUILD_BUG_ON(HWEIGHT32(SK_FL_PROTO_MASK) != BITS_PER_BYTE);
		SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(__sk_flags_offset,
						    BPF_W, 0);
		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg,
					SK_FL_PROTO_SHIFT);
		/* SK_FL_PROTO_MASK and SK_FL_PROTO_SHIFT are endian
		 * aware; the mask+shift leave the one-byte protocol
		 * value at bit 0, so report a one-byte field size.
		 */
		*target_size = 1;
		break;

	case offsetof(struct sk_reuseport_md, data_end):
		SK_REUSEPORT_LOAD_FIELD(data_end);
		break;

	case offsetof(struct sk_reuseport_md, hash):
		SK_REUSEPORT_LOAD_FIELD(hash);
		break;

	case offsetof(struct sk_reuseport_md, bind_inany):
		SK_REUSEPORT_LOAD_FIELD(bind_inany);
		break;
	}

	return insn - insn_buf;
}

const struct bpf_verifier_ops sk_reuseport_verifier_ops = {
	.get_func_proto		= sk_reuseport_func_proto,
	.is_valid_access	= sk_reuseport_is_valid_access,
	.convert_ctx_access	= sk_reuseport_convert_ctx_access,
};

const struct bpf_prog_ops sk_reuseport_prog_ops = {
};
#endif /* CONFIG_INET */