#ifndef _LINUX_BPF_H
#define _LINUX_BPF_H 1

#include <uapi/linux/bpf.h>

#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
#include <linux/mm_types.h>
#include <linux/wait.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/capability.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
#include <linux/bpfptr.h>
26
27struct bpf_verifier_env;
28struct bpf_verifier_log;
29struct perf_event;
30struct bpf_prog;
31struct bpf_prog_aux;
32struct bpf_map;
33struct sock;
34struct seq_file;
35struct btf;
36struct btf_type;
37struct exception_table_entry;
38struct seq_operations;
39struct bpf_iter_aux_info;
40struct bpf_local_storage;
41struct bpf_local_storage_map;
42struct kobject;
43struct mem_cgroup;
44struct module;
45struct bpf_func_state;
46
47extern struct idr btf_idr;
48extern spinlock_t btf_idr_lock;
49extern struct kobject *btf_kobj;
50
51typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);
52typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
53 struct bpf_iter_aux_info *aux);
54typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
55struct bpf_iter_seq_info {
56 const struct seq_operations *seq_ops;
57 bpf_iter_init_seq_priv_t init_seq_private;
58 bpf_iter_fini_seq_priv_t fini_seq_private;
59 u32 seq_priv_size;
60};
61
62
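/* Per map-type operations table; every BPF map type provides one of these.
 * Roughly: the first group of callbacks services the bpf(2) syscall from
 * user space, while map_lookup_elem()/map_update_elem()/map_delete_elem()
 * and the push/pop/peek callbacks are also used from program context.
 */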
63struct bpf_map_ops {
64
65 int (*map_alloc_check)(union bpf_attr *attr);
66 struct bpf_map *(*map_alloc)(union bpf_attr *attr);
67 void (*map_release)(struct bpf_map *map, struct file *map_file);
68 void (*map_free)(struct bpf_map *map);
69 int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
70 void (*map_release_uref)(struct bpf_map *map);
71 void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
72 int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
73 union bpf_attr __user *uattr);
74 int (*map_lookup_and_delete_elem)(struct bpf_map *map, void *key,
75 void *value, u64 flags);
76 int (*map_lookup_and_delete_batch)(struct bpf_map *map,
77 const union bpf_attr *attr,
78 union bpf_attr __user *uattr);
79 int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr,
80 union bpf_attr __user *uattr);
81 int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
82 union bpf_attr __user *uattr);
83
84
85 void *(*map_lookup_elem)(struct bpf_map *map, void *key);
86 int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
87 int (*map_delete_elem)(struct bpf_map *map, void *key);
88 int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
89 int (*map_pop_elem)(struct bpf_map *map, void *value);
90 int (*map_peek_elem)(struct bpf_map *map, void *value);
91
92
93 void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
94 int fd);
95 void (*map_fd_put_ptr)(void *ptr);
96 int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
97 u32 (*map_fd_sys_lookup_elem)(void *ptr);
98 void (*map_seq_show_elem)(struct bpf_map *map, void *key,
99 struct seq_file *m);
100 int (*map_check_btf)(const struct bpf_map *map,
101 const struct btf *btf,
102 const struct btf_type *key_type,
103 const struct btf_type *value_type);
104
105
106 int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
107 void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
108 void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old,
109 struct bpf_prog *new);
110
111
112 int (*map_direct_value_addr)(const struct bpf_map *map,
113 u64 *imm, u32 off);
114 int (*map_direct_value_meta)(const struct bpf_map *map,
115 u64 imm, u32 *off);
116 int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
117 __poll_t (*map_poll)(struct bpf_map *map, struct file *filp,
118 struct poll_table_struct *pts);
119
120
121 int (*map_local_storage_charge)(struct bpf_local_storage_map *smap,
122 void *owner, u32 size);
123 void (*map_local_storage_uncharge)(struct bpf_local_storage_map *smap,
124 void *owner, u32 size);
125 struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner);
126
127
128 int (*map_redirect)(struct bpf_map *map, u32 ifindex, u64 flags);
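
	/* map_meta_equal is used by map-in-map: when a map fd is stored into
	 * an outer map, the inner map's properties are checked against the
	 * meta map the outer map was created with.  Map types that can be
	 * used as inner maps are expected to implement it; bpf_map_meta_equal()
	 * declared below is the common choice.
	 */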
139 bool (*map_meta_equal)(const struct bpf_map *meta0,
140 const struct bpf_map *meta1);
141
142
143 int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env,
144 struct bpf_func_state *caller,
145 struct bpf_func_state *callee);
146 int (*map_for_each_callback)(struct bpf_map *map,
147 bpf_callback_t callback_fn,
148 void *callback_ctx, u64 flags);
149
150
151 const char * const map_btf_name;
152 int *map_btf_id;
153
154
155 const struct bpf_iter_seq_info *iter_seq_info;
156};
157
158struct bpf_map {
159
160
161
162 const struct bpf_map_ops *ops ____cacheline_aligned;
163 struct bpf_map *inner_map_meta;
164#ifdef CONFIG_SECURITY
165 void *security;
166#endif
167 enum bpf_map_type map_type;
168 u32 key_size;
169 u32 value_size;
170 u32 max_entries;
171 u64 map_extra;
172 u32 map_flags;
173 int spin_lock_off;
174 int timer_off;
175 u32 id;
176 int numa_node;
177 u32 btf_key_type_id;
178 u32 btf_value_type_id;
179 u32 btf_vmlinux_value_type_id;
180 struct btf *btf;
181#ifdef CONFIG_MEMCG_KMEM
182 struct mem_cgroup *memcg;
183#endif
184 char name[BPF_OBJ_NAME_LEN];
185 bool bypass_spec_v1;
186 bool frozen;
187
188
189
190
191
192 atomic64_t refcnt ____cacheline_aligned;
193 atomic64_t usercnt;
194 struct work_struct work;
195 struct mutex freeze_mutex;
196 atomic64_t writecnt;
197
198
199
200
201
202 struct {
203 spinlock_t lock;
204 enum bpf_prog_type type;
205 bool jited;
206 bool xdp_has_frags;
207 } owner;
208};
209
210static inline bool map_value_has_spin_lock(const struct bpf_map *map)
211{
212 return map->spin_lock_off >= 0;
213}
214
215static inline bool map_value_has_timer(const struct bpf_map *map)
216{
217 return map->timer_off >= 0;
218}
219
220static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
221{
222 if (unlikely(map_value_has_spin_lock(map)))
223 memset(dst + map->spin_lock_off, 0, sizeof(struct bpf_spin_lock));
224 if (unlikely(map_value_has_timer(map)))
225 memset(dst + map->timer_off, 0, sizeof(struct bpf_timer));
226}
227
228
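/* Copy a map value while skipping any bpf_spin_lock and bpf_timer embedded
 * in it (there can be at most one of each, at the offsets recorded in the
 * map).  check_and_init_map_value() above zeroes those same regions instead.
 */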
229static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
230{
231 u32 s_off = 0, s_sz = 0, t_off = 0, t_sz = 0;
232
233 if (unlikely(map_value_has_spin_lock(map))) {
234 s_off = map->spin_lock_off;
235 s_sz = sizeof(struct bpf_spin_lock);
236 }
237 if (unlikely(map_value_has_timer(map))) {
238 t_off = map->timer_off;
239 t_sz = sizeof(struct bpf_timer);
240 }
241
242 if (unlikely(s_sz || t_sz)) {
243 if (s_off < t_off || !s_sz) {
244 swap(s_off, t_off);
245 swap(s_sz, t_sz);
246 }
247 memcpy(dst, src, t_off);
248 memcpy(dst + t_off + t_sz,
249 src + t_off + t_sz,
250 s_off - t_off - t_sz);
251 memcpy(dst + s_off + s_sz,
252 src + s_off + s_sz,
253 map->value_size - s_off - s_sz);
254 } else {
255 memcpy(dst, src, map->value_size);
256 }
257}
258void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
259 bool lock_src);
260void bpf_timer_cancel_and_free(void *timer);
261int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);
262
263struct bpf_offload_dev;
264struct bpf_offloaded_map;
265
266struct bpf_map_dev_ops {
267 int (*map_get_next_key)(struct bpf_offloaded_map *map,
268 void *key, void *next_key);
269 int (*map_lookup_elem)(struct bpf_offloaded_map *map,
270 void *key, void *value);
271 int (*map_update_elem)(struct bpf_offloaded_map *map,
272 void *key, void *value, u64 flags);
273 int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
274};
275
276struct bpf_offloaded_map {
277 struct bpf_map map;
278 struct net_device *netdev;
279 const struct bpf_map_dev_ops *dev_ops;
280 void *dev_priv;
281 struct list_head offloads;
282};
283
284static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
285{
286 return container_of(map, struct bpf_offloaded_map, map);
287}
288
289static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
290{
291 return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
292}
293
294static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
295{
296 return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) &&
297 map->ops->map_seq_show_elem;
298}
299
300int map_check_no_btf(const struct bpf_map *map,
301 const struct btf *btf,
302 const struct btf_type *key_type,
303 const struct btf_type *value_type);
304
305bool bpf_map_meta_equal(const struct bpf_map *meta0,
306 const struct bpf_map *meta1);
307
308extern const struct bpf_map_ops bpf_map_offload_ops;
309
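/* The register/argument/return types below are split into a base type kept
 * in the low BPF_BASE_TYPE_BITS bits and modifier flags (enum bpf_type_flag)
 * or'ed into the higher bits, e.g. PTR_TO_MAP_VALUE_OR_NULL is
 * PTR_MAYBE_NULL | PTR_TO_MAP_VALUE.  Illustrative decomposition only (the
 * actual accessor helpers live elsewhere in the tree):
 *
 *	u32 base  = type & (BPF_BASE_TYPE_LIMIT - 1);
 *	u32 flags = type & ~(BPF_BASE_TYPE_LIMIT - 1);
 */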
318#define BPF_BASE_TYPE_BITS 8
319
320enum bpf_type_flag {
321
322 PTR_MAYBE_NULL = BIT(0 + BPF_BASE_TYPE_BITS),
323
324
325
326
327 MEM_RDONLY = BIT(1 + BPF_BASE_TYPE_BITS),
328
329
330
331
332 MEM_ALLOC = BIT(2 + BPF_BASE_TYPE_BITS),
333
334
335 MEM_USER = BIT(3 + BPF_BASE_TYPE_BITS),
336
337
338
339
340
341
342
343 MEM_PERCPU = BIT(4 + BPF_BASE_TYPE_BITS),
344
345 __BPF_TYPE_LAST_FLAG = MEM_PERCPU,
346};
347
348
349#define BPF_BASE_TYPE_LIMIT (1UL << BPF_BASE_TYPE_BITS)
350
351
352#define BPF_TYPE_LIMIT (__BPF_TYPE_LAST_FLAG | (__BPF_TYPE_LAST_FLAG - 1))
353
354
355enum bpf_arg_type {
356 ARG_DONTCARE = 0,
357
358
359
360
361 ARG_CONST_MAP_PTR,
362 ARG_PTR_TO_MAP_KEY,
363 ARG_PTR_TO_MAP_VALUE,
364 ARG_PTR_TO_UNINIT_MAP_VALUE,
365
366
367
368
369 ARG_PTR_TO_MEM,
370 ARG_PTR_TO_UNINIT_MEM,
371
372
373
374
375 ARG_CONST_SIZE,
376 ARG_CONST_SIZE_OR_ZERO,
377
378 ARG_PTR_TO_CTX,
379 ARG_ANYTHING,
380 ARG_PTR_TO_SPIN_LOCK,
381 ARG_PTR_TO_SOCK_COMMON,
382 ARG_PTR_TO_INT,
383 ARG_PTR_TO_LONG,
384 ARG_PTR_TO_SOCKET,
385 ARG_PTR_TO_BTF_ID,
386 ARG_PTR_TO_ALLOC_MEM,
387 ARG_CONST_ALLOC_SIZE_OR_ZERO,
388 ARG_PTR_TO_BTF_ID_SOCK_COMMON,
389 ARG_PTR_TO_PERCPU_BTF_ID,
390 ARG_PTR_TO_FUNC,
391 ARG_PTR_TO_STACK,
392 ARG_PTR_TO_CONST_STR,
393 ARG_PTR_TO_TIMER,
394 __BPF_ARG_TYPE_MAX,
395
396
397 ARG_PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_MAP_VALUE,
398 ARG_PTR_TO_MEM_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_MEM,
399 ARG_PTR_TO_CTX_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_CTX,
400 ARG_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_SOCKET,
401 ARG_PTR_TO_ALLOC_MEM_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_ALLOC_MEM,
402 ARG_PTR_TO_STACK_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_STACK,
403
404
405
406
407 __BPF_ARG_TYPE_LIMIT = BPF_TYPE_LIMIT,
408};
409static_assert(__BPF_ARG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
410
411
412enum bpf_return_type {
413 RET_INTEGER,
414 RET_VOID,
415 RET_PTR_TO_MAP_VALUE,
416 RET_PTR_TO_SOCKET,
417 RET_PTR_TO_TCP_SOCK,
418 RET_PTR_TO_SOCK_COMMON,
419 RET_PTR_TO_ALLOC_MEM,
420 RET_PTR_TO_MEM_OR_BTF_ID,
421 RET_PTR_TO_BTF_ID,
422 __BPF_RET_TYPE_MAX,
423
424
425 RET_PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_MAP_VALUE,
426 RET_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCKET,
427 RET_PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_TCP_SOCK,
428 RET_PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCK_COMMON,
429 RET_PTR_TO_ALLOC_MEM_OR_NULL = PTR_MAYBE_NULL | MEM_ALLOC | RET_PTR_TO_ALLOC_MEM,
430 RET_PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_BTF_ID,
431
432
433
434
435 __BPF_RET_TYPE_LIMIT = BPF_TYPE_LIMIT,
436};
437static_assert(__BPF_RET_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
438
439
440
441
442
443struct bpf_func_proto {
444 u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
445 bool gpl_only;
446 bool pkt_access;
447 enum bpf_return_type ret_type;
448 union {
449 struct {
450 enum bpf_arg_type arg1_type;
451 enum bpf_arg_type arg2_type;
452 enum bpf_arg_type arg3_type;
453 enum bpf_arg_type arg4_type;
454 enum bpf_arg_type arg5_type;
455 };
456 enum bpf_arg_type arg_type[5];
457 };
458 union {
459 struct {
460 u32 *arg1_btf_id;
461 u32 *arg2_btf_id;
462 u32 *arg3_btf_id;
463 u32 *arg4_btf_id;
464 u32 *arg5_btf_id;
465 };
466 u32 *arg_btf_id[5];
467 };
468 int *ret_btf_id;
469 bool (*allowed)(const struct bpf_prog *prog);
470};
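
/* Illustrative only: a helper implementation typically pairs a BPF_CALL_n()
 * body with a bpf_func_proto describing its signature to the verifier; this
 * sketch mirrors the bpf_map_delete_elem() helper in kernel/bpf/helpers.c:
 *
 *	BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
 *	{
 *		return map->ops->map_delete_elem(map, key);
 *	}
 *
 *	const struct bpf_func_proto bpf_map_delete_elem_proto = {
 *		.func		= bpf_map_delete_elem,
 *		.gpl_only	= false,
 *		.ret_type	= RET_INTEGER,
 *		.arg1_type	= ARG_CONST_MAP_PTR,
 *		.arg2_type	= ARG_PTR_TO_MAP_KEY,
 *	};
 */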
471
472
473
474
475
476struct bpf_context;
477
478enum bpf_access_type {
479 BPF_READ = 1,
480 BPF_WRITE = 2
481};
482
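/* Types of values the verifier tracks for each register and stack slot.
 * Base types may be combined with the bpf_type_flag modifiers above
 * (e.g. PTR_MAYBE_NULL, MEM_RDONLY), as the composed entries at the end of
 * the enum show.
 */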
493enum bpf_reg_type {
494 NOT_INIT = 0,
495 SCALAR_VALUE,
496 PTR_TO_CTX,
497 CONST_PTR_TO_MAP,
498 PTR_TO_MAP_VALUE,
499 PTR_TO_MAP_KEY,
500 PTR_TO_STACK,
501 PTR_TO_PACKET_META,
502 PTR_TO_PACKET,
503 PTR_TO_PACKET_END,
504 PTR_TO_FLOW_KEYS,
505 PTR_TO_SOCKET,
506 PTR_TO_SOCK_COMMON,
507 PTR_TO_TCP_SOCK,
508 PTR_TO_TP_BUFFER,
509 PTR_TO_XDP_SOCK,
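
	/* PTR_TO_BTF_ID points to a kernel struct whose type is identified
	 * by a BTF id (reg->btf + reg->btf_id in the verifier); it may carry
	 * modifier flags such as PTR_MAYBE_NULL, MEM_USER or MEM_PERCPU.
	 */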
520 PTR_TO_BTF_ID,
521
522
523
524
525 PTR_TO_MEM,
526 PTR_TO_BUF,
527 PTR_TO_FUNC,
528 __BPF_REG_TYPE_MAX,
529
530
531 PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | PTR_TO_MAP_VALUE,
532 PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCKET,
533 PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCK_COMMON,
534 PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | PTR_TO_TCP_SOCK,
535 PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | PTR_TO_BTF_ID,
536
537
538
539
540 __BPF_REG_TYPE_LIMIT = BPF_TYPE_LIMIT,
541};
542static_assert(__BPF_REG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
543
544
545
546
547struct bpf_insn_access_aux {
548 enum bpf_reg_type reg_type;
549 union {
550 int ctx_field_size;
551 struct {
552 struct btf *btf;
553 u32 btf_id;
554 };
555 };
556 struct bpf_verifier_log *log;
557};
558
559static inline void
560bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
561{
562 aux->ctx_field_size = size;
563}
564
565static inline bool bpf_pseudo_func(const struct bpf_insn *insn)
566{
567 return insn->code == (BPF_LD | BPF_IMM | BPF_DW) &&
568 insn->src_reg == BPF_PSEUDO_FUNC;
569}
570
571struct bpf_prog_ops {
572 int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
573 union bpf_attr __user *uattr);
574};
575
576struct bpf_verifier_ops {
577
578 const struct bpf_func_proto *
579 (*get_func_proto)(enum bpf_func_id func_id,
580 const struct bpf_prog *prog);
581
582
583
584
585 bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
586 const struct bpf_prog *prog,
587 struct bpf_insn_access_aux *info);
588 int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
589 const struct bpf_prog *prog);
590 int (*gen_ld_abs)(const struct bpf_insn *orig,
591 struct bpf_insn *insn_buf);
592 u32 (*convert_ctx_access)(enum bpf_access_type type,
593 const struct bpf_insn *src,
594 struct bpf_insn *dst,
595 struct bpf_prog *prog, u32 *target_size);
596 int (*btf_struct_access)(struct bpf_verifier_log *log,
597 const struct btf *btf,
598 const struct btf_type *t, int off, int size,
599 enum bpf_access_type atype,
600 u32 *next_btf_id, enum bpf_type_flag *flag);
601};
602
603struct bpf_prog_offload_ops {
604
605 int (*insn_hook)(struct bpf_verifier_env *env,
606 int insn_idx, int prev_insn_idx);
607 int (*finalize)(struct bpf_verifier_env *env);
608
609 int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
610 struct bpf_insn *insn);
611 int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
612
613 int (*prepare)(struct bpf_prog *prog);
614 int (*translate)(struct bpf_prog *prog);
615 void (*destroy)(struct bpf_prog *prog);
616};
617
618struct bpf_prog_offload {
619 struct bpf_prog *prog;
620 struct net_device *netdev;
621 struct bpf_offload_dev *offdev;
622 void *dev_priv;
623 struct list_head offloads;
624 bool dev_state;
625 bool opt_failed;
626 void *jited_image;
627 u32 jited_len;
628};
629
630enum bpf_cgroup_storage_type {
631 BPF_CGROUP_STORAGE_SHARED,
632 BPF_CGROUP_STORAGE_PERCPU,
633 __BPF_CGROUP_STORAGE_MAX
634};
635
636#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
637
638
639
640
641#define MAX_BPF_FUNC_ARGS 12
642
643
644
645
646#define MAX_BPF_FUNC_REG_ARGS 5
647
648struct btf_func_model {
649 u8 ret_size;
650 u8 nr_args;
651 u8 arg_size[MAX_BPF_FUNC_ARGS];
652};
653
654
655
656
657
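/* Flags for arch_prepare_bpf_trampoline(), roughly:
 *  - BPF_TRAMP_F_RESTORE_REGS: restore the argument registers before
 *    returning so the traced function keeps executing (fentry-only case).
 *  - BPF_TRAMP_F_CALL_ORIG: call the original function between the fentry
 *    and fexit programs.
 *  - BPF_TRAMP_F_SKIP_FRAME: skip the current frame and return straight to
 *    the parent (fentry/fexit only).
 *  - BPF_TRAMP_F_IP_ARG: store the traced function's IP on the trampoline
 *    stack so programs can retrieve it.
 *  - BPF_TRAMP_F_RET_FENTRY_RET: return the fentry program's return value
 *    (used by bpf_struct_ops).
 */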
658#define BPF_TRAMP_F_RESTORE_REGS BIT(0)
659
660
661
662#define BPF_TRAMP_F_CALL_ORIG BIT(1)
663
664
665
666#define BPF_TRAMP_F_SKIP_FRAME BIT(2)
667
668
669
670#define BPF_TRAMP_F_IP_ARG BIT(3)
671
672#define BPF_TRAMP_F_RET_FENTRY_RET BIT(4)
673
674
675
676
677#define BPF_MAX_TRAMP_PROGS 38
678
679struct bpf_tramp_progs {
680 struct bpf_prog *progs[BPF_MAX_TRAMP_PROGS];
681 int nr_progs;
682};
683
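/* Arch code generates a BPF trampoline into [image, image_end): it saves the
 * traced function's arguments as described by @m, runs the fentry programs
 * in @tprogs, optionally calls the original function (@orig_call, with
 * BPF_TRAMP_F_CALL_ORIG), then runs the fexit programs before returning.
 */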
704struct bpf_tramp_image;
705int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
706 const struct btf_func_model *m, u32 flags,
707 struct bpf_tramp_progs *tprogs,
708 void *orig_call);
709
710u64 notrace __bpf_prog_enter(struct bpf_prog *prog);
711void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
712u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog);
713void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start);
714void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
715void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);
716
717struct bpf_ksym {
718 unsigned long start;
719 unsigned long end;
720 char name[KSYM_NAME_LEN];
721 struct list_head lnode;
722 struct latch_tree_node tnode;
723 bool prog;
724};
725
726enum bpf_tramp_prog_type {
727 BPF_TRAMP_FENTRY,
728 BPF_TRAMP_FEXIT,
729 BPF_TRAMP_MODIFY_RETURN,
730 BPF_TRAMP_MAX,
731 BPF_TRAMP_REPLACE,
732};
733
734struct bpf_tramp_image {
735 void *image;
736 struct bpf_ksym ksym;
737 struct percpu_ref pcref;
738 void *ip_after_call;
739 void *ip_epilogue;
740 union {
741 struct rcu_head rcu;
742 struct work_struct work;
743 };
744};
745
746struct bpf_trampoline {
747
748 struct hlist_node hlist;
749
750 struct mutex mutex;
751 refcount_t refcnt;
752 u64 key;
753 struct {
754 struct btf_func_model model;
755 void *addr;
756 bool ftrace_managed;
757 } func;
758
759
760
761
762 struct bpf_prog *extension_prog;
763
764 struct hlist_head progs_hlist[BPF_TRAMP_MAX];
765
766 int progs_cnt[BPF_TRAMP_MAX];
767
768 struct bpf_tramp_image *cur_image;
769 u64 selector;
770 struct module *mod;
771};
772
773struct bpf_attach_target_info {
774 struct btf_func_model fmodel;
775 long tgt_addr;
776 const char *tgt_name;
777 const struct btf_type *tgt_type;
778};
779
780#define BPF_DISPATCHER_MAX 48
781
782struct bpf_dispatcher_prog {
783 struct bpf_prog *prog;
784 refcount_t users;
785};
786
787struct bpf_dispatcher {
788
789 struct mutex mutex;
790 void *func;
791 struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
792 int num_progs;
793 void *image;
794 u32 image_off;
795 struct bpf_ksym ksym;
796};
797
798static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func(
799 const void *ctx,
800 const struct bpf_insn *insnsi,
801 unsigned int (*bpf_func)(const void *,
802 const struct bpf_insn *))
803{
804 return bpf_func(ctx, insnsi);
805}
806#ifdef CONFIG_BPF_JIT
807int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr);
808int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr);
809struct bpf_trampoline *bpf_trampoline_get(u64 key,
810 struct bpf_attach_target_info *tgt_info);
811void bpf_trampoline_put(struct bpf_trampoline *tr);
812int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs);
813#define BPF_DISPATCHER_INIT(_name) { \
814 .mutex = __MUTEX_INITIALIZER(_name.mutex), \
815 .func = &_name##_func, \
816 .progs = {}, \
817 .num_progs = 0, \
818 .image = NULL, \
819 .image_off = 0, \
820 .ksym = { \
821 .name = #_name, \
822 .lnode = LIST_HEAD_INIT(_name.ksym.lnode), \
823 }, \
824}
825
826#define DEFINE_BPF_DISPATCHER(name) \
827 noinline __nocfi unsigned int bpf_dispatcher_##name##_func( \
828 const void *ctx, \
829 const struct bpf_insn *insnsi, \
830 unsigned int (*bpf_func)(const void *, \
831 const struct bpf_insn *)) \
832 { \
833 return bpf_func(ctx, insnsi); \
834 } \
835 EXPORT_SYMBOL(bpf_dispatcher_##name##_func); \
836 struct bpf_dispatcher bpf_dispatcher_##name = \
837 BPF_DISPATCHER_INIT(bpf_dispatcher_##name);
838#define DECLARE_BPF_DISPATCHER(name) \
839 unsigned int bpf_dispatcher_##name##_func( \
840 const void *ctx, \
841 const struct bpf_insn *insnsi, \
842 unsigned int (*bpf_func)(const void *, \
843 const struct bpf_insn *)); \
844 extern struct bpf_dispatcher bpf_dispatcher_##name;
845#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
846#define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
847void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
848 struct bpf_prog *to);
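
/* Sketch of typical dispatcher usage, for illustration only (the XDP
 * dispatcher in net/core/filter.c follows this pattern):
 *
 *	DEFINE_BPF_DISPATCHER(my_dispatcher)
 *
 *	run path:   ret = BPF_DISPATCHER_FUNC(my_dispatcher)(ctx, insnsi,
 *							      bpf_func);
 *	on attach:  bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(my_dispatcher),
 *					       old_prog, new_prog);
 */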
849
850void *bpf_jit_alloc_exec_page(void);
851void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
852void bpf_image_ksym_del(struct bpf_ksym *ksym);
853void bpf_ksym_add(struct bpf_ksym *ksym);
854void bpf_ksym_del(struct bpf_ksym *ksym);
855int bpf_jit_charge_modmem(u32 size);
856void bpf_jit_uncharge_modmem(u32 size);
857bool bpf_prog_has_trampoline(const struct bpf_prog *prog);
858#else
859static inline int bpf_trampoline_link_prog(struct bpf_prog *prog,
860 struct bpf_trampoline *tr)
861{
862 return -ENOTSUPP;
863}
864static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog,
865 struct bpf_trampoline *tr)
866{
867 return -ENOTSUPP;
868}
869static inline struct bpf_trampoline *bpf_trampoline_get(u64 key,
870 struct bpf_attach_target_info *tgt_info)
871{
872 return ERR_PTR(-EOPNOTSUPP);
873}
874static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
875#define DEFINE_BPF_DISPATCHER(name)
876#define DECLARE_BPF_DISPATCHER(name)
877#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nop_func
878#define BPF_DISPATCHER_PTR(name) NULL
879static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
880 struct bpf_prog *from,
881 struct bpf_prog *to) {}
882static inline bool is_bpf_image_address(unsigned long address)
883{
884 return false;
885}
886static inline bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
887{
888 return false;
889}
890#endif
891
892struct bpf_func_info_aux {
893 u16 linkage;
894 bool unreliable;
895};
896
897enum bpf_jit_poke_reason {
898 BPF_POKE_REASON_TAIL_CALL,
899};
900
901
902struct bpf_jit_poke_descriptor {
903 void *tailcall_target;
904 void *tailcall_bypass;
905 void *bypass_addr;
906 void *aux;
907 union {
908 struct {
909 struct bpf_map *map;
910 u32 key;
911 } tail_call;
912 };
913 bool tailcall_target_stable;
914 u8 adj_off;
915 u16 reason;
916 u32 insn_idx;
917};
918
919
920struct bpf_ctx_arg_aux {
921 u32 offset;
922 enum bpf_reg_type reg_type;
923 u32 btf_id;
924};
925
926struct btf_mod_pair {
927 struct btf *btf;
928 struct module *module;
929};
930
931struct bpf_kfunc_desc_tab;
932
933struct bpf_prog_aux {
934 atomic64_t refcnt;
935 u32 used_map_cnt;
936 u32 used_btf_cnt;
937 u32 max_ctx_offset;
938 u32 max_pkt_offset;
939 u32 max_tp_access;
940 u32 stack_depth;
941 u32 id;
942 u32 func_cnt;
943 u32 func_idx;
944 u32 attach_btf_id;
945 u32 ctx_arg_info_size;
946 u32 max_rdonly_access;
947 u32 max_rdwr_access;
948 struct btf *attach_btf;
949 const struct bpf_ctx_arg_aux *ctx_arg_info;
950 struct mutex dst_mutex;
951 struct bpf_prog *dst_prog;
952 struct bpf_trampoline *dst_trampoline;
953 enum bpf_prog_type saved_dst_prog_type;
954 enum bpf_attach_type saved_dst_attach_type;
955 bool verifier_zext;
956 bool offload_requested;
957 bool attach_btf_trace;
958 bool func_proto_unreliable;
959 bool sleepable;
960 bool tail_call_reachable;
961 bool xdp_has_frags;
962 bool use_bpf_prog_pack;
963 struct hlist_node tramp_hlist;
964
965 const struct btf_type *attach_func_proto;
966
967 const char *attach_func_name;
968 struct bpf_prog **func;
969 void *jit_data;
970 struct bpf_jit_poke_descriptor *poke_tab;
971 struct bpf_kfunc_desc_tab *kfunc_tab;
972 struct bpf_kfunc_btf_tab *kfunc_btf_tab;
973 u32 size_poke_tab;
974 struct bpf_ksym ksym;
975 const struct bpf_prog_ops *ops;
976 struct bpf_map **used_maps;
977 struct mutex used_maps_mutex;
978 struct btf_mod_pair *used_btfs;
979 struct bpf_prog *prog;
980 struct user_struct *user;
981 u64 load_time;
982 u32 verified_insns;
983 struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
984 char name[BPF_OBJ_NAME_LEN];
985#ifdef CONFIG_SECURITY
986 void *security;
987#endif
988 struct bpf_prog_offload *offload;
989 struct btf *btf;
990 struct bpf_func_info *func_info;
991 struct bpf_func_info_aux *func_info_aux;
992
993
994
995
996
997
998 struct bpf_line_info *linfo;
999
1000
1001
1002
1003
1004
1005
1006 void **jited_linfo;
1007 u32 func_info_cnt;
1008 u32 nr_linfo;
1009
1010
1011
1012
1013 u32 linfo_idx;
1014 u32 num_exentries;
1015 struct exception_table_entry *extable;
1016 union {
1017 struct work_struct work;
1018 struct rcu_head rcu;
1019 };
1020};
1021
1022struct bpf_array_aux {
1023
1024 struct list_head poke_progs;
1025 struct bpf_map *map;
1026 struct mutex poke_mutex;
1027 struct work_struct work;
1028};
1029
1030struct bpf_link {
1031 atomic64_t refcnt;
1032 u32 id;
1033 enum bpf_link_type type;
1034 const struct bpf_link_ops *ops;
1035 struct bpf_prog *prog;
1036 struct work_struct work;
1037};
1038
1039struct bpf_link_ops {
1040 void (*release)(struct bpf_link *link);
1041 void (*dealloc)(struct bpf_link *link);
1042 int (*detach)(struct bpf_link *link);
1043 int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
1044 struct bpf_prog *old_prog);
1045 void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq);
1046 int (*fill_link_info)(const struct bpf_link *link,
1047 struct bpf_link_info *info);
1048};
1049
1050struct bpf_link_primer {
1051 struct bpf_link *link;
1052 struct file *file;
1053 int fd;
1054 u32 id;
1055};
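
/* bpf_link attachment follows a two-phase "prime then settle" pattern so a
 * half-constructed link is never visible through an fd.  A rough sketch,
 * with a hypothetical attach step:
 *
 *	bpf_link_init(&link->link, BPF_LINK_TYPE_TRACING, &my_link_ops, prog);
 *	err = bpf_link_prime(&link->link, &primer);
 *	if (err) {
 *		kfree(link);
 *		return err;
 *	}
 *	err = do_the_actual_attach(link);	// hypothetical
 *	if (err) {
 *		bpf_link_cleanup(&primer);	// undoes prime, frees via dealloc
 *		return err;
 *	}
 *	return bpf_link_settle(&primer);	// publishes the fd to user space
 */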
1056
1057struct bpf_struct_ops_value;
1058struct btf_member;
1059
1060#define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
1061struct bpf_struct_ops {
1062 const struct bpf_verifier_ops *verifier_ops;
1063 int (*init)(struct btf *btf);
1064 int (*check_member)(const struct btf_type *t,
1065 const struct btf_member *member);
1066 int (*init_member)(const struct btf_type *t,
1067 const struct btf_member *member,
1068 void *kdata, const void *udata);
1069 int (*reg)(void *kdata);
1070 void (*unreg)(void *kdata);
1071 const struct btf_type *type;
1072 const struct btf_type *value_type;
1073 const char *name;
1074 struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
1075 u32 type_id;
1076 u32 value_id;
1077};
1078
1079#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
1080#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
1081const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id);
1082void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log);
1083bool bpf_struct_ops_get(const void *kdata);
1084void bpf_struct_ops_put(const void *kdata);
1085int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
1086 void *value);
1087int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_progs *tprogs,
1088 struct bpf_prog *prog,
1089 const struct btf_func_model *model,
1090 void *image, void *image_end);
1091static inline bool bpf_try_module_get(const void *data, struct module *owner)
1092{
1093 if (owner == BPF_MODULE_OWNER)
1094 return bpf_struct_ops_get(data);
1095 else
1096 return try_module_get(owner);
1097}
1098static inline void bpf_module_put(const void *data, struct module *owner)
1099{
1100 if (owner == BPF_MODULE_OWNER)
1101 bpf_struct_ops_put(data);
1102 else
1103 module_put(owner);
1104}
1105
1106#ifdef CONFIG_NET
1107
1108struct bpf_dummy_ops_state {
1109 int val;
1110};
1111
1112struct bpf_dummy_ops {
1113 int (*test_1)(struct bpf_dummy_ops_state *cb);
1114 int (*test_2)(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2,
1115 char a3, unsigned long a4);
1116};
1117
1118int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
1119 union bpf_attr __user *uattr);
1120#endif
1121#else
1122static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
1123{
1124 return NULL;
1125}
1126static inline void bpf_struct_ops_init(struct btf *btf,
1127 struct bpf_verifier_log *log)
1128{
1129}
1130static inline bool bpf_try_module_get(const void *data, struct module *owner)
1131{
1132 return try_module_get(owner);
1133}
1134static inline void bpf_module_put(const void *data, struct module *owner)
1135{
1136 module_put(owner);
1137}
1138static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
1139 void *key,
1140 void *value)
1141{
1142 return -EINVAL;
1143}
1144#endif
1145
1146struct bpf_array {
1147 struct bpf_map map;
1148 u32 elem_size;
1149 u32 index_mask;
1150 struct bpf_array_aux *aux;
1151 union {
1152 char value[0] __aligned(8);
1153 void *ptrs[0] __aligned(8);
1154 void __percpu *pptrs[0] __aligned(8);
1155 };
1156};
1157
1158#define BPF_COMPLEXITY_LIMIT_INSNS 1000000
1159#define MAX_TAIL_CALL_CNT 33
1160
1161#define BPF_F_ACCESS_MASK (BPF_F_RDONLY | \
1162 BPF_F_RDONLY_PROG | \
1163 BPF_F_WRONLY | \
1164 BPF_F_WRONLY_PROG)
1165
1166#define BPF_MAP_CAN_READ BIT(0)
1167#define BPF_MAP_CAN_WRITE BIT(1)
1168
1169static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
1170{
1171 u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
1172
1173
1174
1175
1176 if (access_flags & BPF_F_RDONLY_PROG)
1177 return BPF_MAP_CAN_READ;
1178 else if (access_flags & BPF_F_WRONLY_PROG)
1179 return BPF_MAP_CAN_WRITE;
1180 else
1181 return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE;
1182}
1183
1184static inline bool bpf_map_flags_access_ok(u32 access_flags)
1185{
1186 return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) !=
1187 (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
1188}
1189
1190struct bpf_event_entry {
1191 struct perf_event *event;
1192 struct file *perf_file;
1193 struct file *map_file;
1194 struct rcu_head rcu;
1195};
1196
1197static inline bool map_type_contains_progs(struct bpf_map *map)
1198{
1199 return map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
1200 map->map_type == BPF_MAP_TYPE_DEVMAP ||
1201 map->map_type == BPF_MAP_TYPE_CPUMAP;
1202}
1203
1204bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp);
1205int bpf_prog_calc_tag(struct bpf_prog *fp);
1206
1207const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
1208const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void);
1209
1210typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
1211 unsigned long off, unsigned long len);
1212typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
1213 const struct bpf_insn *src,
1214 struct bpf_insn *dst,
1215 struct bpf_prog *prog,
1216 u32 *target_size);
1217
1218u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
1219 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);
1220
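/* A bpf_prog_array is an RCU-managed, NULL-terminated array of programs plus
 * per-program data (a bpf_cookie or cgroup storage pointers).  Attach points
 * such as cgroups, tracepoints and perf events keep one of these and run it
 * with the BPF_PROG_RUN_ARRAY*() helpers below; updates typically swap in a
 * new copy built by bpf_prog_array_copy() and free the old one after an RCU
 * grace period.
 */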
1233struct bpf_prog_array_item {
1234 struct bpf_prog *prog;
1235 union {
1236 struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
1237 u64 bpf_cookie;
1238 };
1239};
1240
1241struct bpf_prog_array {
1242 struct rcu_head rcu;
1243 struct bpf_prog_array_item items[];
1244};
1245
1246struct bpf_empty_prog_array {
1247 struct bpf_prog_array hdr;
1248 struct bpf_prog *null_prog;
1249};
1250
1251
1252
1253
1254
1255
1256
1257extern struct bpf_empty_prog_array bpf_empty_prog_array;
1258
1259struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
1260void bpf_prog_array_free(struct bpf_prog_array *progs);
1261int bpf_prog_array_length(struct bpf_prog_array *progs);
1262bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
1263int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
1264 __u32 __user *prog_ids, u32 cnt);
1265
1266void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
1267 struct bpf_prog *old_prog);
1268int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index);
1269int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
1270 struct bpf_prog *prog);
1271int bpf_prog_array_copy_info(struct bpf_prog_array *array,
1272 u32 *prog_ids, u32 request_cnt,
1273 u32 *prog_cnt);
1274int bpf_prog_array_copy(struct bpf_prog_array *old_array,
1275 struct bpf_prog *exclude_prog,
1276 struct bpf_prog *include_prog,
1277 u64 bpf_cookie,
1278 struct bpf_prog_array **new_array);
1279
1280struct bpf_run_ctx {};
1281
1282struct bpf_cg_run_ctx {
1283 struct bpf_run_ctx run_ctx;
1284 const struct bpf_prog_array_item *prog_item;
1285 int retval;
1286};
1287
1288struct bpf_trace_run_ctx {
1289 struct bpf_run_ctx run_ctx;
1290 u64 bpf_cookie;
1291};
1292
1293static inline struct bpf_run_ctx *bpf_set_run_ctx(struct bpf_run_ctx *new_ctx)
1294{
1295 struct bpf_run_ctx *old_ctx = NULL;
1296
1297#ifdef CONFIG_BPF_SYSCALL
1298 old_ctx = current->bpf_ctx;
1299 current->bpf_ctx = new_ctx;
1300#endif
1301 return old_ctx;
1302}
1303
1304static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx)
1305{
1306#ifdef CONFIG_BPF_SYSCALL
1307 current->bpf_ctx = old_ctx;
1308#endif
1309}
1310
1311
1312#define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE (1 << 0)
1313
1314#define BPF_RET_SET_CN (1 << 0)
1315
1316typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx);
1317
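/* The cgroup variants below run every program in the array: a program that
 * returns 0 (or, for the _FLAGS variant, clears bit 0) turns the final
 * result into -EPERM unless an errno has already been recorded.  The _FLAGS
 * variant additionally or's the remaining high bits of each return value
 * into *ret_flags (e.g. BPF_RET_SET_CN).
 */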
1318static __always_inline int
1319BPF_PROG_RUN_ARRAY_CG_FLAGS(const struct bpf_prog_array __rcu *array_rcu,
1320 const void *ctx, bpf_prog_run_fn run_prog,
1321 int retval, u32 *ret_flags)
1322{
1323 const struct bpf_prog_array_item *item;
1324 const struct bpf_prog *prog;
1325 const struct bpf_prog_array *array;
1326 struct bpf_run_ctx *old_run_ctx;
1327 struct bpf_cg_run_ctx run_ctx;
1328 u32 func_ret;
1329
1330 run_ctx.retval = retval;
1331 migrate_disable();
1332 rcu_read_lock();
1333 array = rcu_dereference(array_rcu);
1334 item = &array->items[0];
1335 old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
1336 while ((prog = READ_ONCE(item->prog))) {
1337 run_ctx.prog_item = item;
1338 func_ret = run_prog(prog, ctx);
1339 if (!(func_ret & 1) && !IS_ERR_VALUE((long)run_ctx.retval))
1340 run_ctx.retval = -EPERM;
1341 *(ret_flags) |= (func_ret >> 1);
1342 item++;
1343 }
1344 bpf_reset_run_ctx(old_run_ctx);
1345 rcu_read_unlock();
1346 migrate_enable();
1347 return run_ctx.retval;
1348}
1349
1350static __always_inline int
1351BPF_PROG_RUN_ARRAY_CG(const struct bpf_prog_array __rcu *array_rcu,
1352 const void *ctx, bpf_prog_run_fn run_prog,
1353 int retval)
1354{
1355 const struct bpf_prog_array_item *item;
1356 const struct bpf_prog *prog;
1357 const struct bpf_prog_array *array;
1358 struct bpf_run_ctx *old_run_ctx;
1359 struct bpf_cg_run_ctx run_ctx;
1360
1361 run_ctx.retval = retval;
1362 migrate_disable();
1363 rcu_read_lock();
1364 array = rcu_dereference(array_rcu);
1365 item = &array->items[0];
1366 old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
1367 while ((prog = READ_ONCE(item->prog))) {
1368 run_ctx.prog_item = item;
1369 if (!run_prog(prog, ctx) && !IS_ERR_VALUE((long)run_ctx.retval))
1370 run_ctx.retval = -EPERM;
1371 item++;
1372 }
1373 bpf_reset_run_ctx(old_run_ctx);
1374 rcu_read_unlock();
1375 migrate_enable();
1376 return run_ctx.retval;
1377}
1378
1379static __always_inline u32
1380BPF_PROG_RUN_ARRAY(const struct bpf_prog_array __rcu *array_rcu,
1381 const void *ctx, bpf_prog_run_fn run_prog)
1382{
1383 const struct bpf_prog_array_item *item;
1384 const struct bpf_prog *prog;
1385 const struct bpf_prog_array *array;
1386 struct bpf_run_ctx *old_run_ctx;
1387 struct bpf_trace_run_ctx run_ctx;
1388 u32 ret = 1;
1389
1390 migrate_disable();
1391 rcu_read_lock();
1392 array = rcu_dereference(array_rcu);
1393 if (unlikely(!array))
1394 goto out;
1395 old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
1396 item = &array->items[0];
1397 while ((prog = READ_ONCE(item->prog))) {
1398 run_ctx.bpf_cookie = item->bpf_cookie;
1399 ret &= run_prog(prog, ctx);
1400 item++;
1401 }
1402 bpf_reset_run_ctx(old_run_ctx);
1403out:
1404 rcu_read_unlock();
1405 migrate_enable();
1406 return ret;
1407}
1408
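/* Runs the cgroup egress programs and folds their verdict into a value the
 * networking stack understands: when the packet is allowed the result is
 * NET_XMIT_SUCCESS, or NET_XMIT_CN if a program requested congestion
 * notification via BPF_RET_SET_CN; a deny becomes NET_XMIT_DROP (or the
 * errno already accumulated).
 */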
1431#define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func) \
1432 ({ \
1433 u32 _flags = 0; \
1434 bool _cn; \
1435 u32 _ret; \
1436 _ret = BPF_PROG_RUN_ARRAY_CG_FLAGS(array, ctx, func, 0, &_flags); \
1437 _cn = _flags & BPF_RET_SET_CN; \
1438 if (_ret && !IS_ERR_VALUE((long)_ret)) \
1439 _ret = -EFAULT; \
1440 if (!_ret) \
1441 _ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS); \
1442 else \
1443 _ret = (_cn ? NET_XMIT_DROP : _ret); \
1444 _ret; \
1445 })
1446
1447#ifdef CONFIG_BPF_SYSCALL
1448DECLARE_PER_CPU(int, bpf_prog_active);
1449extern struct mutex bpf_stats_enabled_mutex;
1450
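/* Block execution of BPF programs attached to instrumentation (kprobes,
 * tracepoints, perf events) on this CPU to avoid recursion while the kernel
 * is already operating on BPF internals, e.g. during map updates from the
 * syscall path.  Pairs with the bpf_prog_active checks in the trace helpers.
 */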
1457static inline void bpf_disable_instrumentation(void)
1458{
1459 migrate_disable();
1460 this_cpu_inc(bpf_prog_active);
1461}
1462
1463static inline void bpf_enable_instrumentation(void)
1464{
1465 this_cpu_dec(bpf_prog_active);
1466 migrate_enable();
1467}
1468
1469extern const struct file_operations bpf_map_fops;
1470extern const struct file_operations bpf_prog_fops;
1471extern const struct file_operations bpf_iter_fops;
1472
1473#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
1474 extern const struct bpf_prog_ops _name ## _prog_ops; \
1475 extern const struct bpf_verifier_ops _name ## _verifier_ops;
1476#define BPF_MAP_TYPE(_id, _ops) \
1477 extern const struct bpf_map_ops _ops;
1478#define BPF_LINK_TYPE(_id, _name)
1479#include <linux/bpf_types.h>
1480#undef BPF_PROG_TYPE
1481#undef BPF_MAP_TYPE
1482#undef BPF_LINK_TYPE
1483
1484extern const struct bpf_prog_ops bpf_offload_prog_ops;
1485extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
1486extern const struct bpf_verifier_ops xdp_analyzer_ops;
1487
1488struct bpf_prog *bpf_prog_get(u32 ufd);
1489struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
1490 bool attach_drv);
1491void bpf_prog_add(struct bpf_prog *prog, int i);
1492void bpf_prog_sub(struct bpf_prog *prog, int i);
1493void bpf_prog_inc(struct bpf_prog *prog);
1494struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
1495void bpf_prog_put(struct bpf_prog *prog);
1496
1497void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
1498void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);
1499
1500struct bpf_map *bpf_map_get(u32 ufd);
1501struct bpf_map *bpf_map_get_with_uref(u32 ufd);
1502struct bpf_map *__bpf_map_get(struct fd f);
1503void bpf_map_inc(struct bpf_map *map);
1504void bpf_map_inc_with_uref(struct bpf_map *map);
1505struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
1506void bpf_map_put_with_uref(struct bpf_map *map);
1507void bpf_map_put(struct bpf_map *map);
1508void *bpf_map_area_alloc(u64 size, int numa_node);
1509void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
1510void bpf_map_area_free(void *base);
1511bool bpf_map_write_active(const struct bpf_map *map);
1512void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
1513int generic_map_lookup_batch(struct bpf_map *map,
1514 const union bpf_attr *attr,
1515 union bpf_attr __user *uattr);
1516int generic_map_update_batch(struct bpf_map *map,
1517 const union bpf_attr *attr,
1518 union bpf_attr __user *uattr);
1519int generic_map_delete_batch(struct bpf_map *map,
1520 const union bpf_attr *attr,
1521 union bpf_attr __user *uattr);
1522struct bpf_map *bpf_map_get_curr_or_next(u32 *id);
1523struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);
1524
1525#ifdef CONFIG_MEMCG_KMEM
1526void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
1527 int node);
1528void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags);
1529void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
1530 size_t align, gfp_t flags);
1531#else
1532static inline void *
1533bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
1534 int node)
1535{
1536 return kmalloc_node(size, flags, node);
1537}
1538
1539static inline void *
1540bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
1541{
1542 return kzalloc(size, flags);
1543}
1544
1545static inline void __percpu *
1546bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align,
1547 gfp_t flags)
1548{
1549 return __alloc_percpu_gfp(size, align, flags);
1550}
1551#endif
1552
1553extern int sysctl_unprivileged_bpf_disabled;
1554
1555static inline bool bpf_allow_ptr_leaks(void)
1556{
1557 return perfmon_capable();
1558}
1559
1560static inline bool bpf_allow_uninit_stack(void)
1561{
1562 return perfmon_capable();
1563}
1564
1565static inline bool bpf_allow_ptr_to_map_access(void)
1566{
1567 return perfmon_capable();
1568}
1569
1570static inline bool bpf_bypass_spec_v1(void)
1571{
1572 return perfmon_capable();
1573}
1574
1575static inline bool bpf_bypass_spec_v4(void)
1576{
1577 return perfmon_capable();
1578}
1579
1580int bpf_map_new_fd(struct bpf_map *map, int flags);
1581int bpf_prog_new_fd(struct bpf_prog *prog);
1582
1583void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
1584 const struct bpf_link_ops *ops, struct bpf_prog *prog);
1585int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer);
1586int bpf_link_settle(struct bpf_link_primer *primer);
1587void bpf_link_cleanup(struct bpf_link_primer *primer);
1588void bpf_link_inc(struct bpf_link *link);
1589void bpf_link_put(struct bpf_link *link);
1590int bpf_link_new_fd(struct bpf_link *link);
1591struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd);
1592struct bpf_link *bpf_link_get_from_fd(u32 ufd);
1593
1594int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
1595int bpf_obj_get_user(const char __user *pathname, int flags);
1596
1597#define BPF_ITER_FUNC_PREFIX "bpf_iter_"
1598#define DEFINE_BPF_ITER_FUNC(target, args...) \
1599 extern int bpf_iter_ ## target(args); \
1600 int __init bpf_iter_ ## target(args) { return 0; }
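
/* Illustrative only: an iterator target declares its context signature with
 * DEFINE_BPF_ITER_FUNC() and registers a bpf_iter_reg; the task iterator in
 * kernel/bpf/task_iter.c does roughly:
 *
 *	DEFINE_BPF_ITER_FUNC(task, struct bpf_iter_meta *meta,
 *			     struct task_struct *task)
 */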
1601
1602struct bpf_iter_aux_info {
1603 struct bpf_map *map;
1604};
1605
1606typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog,
1607 union bpf_iter_link_info *linfo,
1608 struct bpf_iter_aux_info *aux);
1609typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux);
1610typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux,
1611 struct seq_file *seq);
1612typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux,
1613 struct bpf_link_info *info);
1614typedef const struct bpf_func_proto *
1615(*bpf_iter_get_func_proto_t)(enum bpf_func_id func_id,
1616 const struct bpf_prog *prog);
1617
1618enum bpf_iter_feature {
1619 BPF_ITER_RESCHED = BIT(0),
1620};
1621
1622#define BPF_ITER_CTX_ARG_MAX 2
1623struct bpf_iter_reg {
1624 const char *target;
1625 bpf_iter_attach_target_t attach_target;
1626 bpf_iter_detach_target_t detach_target;
1627 bpf_iter_show_fdinfo_t show_fdinfo;
1628 bpf_iter_fill_link_info_t fill_link_info;
1629 bpf_iter_get_func_proto_t get_func_proto;
1630 u32 ctx_arg_info_size;
1631 u32 feature;
1632 struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX];
1633 const struct bpf_iter_seq_info *seq_info;
1634};
1635
1636struct bpf_iter_meta {
1637 __bpf_md_ptr(struct seq_file *, seq);
1638 u64 session_id;
1639 u64 seq_num;
1640};
1641
1642struct bpf_iter__bpf_map_elem {
1643 __bpf_md_ptr(struct bpf_iter_meta *, meta);
1644 __bpf_md_ptr(struct bpf_map *, map);
1645 __bpf_md_ptr(void *, key);
1646 __bpf_md_ptr(void *, value);
1647};
1648
1649int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info);
1650void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info);
1651bool bpf_iter_prog_supported(struct bpf_prog *prog);
1652const struct bpf_func_proto *
1653bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
1654int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog);
1655int bpf_iter_new_fd(struct bpf_link *link);
1656bool bpf_link_is_iter(struct bpf_link *link);
1657struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop);
1658int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx);
1659void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux,
1660 struct seq_file *seq);
1661int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux,
1662 struct bpf_link_info *info);
1663
1664int map_set_for_each_callback_args(struct bpf_verifier_env *env,
1665 struct bpf_func_state *caller,
1666 struct bpf_func_state *callee);
1667
1668int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
1669int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
1670int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
1671 u64 flags);
1672int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
1673 u64 flags);
1674
1675int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
1676
1677int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
1678 void *key, void *value, u64 map_flags);
1679int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
1680int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
1681 void *key, void *value, u64 map_flags);
1682int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
1683
1684int bpf_get_file_flag(int flags);
1685int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size,
1686 size_t actual_size);
1687
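/* Best-effort memcpy using long-sized loads/stores, for 8-byte aligned
 * buffers whose size is a multiple of sizeof(long) (e.g. per-cpu counters
 * copied out via the syscall path).  It deliberately takes no locks and may
 * race with concurrent updates from BPF programs.
 */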
1694static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
1695{
1696 const long *lsrc = src;
1697 long *ldst = dst;
1698
1699 size /= sizeof(long);
1700 while (size--)
1701 *ldst++ = *lsrc++;
1702}
1703
1704
1705int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr);
1706
1707#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1708void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
1709#endif
1710
1711struct btf *bpf_get_btf_vmlinux(void);
1712
1713
1714struct xdp_frame;
1715struct sk_buff;
1716struct bpf_dtab_netdev;
1717struct bpf_cpu_map_entry;
1718
1719void __dev_flush(void);
1720int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
1721 struct net_device *dev_rx);
1722int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
1723 struct net_device *dev_rx);
1724int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
1725 struct bpf_map *map, bool exclude_ingress);
1726int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
1727 struct bpf_prog *xdp_prog);
1728int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
1729 struct bpf_prog *xdp_prog, struct bpf_map *map,
1730 bool exclude_ingress);
1731
1732void __cpu_map_flush(void);
1733int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
1734 struct net_device *dev_rx);
1735int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
1736 struct sk_buff *skb);
1737
1738
1739static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
1740{
1741 return (attr->map_flags & BPF_F_NUMA_NODE) ?
1742 attr->numa_node : NUMA_NO_NODE;
1743}
1744
1745struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
1746int array_map_alloc_check(union bpf_attr *attr);
1747
1748int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
1749 union bpf_attr __user *uattr);
1750int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
1751 union bpf_attr __user *uattr);
1752int bpf_prog_test_run_tracing(struct bpf_prog *prog,
1753 const union bpf_attr *kattr,
1754 union bpf_attr __user *uattr);
1755int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
1756 const union bpf_attr *kattr,
1757 union bpf_attr __user *uattr);
1758int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
1759 const union bpf_attr *kattr,
1760 union bpf_attr __user *uattr);
1761int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
1762 const union bpf_attr *kattr,
1763 union bpf_attr __user *uattr);
1764bool btf_ctx_access(int off, int size, enum bpf_access_type type,
1765 const struct bpf_prog *prog,
1766 struct bpf_insn_access_aux *info);
1767
1768static inline bool bpf_tracing_ctx_access(int off, int size,
1769 enum bpf_access_type type)
1770{
1771 if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
1772 return false;
1773 if (type != BPF_READ)
1774 return false;
1775 if (off % size != 0)
1776 return false;
1777 return true;
1778}
1779
1780static inline bool bpf_tracing_btf_ctx_access(int off, int size,
1781 enum bpf_access_type type,
1782 const struct bpf_prog *prog,
1783 struct bpf_insn_access_aux *info)
1784{
1785 if (!bpf_tracing_ctx_access(off, size, type))
1786 return false;
1787 return btf_ctx_access(off, size, type, prog, info);
1788}
1789
1790int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
1791 const struct btf_type *t, int off, int size,
1792 enum bpf_access_type atype,
1793 u32 *next_btf_id, enum bpf_type_flag *flag);
1794bool btf_struct_ids_match(struct bpf_verifier_log *log,
1795 const struct btf *btf, u32 id, int off,
1796 const struct btf *need_btf, u32 need_type_id);
1797
1798int btf_distill_func_proto(struct bpf_verifier_log *log,
1799 struct btf *btf,
1800 const struct btf_type *func_proto,
1801 const char *func_name,
1802 struct btf_func_model *m);
1803
1804struct bpf_reg_state;
1805int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog,
1806 struct bpf_reg_state *regs);
1807int btf_check_kfunc_arg_match(struct bpf_verifier_env *env,
1808 const struct btf *btf, u32 func_id,
1809 struct bpf_reg_state *regs);
1810int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
1811 struct bpf_reg_state *reg);
1812int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
1813 struct btf *btf, const struct btf_type *t);
1814
1815struct bpf_prog *bpf_prog_by_id(u32 id);
1816struct bpf_link *bpf_link_by_id(u32 id);
1817
1818const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id);
1819void bpf_task_storage_free(struct task_struct *task);
1820bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog);
1821const struct btf_func_model *
1822bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
1823 const struct bpf_insn *insn);
1824struct bpf_core_ctx {
1825 struct bpf_verifier_log *log;
1826 const struct btf *btf;
1827};
1828
1829int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
1830 int relo_idx, void *insn);
1831
1832static inline bool unprivileged_ebpf_enabled(void)
1833{
1834 return !sysctl_unprivileged_bpf_disabled;
1835}
1836
1837#else
1838static inline struct bpf_prog *bpf_prog_get(u32 ufd)
1839{
1840 return ERR_PTR(-EOPNOTSUPP);
1841}
1842
1843static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
1844 enum bpf_prog_type type,
1845 bool attach_drv)
1846{
1847 return ERR_PTR(-EOPNOTSUPP);
1848}
1849
1850static inline void bpf_prog_add(struct bpf_prog *prog, int i)
1851{
1852}
1853
1854static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
1855{
1856}
1857
1858static inline void bpf_prog_put(struct bpf_prog *prog)
1859{
1860}
1861
1862static inline void bpf_prog_inc(struct bpf_prog *prog)
1863{
1864}
1865
1866static inline struct bpf_prog *__must_check
1867bpf_prog_inc_not_zero(struct bpf_prog *prog)
1868{
1869 return ERR_PTR(-EOPNOTSUPP);
1870}
1871
1872static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
1873 const struct bpf_link_ops *ops,
1874 struct bpf_prog *prog)
1875{
1876}
1877
1878static inline int bpf_link_prime(struct bpf_link *link,
1879 struct bpf_link_primer *primer)
1880{
1881 return -EOPNOTSUPP;
1882}
1883
1884static inline int bpf_link_settle(struct bpf_link_primer *primer)
1885{
1886 return -EOPNOTSUPP;
1887}
1888
1889static inline void bpf_link_cleanup(struct bpf_link_primer *primer)
1890{
1891}
1892
1893static inline void bpf_link_inc(struct bpf_link *link)
1894{
1895}
1896
1897static inline void bpf_link_put(struct bpf_link *link)
1898{
1899}
1900
1901static inline int bpf_obj_get_user(const char __user *pathname, int flags)
1902{
1903 return -EOPNOTSUPP;
1904}
1905
1906static inline void __dev_flush(void)
1907{
1908}
1909
1910struct xdp_frame;
1911struct bpf_dtab_netdev;
1912struct bpf_cpu_map_entry;
1913
1914static inline
1915int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
1916 struct net_device *dev_rx)
1917{
1918 return 0;
1919}
1920
1921static inline
1922int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
1923 struct net_device *dev_rx)
1924{
1925 return 0;
1926}
1927
1928static inline
1929int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
1930 struct bpf_map *map, bool exclude_ingress)
1931{
1932 return 0;
1933}
1934
1935struct sk_buff;
1936
1937static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
1938 struct sk_buff *skb,
1939 struct bpf_prog *xdp_prog)
1940{
1941 return 0;
1942}
1943
1944static inline
1945int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
1946 struct bpf_prog *xdp_prog, struct bpf_map *map,
1947 bool exclude_ingress)
1948{
1949 return 0;
1950}
1951
1952static inline void __cpu_map_flush(void)
1953{
1954}
1955
1956static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
1957 struct xdp_frame *xdpf,
1958 struct net_device *dev_rx)
1959{
1960 return 0;
1961}
1962
1963static inline int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
1964 struct sk_buff *skb)
1965{
1966 return -EOPNOTSUPP;
1967}
1968
1969static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
1970 enum bpf_prog_type type)
1971{
1972 return ERR_PTR(-EOPNOTSUPP);
1973}
1974
1975static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog,
1976 const union bpf_attr *kattr,
1977 union bpf_attr __user *uattr)
1978{
1979 return -ENOTSUPP;
1980}
1981
1982static inline int bpf_prog_test_run_skb(struct bpf_prog *prog,
1983 const union bpf_attr *kattr,
1984 union bpf_attr __user *uattr)
1985{
1986 return -ENOTSUPP;
1987}
1988
1989static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog,
1990 const union bpf_attr *kattr,
1991 union bpf_attr __user *uattr)
1992{
1993 return -ENOTSUPP;
1994}
1995
1996static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
1997 const union bpf_attr *kattr,
1998 union bpf_attr __user *uattr)
1999{
2000 return -ENOTSUPP;
2001}
2002
2003static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
2004 const union bpf_attr *kattr,
2005 union bpf_attr __user *uattr)
2006{
2007 return -ENOTSUPP;
2008}
2009
2010static inline void bpf_map_put(struct bpf_map *map)
2011{
2012}
2013
2014static inline struct bpf_prog *bpf_prog_by_id(u32 id)
2015{
2016 return ERR_PTR(-ENOTSUPP);
2017}
2018
2019static inline const struct bpf_func_proto *
2020bpf_base_func_proto(enum bpf_func_id func_id)
2021{
2022 return NULL;
2023}
2024
2025static inline void bpf_task_storage_free(struct task_struct *task)
2026{
2027}
2028
2029static inline bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog)
2030{
2031 return false;
2032}
2033
2034static inline const struct btf_func_model *
2035bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
2036 const struct bpf_insn *insn)
2037{
2038 return NULL;
2039}
2040
2041static inline bool unprivileged_ebpf_enabled(void)
2042{
2043 return false;
2044}
2045
2046#endif
2047
2048void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
2049 struct btf_mod_pair *used_btfs, u32 len);
2050
2051static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
2052 enum bpf_prog_type type)
2053{
2054 return bpf_prog_get_type_dev(ufd, type, false);
2055}
2056
2057void __bpf_free_used_maps(struct bpf_prog_aux *aux,
2058 struct bpf_map **used_maps, u32 len);
2059
2060bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);
2061
int bpf_prog_offload_compile(struct bpf_prog *prog);
void bpf_prog_offload_destroy(struct bpf_prog *prog);
int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog);

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);

int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags);
int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
int bpf_map_offload_get_next_key(struct bpf_map *map,
				 void *key, void *next_key);

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);

struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev);
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev);
bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);

#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);

static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
	return aux->offload_requested;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return unlikely(map->ops == &bpf_map_offload_ops);
}

struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
void bpf_map_offload_map_free(struct bpf_map *map);
int bpf_prog_test_run_syscall(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr);

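/* sockmap/sockhash: program attach/detach and syscall-side element update. */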
int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
int sock_map_bpf_prog_query(const union bpf_attr *attr,
			    union bpf_attr __user *uattr);

void sock_map_unhash(struct sock *sk);
void sock_map_close(struct sock *sk, long timeout);
#else
static inline int bpf_prog_offload_init(struct bpf_prog *prog,
					union bpf_attr *attr)
{
	return -EOPNOTSUPP;
}

static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
{
	return false;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return false;
}

static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_map_offload_map_free(struct bpf_map *map)
{
}

static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog,
					    const union bpf_attr *kattr,
					    union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

#ifdef CONFIG_BPF_SYSCALL
static inline int sock_map_get_from_fd(const union bpf_attr *attr,
				       struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int sock_map_prog_detach(const union bpf_attr *attr,
				       enum bpf_prog_type ptype)
{
	return -EOPNOTSUPP;
}

static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
					   u64 flags)
{
	return -EOPNOTSUPP;
}

static inline int sock_map_bpf_prog_query(const union bpf_attr *attr,
					  union bpf_attr __user *uattr)
{
	return -EINVAL;
}
#endif /* CONFIG_BPF_SYSCALL */
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */

#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
void bpf_sk_reuseport_detach(struct sock *sk);
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags);
#else
static inline void bpf_sk_reuseport_detach(struct sock *sk)
{
}

#ifdef CONFIG_BPF_SYSCALL
static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
						     void *key, void *value)
{
	return -EOPNOTSUPP;
}

static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
						     void *key, void *value,
						     u64 map_flags)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_SYSCALL */
#endif /* CONFIG_INET && CONFIG_BPF_SYSCALL */
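/* verifier prototypes for helper functions called from eBPF programs */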
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
extern const struct bpf_func_proto bpf_map_delete_elem_proto;
extern const struct bpf_func_proto bpf_map_push_elem_proto;
extern const struct bpf_func_proto bpf_map_pop_elem_proto;
extern const struct bpf_func_proto bpf_map_peek_elem_proto;

extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
extern const struct bpf_func_proto bpf_get_stack_proto;
extern const struct bpf_func_proto bpf_get_task_stack_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto_pe;
extern const struct bpf_func_proto bpf_get_stack_proto_pe;
extern const struct bpf_func_proto bpf_sock_map_update_proto;
extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto;
extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
extern const struct bpf_func_proto bpf_spin_lock_proto;
extern const struct bpf_func_proto bpf_spin_unlock_proto;
extern const struct bpf_func_proto bpf_get_local_storage_proto;
extern const struct bpf_func_proto bpf_strtol_proto;
extern const struct bpf_func_proto bpf_strtoul_proto;
extern const struct bpf_func_proto bpf_tcp_sock_proto;
extern const struct bpf_func_proto bpf_jiffies64_proto;
extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_event_output_data_proto;
extern const struct bpf_func_proto bpf_ringbuf_output_proto;
extern const struct bpf_func_proto bpf_ringbuf_reserve_proto;
extern const struct bpf_func_proto bpf_ringbuf_submit_proto;
extern const struct bpf_func_proto bpf_ringbuf_discard_proto;
extern const struct bpf_func_proto bpf_ringbuf_query_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_unix_sock_proto;
extern const struct bpf_func_proto bpf_copy_from_user_proto;
extern const struct bpf_func_proto bpf_snprintf_btf_proto;
extern const struct bpf_func_proto bpf_snprintf_proto;
extern const struct bpf_func_proto bpf_per_cpu_ptr_proto;
extern const struct bpf_func_proto bpf_this_cpu_ptr_proto;
extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto;
extern const struct bpf_func_proto bpf_sock_from_file_proto;
extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto;
extern const struct bpf_func_proto bpf_task_storage_get_proto;
extern const struct bpf_func_proto bpf_task_storage_delete_proto;
extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
extern const struct bpf_func_proto bpf_sk_getsockopt_proto;
extern const struct bpf_func_proto bpf_kallsyms_lookup_name_proto;
extern const struct bpf_func_proto bpf_find_vma_proto;
extern const struct bpf_func_proto bpf_loop_proto;
extern const struct bpf_func_proto bpf_strncmp_proto;
extern const struct bpf_func_proto bpf_copy_from_user_task_proto;

const struct bpf_func_proto *tracing_prog_func_proto(
	enum bpf_func_id func_id, const struct bpf_prog *prog);
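/* Shared helpers among cBPF and eBPF. */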
void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

#if defined(CONFIG_NET)
bool bpf_sock_common_is_valid_access(int off, int size,
				     enum bpf_access_type type,
				     struct bpf_insn_access_aux *info);
bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
			      struct bpf_insn_access_aux *info);
u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
				const struct bpf_insn *si,
				struct bpf_insn *insn_buf,
				struct bpf_prog *prog,
				u32 *target_size);
#else
static inline bool bpf_sock_common_is_valid_access(int off, int size,
						   enum bpf_access_type type,
						   struct bpf_insn_access_aux *info)
{
	return false;
}
static inline bool bpf_sock_is_valid_access(int off, int size,
					    enum bpf_access_type type,
					    struct bpf_insn_access_aux *info)
{
	return false;
}
static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
					      const struct bpf_insn *si,
					      struct bpf_insn *insn_buf,
					      struct bpf_prog *prog,
					      u32 *target_size)
{
	return 0;
}
#endif /* CONFIG_NET */

#ifdef CONFIG_INET
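/* Kernel-internal context backing struct sk_reuseport_md for
 * BPF_PROG_TYPE_SK_REUSEPORT programs.
 */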
struct sk_reuseport_kern {
	struct sk_buff *skb;
	struct sock *sk;
	struct sock *selected_sk;
	struct sock *migrating_sk;
	void *data_end;
	u32 hash;
	u32 reuseport_id;
	bool bind_inany;
};
bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);

bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);
#else
static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
						   const struct bpf_insn *si,
						   struct bpf_insn *insn_buf,
						   struct bpf_prog *prog,
						   u32 *target_size)
{
	return 0;
}
static inline bool bpf_xdp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
#endif /* CONFIG_INET */
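/* Architecture hooks for patching BPF call/jump sites (e.g. trampolines
 * and tail-call poking) and for copying into read-only program images.
 */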
enum bpf_text_poke_type {
	BPF_MOD_CALL,
	BPF_MOD_JUMP,
};

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *addr1, void *addr2);

void *bpf_arch_text_copy(void *dst, void *src, size_t len);

struct btf_id_set;
bool btf_id_set_contains(const struct btf_id_set *set, u32 id);

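/* Maximum number of variadic arguments accepted by the bprintf-style
 * helpers (bpf_snprintf, bpf_seq_printf, bpf_trace_printk and friends);
 * bpf_bprintf_prepare() validates the format string and stages the
 * binary arguments before formatting.
 */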
#define MAX_BPRINTF_VARARGS 12

int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
			u32 **bin_buf, u32 num_args);
void bpf_bprintf_cleanup(void);

#endif /* _LINUX_BPF_H */
