/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_H
#define _LINUX_BPF_H 1

#include <uapi/linux/bpf.h>

#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
#include <linux/mm_types.h>
#include <linux/wait.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/capability.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
#include <linux/bpfptr.h>

struct bpf_verifier_env;
struct bpf_verifier_log;
struct perf_event;
struct bpf_prog;
struct bpf_prog_aux;
struct bpf_map;
struct sock;
struct seq_file;
struct btf;
struct btf_type;
struct exception_table_entry;
struct seq_operations;
struct bpf_iter_aux_info;
struct bpf_local_storage;
struct bpf_local_storage_map;
struct kobject;
struct mem_cgroup;
struct module;
struct bpf_func_state;

extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
extern struct kobject *btf_kobj;

typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);
typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
                                        struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
struct bpf_iter_seq_info {
        const struct seq_operations *seq_ops;
        bpf_iter_init_seq_priv_t init_seq_private;
        bpf_iter_fini_seq_priv_t fini_seq_private;
        u32 seq_priv_size;
};

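/* Per-map-type operations vtable. Each map type (hash, array, ringbuf, ...)
 * provides one static instance of this struct and the syscall, verifier and
 * JIT code dispatch through it. A minimal sketch of what an implementation
 * provides (names are illustrative, not a real in-tree map type):
 *
 *	static struct bpf_map *example_map_alloc(union bpf_attr *attr);
 *	static void *example_map_lookup_elem(struct bpf_map *map, void *key);
 *
 *	const struct bpf_map_ops example_map_ops = {
 *		.map_alloc		= example_map_alloc,
 *		.map_lookup_elem	= example_map_lookup_elem,
 *	};
 */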
struct bpf_map_ops {
        /* funcs callable from userspace (via syscall) */
        int (*map_alloc_check)(union bpf_attr *attr);
        struct bpf_map *(*map_alloc)(union bpf_attr *attr);
        void (*map_release)(struct bpf_map *map, struct file *map_file);
        void (*map_free)(struct bpf_map *map);
        int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
        void (*map_release_uref)(struct bpf_map *map);
        void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
        int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
                                union bpf_attr __user *uattr);
        int (*map_lookup_and_delete_elem)(struct bpf_map *map, void *key,
                                          void *value, u64 flags);
        int (*map_lookup_and_delete_batch)(struct bpf_map *map,
                                           const union bpf_attr *attr,
                                           union bpf_attr __user *uattr);
        int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr,
                                union bpf_attr __user *uattr);
        int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
                                union bpf_attr __user *uattr);

        /* funcs callable from userspace and from eBPF programs */
        void *(*map_lookup_elem)(struct bpf_map *map, void *key);
        int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
        int (*map_delete_elem)(struct bpf_map *map, void *key);
        int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
        int (*map_pop_elem)(struct bpf_map *map, void *value);
        int (*map_peek_elem)(struct bpf_map *map, void *value);

        /* funcs called by prog_array and perf_event_array map */
        void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
                                int fd);
        void (*map_fd_put_ptr)(void *ptr);
        int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
        u32 (*map_fd_sys_lookup_elem)(void *ptr);
        void (*map_seq_show_elem)(struct bpf_map *map, void *key,
                                  struct seq_file *m);
        int (*map_check_btf)(const struct bpf_map *map,
                             const struct btf *btf,
                             const struct btf_type *key_type,
                             const struct btf_type *value_type);

        /* Prog poke tracking helpers. */
        int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
        void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
        void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old,
                             struct bpf_prog *new);

        /* Direct value access helpers. */
        int (*map_direct_value_addr)(const struct bpf_map *map,
                                     u64 *imm, u32 off);
        int (*map_direct_value_meta)(const struct bpf_map *map,
                                     u64 imm, u32 *off);
        int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
        __poll_t (*map_poll)(struct bpf_map *map, struct file *filp,
                             struct poll_table_struct *pts);

        /* Functions called by bpf_local_storage maps */
        int (*map_local_storage_charge)(struct bpf_local_storage_map *smap,
                                        void *owner, u32 size);
        void (*map_local_storage_uncharge)(struct bpf_local_storage_map *smap,
                                           void *owner, u32 size);
        struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner);

        /* Misc helpers. */
        int (*map_redirect)(struct bpf_map *map, u32 ifindex, u64 flags);

        /* map_meta_equal must be implemented for maps that can be
         * used as an inner map.  It is a runtime check to ensure
         * an inner map can be inserted to an outer map.
         *
         * Some properties of the inner map has been used during the
         * verification time.  When inserting an inner map at the runtime,
         * map_meta_equal has to ensure the inserting map has the same
         * properties that the verifier has used earlier.
         */
        bool (*map_meta_equal)(const struct bpf_map *meta0,
                               const struct bpf_map *meta1);

        int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env,
                                              struct bpf_func_state *caller,
                                              struct bpf_func_state *callee);
        int (*map_for_each_callback)(struct bpf_map *map,
                                     bpf_callback_t callback_fn,
                                     void *callback_ctx, u64 flags);

        /* BTF name and id of struct allocated by map_alloc */
        const char * const map_btf_name;
        int *map_btf_id;

        /* bpf_iter info used to open a seq_file */
        const struct bpf_iter_seq_info *iter_seq_info;
};

struct bpf_map {
        /* The first two cachelines with read-mostly members of which some
         * are also accessed in fast-path (e.g. ops, max_entries).
         */
        const struct bpf_map_ops *ops ____cacheline_aligned;
        struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
        void *security;
#endif
        enum bpf_map_type map_type;
        u32 key_size;
        u32 value_size;
        u32 max_entries;
        u64 map_extra; /* any per-map-type extra fields */
        u32 map_flags;
        int spin_lock_off; /* >=0 valid offset, <0 error */
        int timer_off; /* >=0 valid offset, <0 error */
        u32 id;
        int numa_node;
        u32 btf_key_type_id;
        u32 btf_value_type_id;
        u32 btf_vmlinux_value_type_id;
        struct btf *btf;
#ifdef CONFIG_MEMCG_KMEM
        struct mem_cgroup *memcg;
#endif
        char name[BPF_OBJ_NAME_LEN];
        bool bypass_spec_v1;
        bool frozen; /* write-once; write-protected by freeze_mutex */

        /* The 3rd and 4th cacheline with misc members to avoid false
         * sharing, particularly with refcounting.
         */
        atomic64_t refcnt ____cacheline_aligned;
        atomic64_t usercnt;
        struct work_struct work;
        struct mutex freeze_mutex;
        atomic64_t writecnt;
};

static inline bool map_value_has_spin_lock(const struct bpf_map *map)
{
        return map->spin_lock_off >= 0;
}

static inline bool map_value_has_timer(const struct bpf_map *map)
{
        return map->timer_off >= 0;
}

static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
{
        if (unlikely(map_value_has_spin_lock(map)))
                memset(dst + map->spin_lock_off, 0, sizeof(struct bpf_spin_lock));
        if (unlikely(map_value_has_timer(map)))
                memset(dst + map->timer_off, 0, sizeof(struct bpf_timer));
}

/* copy everything but bpf_spin_lock and bpf_timer. There could be one of each. */
static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
{
        u32 s_off = 0, s_sz = 0, t_off = 0, t_sz = 0;

        if (unlikely(map_value_has_spin_lock(map))) {
                s_off = map->spin_lock_off;
                s_sz = sizeof(struct bpf_spin_lock);
        }
        if (unlikely(map_value_has_timer(map))) {
                t_off = map->timer_off;
                t_sz = sizeof(struct bpf_timer);
        }

        if (unlikely(s_sz || t_sz)) {
                if (s_off < t_off || !s_sz) {
                        swap(s_off, t_off);
                        swap(s_sz, t_sz);
                }
                memcpy(dst, src, t_off);
                memcpy(dst + t_off + t_sz,
                       src + t_off + t_sz,
                       s_off - t_off - t_sz);
                memcpy(dst + s_off + s_sz,
                       src + s_off + s_sz,
                       map->value_size - s_off - s_sz);
        } else {
                memcpy(dst, src, map->value_size);
        }
}
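
/* An illustrative sketch of the layout copy_map_value() preserves, assuming
 * a value that contains both special fields (offsets are made up):
 *
 *	value: [ data | bpf_timer | data | bpf_spin_lock | data ]
 *
 * After the swap above, (t_off, t_sz) is the lower-offset (or absent) field,
 * so the three memcpy() calls copy the data chunks around both fields, which
 * are skipped so the destination keeps whatever state
 * check_and_init_map_value() set up.
 */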
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
                           bool lock_src);
void bpf_timer_cancel_and_free(void *timer);
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);

struct bpf_offload_dev;
struct bpf_offloaded_map;

struct bpf_map_dev_ops {
        int (*map_get_next_key)(struct bpf_offloaded_map *map,
                                void *key, void *next_key);
        int (*map_lookup_elem)(struct bpf_offloaded_map *map,
                               void *key, void *value);
        int (*map_update_elem)(struct bpf_offloaded_map *map,
                               void *key, void *value, u64 flags);
        int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
};

struct bpf_offloaded_map {
        struct bpf_map map;
        struct net_device *netdev;
        const struct bpf_map_dev_ops *dev_ops;
        void *dev_priv;
        struct list_head offloads;
};

static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
{
        return container_of(map, struct bpf_offloaded_map, map);
}

static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
{
        return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
{
        return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) &&
                map->ops->map_seq_show_elem;
}

int map_check_no_btf(const struct bpf_map *map,
                     const struct btf *btf,
                     const struct btf_type *key_type,
                     const struct btf_type *value_type);

bool bpf_map_meta_equal(const struct bpf_map *meta0,
                        const struct bpf_map *meta1);

extern const struct bpf_map_ops bpf_map_offload_ops;

/* bpf_type_flag contains a set of flags that are applicable to the values of
 * arg_type, ret_type and reg_type. For example, a pointer value may be null,
 * and in this case it can be represented as PTR_MAYBE_NULL in addition to
 * the base type.
 */
#define BPF_BASE_TYPE_BITS 8

enum bpf_type_flag {
        /* PTR may be NULL. */
        PTR_MAYBE_NULL = BIT(0 + BPF_BASE_TYPE_BITS),

        /* MEM is read-only. When applied on bpf_arg, it indicates the arg is
         * compatible with both mutable and immutable memory.
         */
        MEM_RDONLY = BIT(1 + BPF_BASE_TYPE_BITS),

        /* MEM was "allocated" from a different helper, and cannot be mixed
         * with regular non-MEM_ALLOC'ed MEM types.
         */
        MEM_ALLOC = BIT(2 + BPF_BASE_TYPE_BITS),

        __BPF_TYPE_LAST_FLAG = MEM_ALLOC,
};

/* Max number of base types. */
#define BPF_BASE_TYPE_LIMIT (1UL << BPF_BASE_TYPE_BITS)

/* Max number of all types. */
#define BPF_TYPE_LIMIT (__BPF_TYPE_LAST_FLAG | (__BPF_TYPE_LAST_FLAG - 1))
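
/* Type flags compose with base types by bitwise OR; that is how the extended
 * entries in the enums below are built, e.g.:
 *
 *	ARG_PTR_TO_MAP_VALUE_OR_NULL == PTR_MAYBE_NULL | ARG_PTR_TO_MAP_VALUE
 *
 * The low BPF_BASE_TYPE_BITS bits hold the base type and the bits above them
 * hold the flags, which is what the static_assert()s after each enum verify.
 */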

/* function argument constraints */
enum bpf_arg_type {
        ARG_DONTCARE = 0, /* unused argument in helper function */

        /* the following constraints used to prototype
         * bpf_map_lookup/update/delete_elem() functions
         */
        ARG_CONST_MAP_PTR,
        ARG_PTR_TO_MAP_KEY,
        ARG_PTR_TO_MAP_VALUE,
        ARG_PTR_TO_UNINIT_MAP_VALUE,

        /* the following constraints used to prototype bpf_memcmp() and other
         * functions that access data on eBPF program stack
         */
        ARG_PTR_TO_MEM,
        ARG_PTR_TO_UNINIT_MEM,

        /* number of bytes accessed from the memory argument that
         * precedes the size argument
         */
        ARG_CONST_SIZE,
        ARG_CONST_SIZE_OR_ZERO,

        ARG_PTR_TO_CTX,
        ARG_ANYTHING,
        ARG_PTR_TO_SPIN_LOCK,
        ARG_PTR_TO_SOCK_COMMON,
        ARG_PTR_TO_INT,
        ARG_PTR_TO_LONG,
        ARG_PTR_TO_SOCKET,
        ARG_PTR_TO_BTF_ID,
        ARG_PTR_TO_ALLOC_MEM,
        ARG_CONST_ALLOC_SIZE_OR_ZERO,
        ARG_PTR_TO_BTF_ID_SOCK_COMMON,
        ARG_PTR_TO_PERCPU_BTF_ID,
        ARG_PTR_TO_FUNC,
        ARG_PTR_TO_STACK,
        ARG_PTR_TO_CONST_STR,
        ARG_PTR_TO_TIMER,
        __BPF_ARG_TYPE_MAX,

        /* Extended arg_types. */
        ARG_PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_MAP_VALUE,
        ARG_PTR_TO_MEM_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_MEM,
        ARG_PTR_TO_CTX_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_CTX,
        ARG_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_SOCKET,
        ARG_PTR_TO_ALLOC_MEM_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_ALLOC_MEM,
        ARG_PTR_TO_STACK_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_STACK,

        /* This must be the last entry. Its purpose is to ensure the enum is
         * wide enough to hold the higher bits reserved for bpf_type_flag.
         */
        __BPF_ARG_TYPE_LIMIT = BPF_TYPE_LIMIT,
};
static_assert(__BPF_ARG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);

/* type of values returned from helper functions */
enum bpf_return_type {
        RET_INTEGER,              /* function returns integer */
        RET_VOID,                 /* function doesn't return anything */
        RET_PTR_TO_MAP_VALUE,     /* returns a pointer to map elem value */
        RET_PTR_TO_SOCKET,        /* returns a pointer to a socket */
        RET_PTR_TO_TCP_SOCK,      /* returns a pointer to a tcp_sock */
        RET_PTR_TO_SOCK_COMMON,   /* returns a pointer to a sock_common */
        RET_PTR_TO_ALLOC_MEM,     /* returns a pointer to dynamically allocated memory */
        RET_PTR_TO_MEM_OR_BTF_ID, /* returns a pointer to a valid memory or a btf_id */
        RET_PTR_TO_BTF_ID,        /* returns a pointer to a btf_id */
        __BPF_RET_TYPE_MAX,

        /* Extended ret_types. */
        RET_PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_MAP_VALUE,
        RET_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCKET,
        RET_PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_TCP_SOCK,
        RET_PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCK_COMMON,
        RET_PTR_TO_ALLOC_MEM_OR_NULL = PTR_MAYBE_NULL | MEM_ALLOC | RET_PTR_TO_ALLOC_MEM,
        RET_PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_BTF_ID,

        /* This must be the last entry. Its purpose is to ensure the enum is
         * wide enough to hold the higher bits reserved for bpf_type_flag.
         */
        __BPF_RET_TYPE_LIMIT = BPF_TYPE_LIMIT,
};
static_assert(__BPF_RET_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);

/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
 * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
 * instructions after verifying
 */
struct bpf_func_proto {
        u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
        bool gpl_only;
        bool pkt_access;
        enum bpf_return_type ret_type;
        union {
                struct {
                        enum bpf_arg_type arg1_type;
                        enum bpf_arg_type arg2_type;
                        enum bpf_arg_type arg3_type;
                        enum bpf_arg_type arg4_type;
                        enum bpf_arg_type arg5_type;
                };
                enum bpf_arg_type arg_type[5];
        };
        union {
                struct {
                        u32 *arg1_btf_id;
                        u32 *arg2_btf_id;
                        u32 *arg3_btf_id;
                        u32 *arg4_btf_id;
                        u32 *arg5_btf_id;
                };
                u32 *arg_btf_id[5];
        };
        int *ret_btf_id; /* return value btf_id */
        bool (*allowed)(const struct bpf_prog *prog);
};
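
/* A sketch of how a helper describes itself to the verifier, modeled on the
 * in-tree definition of bpf_map_lookup_elem_proto in kernel/bpf/helpers.c
 * (abridged; consult the source for the authoritative version):
 *
 *	const struct bpf_func_proto bpf_map_lookup_elem_proto = {
 *		.func		= bpf_map_lookup_elem,
 *		.gpl_only	= false,
 *		.pkt_access	= true,
 *		.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
 *		.arg1_type	= ARG_CONST_MAP_PTR,
 *		.arg2_type	= ARG_PTR_TO_MAP_KEY,
 *	};
 */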

/* bpf_context is intentionally undefined structure. Pointer to bpf_context is
 * the first argument to eBPF programs.
 * For socket filters: 'struct bpf_context *' == 'struct __sk_buff *'
 */
struct bpf_context;

enum bpf_access_type {
        BPF_READ = 1,
        BPF_WRITE = 2
};

/* types of values stored in eBPF registers */
/* Pointer types represent:
 * pointer
 * pointer + imm
 * pointer + (u16) var
 * pointer + (u16) var + imm
 * if (range > 0) then [ptr, ptr + range - off) is safe to access
 * if (id > 0) means that some 'var' was added
 * if (off > 0) means that 'imm' was added
 */
enum bpf_reg_type {
        NOT_INIT = 0,            /* nothing was written into register */
        SCALAR_VALUE,            /* reg doesn't contain a valid pointer */
        PTR_TO_CTX,              /* reg points to bpf_context */
        CONST_PTR_TO_MAP,        /* reg points to struct bpf_map */
        PTR_TO_MAP_VALUE,        /* reg points to map element value */
        PTR_TO_MAP_KEY,          /* reg points to a map element key */
        PTR_TO_STACK,            /* reg == frame_pointer + offset */
        PTR_TO_PACKET_META,      /* skb->data - meta_len */
        PTR_TO_PACKET,           /* reg points to skb->data */
        PTR_TO_PACKET_END,       /* skb->data + headlen */
        PTR_TO_FLOW_KEYS,        /* reg points to bpf_flow_keys */
        PTR_TO_SOCKET,           /* reg points to struct bpf_sock */
        PTR_TO_SOCK_COMMON,      /* reg points to sock_common */
        PTR_TO_TCP_SOCK,         /* reg points to struct tcp_sock */
        PTR_TO_TP_BUFFER,        /* reg points to a writable raw tp's buffer */
        PTR_TO_XDP_SOCK,         /* reg points to struct xdp_sock */
        /* PTR_TO_BTF_ID points to a kernel struct that does not need
         * to be null checked by the BPF program. This does not imply the
         * pointer is _not_ null and in practice this can easily be a null
         * pointer when reading pointer chains. The assumption is program
         * context will handle null pointer dereference typically via fault
         * handling. The verifier must keep this in mind and can make no
         * assumptions about null or non-null when doing branch analysis.
         * Further, when passed into helpers the helpers can not, without
         * additional checks, assume the value is non-null.
         */
        PTR_TO_BTF_ID,
        /* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not
         * been checked for null. Used primarily to inform the verifier
         * an explicit null check is required for this struct.
         */
        PTR_TO_MEM,              /* reg points to valid memory region */
        PTR_TO_BUF,              /* reg points to a read/write buffer */
        PTR_TO_PERCPU_BTF_ID,    /* reg points to a percpu kernel variable */
        PTR_TO_FUNC,             /* reg points to a bpf program function */
        __BPF_REG_TYPE_MAX,

        /* Extended reg_types. */
        PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | PTR_TO_MAP_VALUE,
        PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCKET,
        PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCK_COMMON,
        PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | PTR_TO_TCP_SOCK,
        PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | PTR_TO_BTF_ID,

        /* This must be the last entry. Its purpose is to ensure the enum is
         * wide enough to hold the higher bits reserved for bpf_type_flag.
         */
        __BPF_REG_TYPE_LIMIT = BPF_TYPE_LIMIT,
};
static_assert(__BPF_REG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);

/* The information passed from prog-specific *_is_valid_access
 * functions into the verifier.
 */
struct bpf_insn_access_aux {
        enum bpf_reg_type reg_type;
        union {
                int ctx_field_size;
                struct {
                        struct btf *btf;
                        u32 btf_id;
                };
        };
        struct bpf_verifier_log *log; /* for verbose logs */
};

static inline void
bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
{
        aux->ctx_field_size = size;
}

static inline bool bpf_pseudo_func(const struct bpf_insn *insn)
{
        return insn->code == (BPF_LD | BPF_IMM | BPF_DW) &&
               insn->src_reg == BPF_PSEUDO_FUNC;
}

struct bpf_prog_ops {
        int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
                        union bpf_attr __user *uattr);
};

struct bpf_verifier_ops {
        /* return eBPF function prototype for verification */
        const struct bpf_func_proto *
        (*get_func_proto)(enum bpf_func_id func_id,
                          const struct bpf_prog *prog);

        /* return true if 'size' wide access at offset 'off' within bpf_context
         * with 'type' (read or write) is allowed
         */
        bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
                                const struct bpf_prog *prog,
                                struct bpf_insn_access_aux *info);
        int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
                            const struct bpf_prog *prog);
        int (*gen_ld_abs)(const struct bpf_insn *orig,
                          struct bpf_insn *insn_buf);
        u32 (*convert_ctx_access)(enum bpf_access_type type,
                                  const struct bpf_insn *src,
                                  struct bpf_insn *dst,
                                  struct bpf_prog *prog, u32 *target_size);
        int (*btf_struct_access)(struct bpf_verifier_log *log,
                                 const struct btf *btf,
                                 const struct btf_type *t, int off, int size,
                                 enum bpf_access_type atype,
                                 u32 *next_btf_id);
        bool (*check_kfunc_call)(u32 kfunc_btf_id, struct module *owner);
};

struct bpf_prog_offload_ops {
        /* verifier basic callbacks */
        int (*insn_hook)(struct bpf_verifier_env *env,
                         int insn_idx, int prev_insn_idx);
        int (*finalize)(struct bpf_verifier_env *env);
        /* verifier optimization callbacks (called after .finalize) */
        int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
                            struct bpf_insn *insn);
        int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
        /* program management callbacks */
        int (*prepare)(struct bpf_prog *prog);
        int (*translate)(struct bpf_prog *prog);
        void (*destroy)(struct bpf_prog *prog);
};

struct bpf_prog_offload {
        struct bpf_prog *prog;
        struct net_device *netdev;
        struct bpf_offload_dev *offdev;
        void *dev_priv;
        struct list_head offloads;
        bool dev_state;
        bool opt_failed;
        void *jited_image;
        u32 jited_len;
};

enum bpf_cgroup_storage_type {
        BPF_CGROUP_STORAGE_SHARED,
        BPF_CGROUP_STORAGE_PERCPU,
        __BPF_CGROUP_STORAGE_MAX
};

#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX

/* The longest tracepoint has 12 args.
 * See include/trace/bpf_probe.h
 */
#define MAX_BPF_FUNC_ARGS 12

/* The maximum number of arguments passed through registers
 * a single function may have.
 */
#define MAX_BPF_FUNC_REG_ARGS 5

struct btf_func_model {
        u8 ret_size;
        u8 nr_args;
        u8 arg_size[MAX_BPF_FUNC_ARGS];
};

/* Restore arguments before returning from trampoline to let original function
 * continue executing. This flag is used for fentry progs when there are no
 * fexit progs.
 */
#define BPF_TRAMP_F_RESTORE_REGS BIT(0)
/* Call original function after fentry progs, but before fexit progs.
 * Makes sense for fentry/fexit, normal calls and indirect calls.
 */
#define BPF_TRAMP_F_CALL_ORIG BIT(1)
/* Skip current frame and return to parent.  Makes sense for fentry/fexit
 * programs only. Should not be used with normal calls and indirect calls.
 */
#define BPF_TRAMP_F_SKIP_FRAME BIT(2)
/* Store IP address of the caller on the trampoline stack,
 * so it's available for trampoline's programs.
 */
#define BPF_TRAMP_F_IP_ARG BIT(3)
/* Return the return value of fentry prog. Only used by bpf_struct_ops. */
#define BPF_TRAMP_F_RET_FENTRY_RET BIT(4)

/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~20
 * bytes on x86.
 */
#define BPF_MAX_TRAMP_PROGS 38

struct bpf_tramp_progs {
        struct bpf_prog *progs[BPF_MAX_TRAMP_PROGS];
        int nr_progs;
};

/* Different use cases for BPF trampoline:
 * 1. replace nop at the function entry (kprobe equivalent)
 *    flags = BPF_TRAMP_F_RESTORE_REGS
 *    fentry = a set of programs to run before returning from trampoline
 *
 * 2. replace nop at the function entry (kprobe + kretprobe equivalent)
 *    flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME
 *    orig_call = fentry_ip + MCOUNT_INSN_SIZE
 *    fentry = a set of programs to run before calling original function
 *    fexit = a set of programs to run after original function
 *
 * 3. replace direct call instruction anywhere in the function body
 *    or assign a function pointer for indirect call
 *    With flags = 0
 *      fentry = a set of programs to run before returning from trampoline
 *    With flags = BPF_TRAMP_F_CALL_ORIG
 *      orig_call = original callback addr or direct function addr
 *      fentry = a set of programs to run before calling original function
 *      fexit = a set of programs to run after original function
 */
struct bpf_tramp_image;
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
                                const struct btf_func_model *m, u32 flags,
                                struct bpf_tramp_progs *tprogs,
                                void *orig_call);
/* these functions are called from the generated trampoline */
u64 notrace __bpf_prog_enter(struct bpf_prog *prog);
void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog);
void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start);
void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);
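
/* A simplified sketch of the sequence the generated trampoline performs for
 * one fentry program (architecture details omitted; __bpf_prog_enter()
 * returning 0 means recursion was detected and the program is skipped):
 *
 *	start = __bpf_prog_enter(prog);
 *	if (start)
 *		prog->bpf_func(ctx, prog->insnsi);
 *	__bpf_prog_exit(prog, start);
 */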

struct bpf_ksym {
        unsigned long start;
        unsigned long end;
        char name[KSYM_NAME_LEN];
        struct list_head lnode;
        struct latch_tree_node tnode;
        bool prog;
};

enum bpf_tramp_prog_type {
        BPF_TRAMP_FENTRY,
        BPF_TRAMP_FEXIT,
        BPF_TRAMP_MODIFY_RETURN,
        BPF_TRAMP_MAX,
        BPF_TRAMP_REPLACE, /* more than MAX */
};

struct bpf_tramp_image {
        void *image;
        struct bpf_ksym ksym;
        struct percpu_ref pcref;
        void *ip_after_call;
        void *ip_epilogue;
        union {
                struct rcu_head rcu;
                struct work_struct work;
        };
};

struct bpf_trampoline {
        /* hlist for trampoline_table */
        struct hlist_node hlist;
        /* serializes access to fields of this trampoline */
        struct mutex mutex;
        refcount_t refcnt;
        u64 key;
        struct {
                struct btf_func_model model;
                void *addr;
                bool ftrace_managed;
        } func;
        /* if !NULL this is BPF_PROG_TYPE_EXT program that extends another
         * program by replacing one of its functions. func.addr is the address
         * of the function it replaced.
         */
        struct bpf_prog *extension_prog;
        /* list of BPF programs using this trampoline */
        struct hlist_head progs_hlist[BPF_TRAMP_MAX];
        /* Number of attached programs. A counter per kind. */
        int progs_cnt[BPF_TRAMP_MAX];
        /* Executable image of trampoline */
        struct bpf_tramp_image *cur_image;
        u64 selector;
        struct module *mod;
};

struct bpf_attach_target_info {
        struct btf_func_model fmodel;
        long tgt_addr;
        const char *tgt_name;
        const struct btf_type *tgt_type;
};

#define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */

struct bpf_dispatcher_prog {
        struct bpf_prog *prog;
        refcount_t users;
};

struct bpf_dispatcher {
        /* dispatcher mutex */
        struct mutex mutex;
        void *func;
        struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
        int num_progs;
        void *image;
        u32 image_off;
        struct bpf_ksym ksym;
};

static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func(
        const void *ctx,
        const struct bpf_insn *insnsi,
        unsigned int (*bpf_func)(const void *,
                                 const struct bpf_insn *))
{
        return bpf_func(ctx, insnsi);
}
#ifdef CONFIG_BPF_JIT
int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr);
int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr);
struct bpf_trampoline *bpf_trampoline_get(u64 key,
                                          struct bpf_attach_target_info *tgt_info);
void bpf_trampoline_put(struct bpf_trampoline *tr);
int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs);
#define BPF_DISPATCHER_INIT(_name) { \
        .mutex = __MUTEX_INITIALIZER(_name.mutex), \
        .func = &_name##_func, \
        .progs = {}, \
        .num_progs = 0, \
        .image = NULL, \
        .image_off = 0, \
        .ksym = { \
                .name  = #_name, \
                .lnode = LIST_HEAD_INIT(_name.ksym.lnode), \
        }, \
}

#define DEFINE_BPF_DISPATCHER(name) \
        noinline __nocfi unsigned int bpf_dispatcher_##name##_func( \
                const void *ctx, \
                const struct bpf_insn *insnsi, \
                unsigned int (*bpf_func)(const void *, \
                                         const struct bpf_insn *)) \
        { \
                return bpf_func(ctx, insnsi); \
        } \
        EXPORT_SYMBOL(bpf_dispatcher_##name##_func); \
        struct bpf_dispatcher bpf_dispatcher_##name = \
                BPF_DISPATCHER_INIT(bpf_dispatcher_##name);
#define DECLARE_BPF_DISPATCHER(name) \
        unsigned int bpf_dispatcher_##name##_func( \
                const void *ctx, \
                const struct bpf_insn *insnsi, \
                unsigned int (*bpf_func)(const void *, \
                                         const struct bpf_insn *)); \
        extern struct bpf_dispatcher bpf_dispatcher_##name;
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
#define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
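
/* Typical usage, modeled on the XDP dispatcher (the in-tree user): a
 * DEFINE_BPF_DISPATCHER(xdp) in net/core/filter.c plus a matching
 * DECLARE_BPF_DISPATCHER(xdp) in a header, with program switches done via:
 *
 *	bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(xdp), prev_prog, prog);
 */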
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
                                struct bpf_prog *to);
/* Called only from JIT-enabled code, so there's no need for stubs. */
void *bpf_jit_alloc_exec_page(void);
void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
void bpf_image_ksym_del(struct bpf_ksym *ksym);
void bpf_ksym_add(struct bpf_ksym *ksym);
void bpf_ksym_del(struct bpf_ksym *ksym);
int bpf_jit_charge_modmem(u32 pages);
void bpf_jit_uncharge_modmem(u32 pages);
bool bpf_prog_has_trampoline(const struct bpf_prog *prog);
#else
static inline int bpf_trampoline_link_prog(struct bpf_prog *prog,
                                           struct bpf_trampoline *tr)
{
        return -ENOTSUPP;
}
static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog,
                                             struct bpf_trampoline *tr)
{
        return -ENOTSUPP;
}
static inline struct bpf_trampoline *bpf_trampoline_get(u64 key,
                                                        struct bpf_attach_target_info *tgt_info)
{
        return ERR_PTR(-EOPNOTSUPP);
}
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
#define DEFINE_BPF_DISPATCHER(name)
#define DECLARE_BPF_DISPATCHER(name)
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nop_func
#define BPF_DISPATCHER_PTR(name) NULL
static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
                                              struct bpf_prog *from,
                                              struct bpf_prog *to) {}
static inline bool is_bpf_image_address(unsigned long address)
{
        return false;
}
static inline bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
{
        return false;
}
#endif

struct bpf_func_info_aux {
        u16 linkage;
        bool unreliable;
};

enum bpf_jit_poke_reason {
        BPF_POKE_REASON_TAIL_CALL,
};

/* Descriptor of pokes pointing /into/ the JITed image. */
struct bpf_jit_poke_descriptor {
        void *tailcall_target;
        void *tailcall_bypass;
        void *bypass_addr;
        void *aux;
        union {
                struct {
                        struct bpf_map *map;
                        u32 key;
                } tail_call;
        };
        bool tailcall_target_stable;
        u8 adj_off;
        u16 reason;
        u32 insn_idx;
};

/* reg_type info for ctx arguments */
struct bpf_ctx_arg_aux {
        u32 offset;
        enum bpf_reg_type reg_type;
        u32 btf_id;
};

struct btf_mod_pair {
        struct btf *btf;
        struct module *module;
};

struct bpf_kfunc_desc_tab;

struct bpf_prog_aux {
        atomic64_t refcnt;
        u32 used_map_cnt;
        u32 used_btf_cnt;
        u32 max_ctx_offset;
        u32 max_pkt_offset;
        u32 max_tp_access;
        u32 stack_depth;
        u32 id;
        u32 func_cnt; /* used by non-func prog as the number of func progs */
        u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
        u32 attach_btf_id; /* in-kernel BTF type id to attach to */
        u32 ctx_arg_info_size;
        u32 max_rdonly_access;
        u32 max_rdwr_access;
        struct btf *attach_btf;
        const struct bpf_ctx_arg_aux *ctx_arg_info;
        struct mutex dst_mutex; /* protects dst_* pointers below */
        struct bpf_prog *dst_prog;
        struct bpf_trampoline *dst_trampoline;
        enum bpf_prog_type saved_dst_prog_type;
        enum bpf_attach_type saved_dst_attach_type;
        bool verifier_zext; /* Zero extensions has been inserted by verifier. */
        bool offload_requested;
        bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
        bool func_proto_unreliable;
        bool sleepable;
        bool tail_call_reachable;
        struct hlist_node tramp_hlist;
        /* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
        const struct btf_type *attach_func_proto;
        /* function name for valid attach_btf_id */
        const char *attach_func_name;
        struct bpf_prog **func;
        void *jit_data; /* JIT specific data. arch dependent */
        struct bpf_jit_poke_descriptor *poke_tab;
        struct bpf_kfunc_desc_tab *kfunc_tab;
        struct bpf_kfunc_btf_tab *kfunc_btf_tab;
        u32 size_poke_tab;
        struct bpf_ksym ksym;
        const struct bpf_prog_ops *ops;
        struct bpf_map **used_maps;
        struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */
        struct btf_mod_pair *used_btfs;
        struct bpf_prog *prog;
        struct user_struct *user;
        u64 load_time; /* ns since boottime */
        u32 verified_insns;
        struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
        char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
        void *security;
#endif
        struct bpf_prog_offload *offload;
        struct btf *btf;
        struct bpf_func_info *func_info;
        struct bpf_func_info_aux *func_info_aux;
        /* bpf_line_info loaded from userspace.  linfo->insn_off
         * has the xlated insn offset.
         * Both the main and sub prog share the same linfo.
         * The subprog can access its first linfo by
         * using the linfo_idx.
         */
        struct bpf_line_info *linfo;
        /* jited_linfo is the jited addr of the linfo.  It has a
         * one to one mapping to linfo:
         * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
         * Both the main and sub prog share the same jited_linfo.
         * The subprog can access its first jited_linfo by
         * using the linfo_idx.
         */
        void **jited_linfo;
        u32 func_info_cnt;
        u32 nr_linfo;
        /* subprog can use linfo_idx to access its first linfo and
         * jited_linfo.
         * main prog always has linfo_idx == 0
         */
        u32 linfo_idx;
        u32 num_exentries;
        struct exception_table_entry *extable;
        union {
                struct work_struct work;
                struct rcu_head rcu;
        };
};

struct bpf_array_aux {
        /* 'Ownership' of prog array is claimed by the first program that
         * is going to use this map or by the first program which FD is
         * stored in the map to make sure that all callers and callees have
         * the same prog type and JITed flag.
         */
        struct {
                spinlock_t lock;
                enum bpf_prog_type type;
                bool jited;
        } owner;
        /* Programs with direct jumps into programs part of this array. */
        struct list_head poke_progs;
        struct bpf_map *map;
        struct mutex poke_mutex;
        struct work_struct work;
};

struct bpf_link {
        atomic64_t refcnt;
        u32 id;
        enum bpf_link_type type;
        const struct bpf_link_ops *ops;
        struct bpf_prog *prog;
        struct work_struct work;
};

struct bpf_link_ops {
        void (*release)(struct bpf_link *link);
        void (*dealloc)(struct bpf_link *link);
        int (*detach)(struct bpf_link *link);
        int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
                           struct bpf_prog *old_prog);
        void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq);
        int (*fill_link_info)(const struct bpf_link *link,
                              struct bpf_link_info *info);
};

struct bpf_link_primer {
        struct bpf_link *link;
        struct file *file;
        int fd;
        u32 id;
};
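
/* Links use a two-phase setup so a half-initialized link is never exposed
 * through its anonymous-inode FD. A hedged outline of a link-creating
 * attach path (error labels and the concrete link type are illustrative):
 *
 *	bpf_link_init(&link->link, BPF_LINK_TYPE_..., &my_link_ops, prog);
 *	err = bpf_link_prime(&link->link, &primer);	// reserves FD and ID
 *	if (err) { kfree(link); return err; }
 *	err = do_attach(...);				// type-specific work
 *	if (err) { bpf_link_cleanup(&primer); return err; }
 *	return bpf_link_settle(&primer);		// publishes the FD
 */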

struct bpf_struct_ops_value;
struct btf_member;

#define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
struct bpf_struct_ops {
        const struct bpf_verifier_ops *verifier_ops;
        int (*init)(struct btf *btf);
        int (*check_member)(const struct btf_type *t,
                            const struct btf_member *member);
        int (*init_member)(const struct btf_type *t,
                           const struct btf_member *member,
                           void *kdata, const void *udata);
        int (*reg)(void *kdata);
        void (*unreg)(void *kdata);
        const struct btf_type *type;
        const struct btf_type *value_type;
        const char *name;
        struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
        u32 type_id;
        u32 value_id;
};

#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id);
void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log);
bool bpf_struct_ops_get(const void *kdata);
void bpf_struct_ops_put(const void *kdata);
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
                                       void *value);
int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_progs *tprogs,
                                      struct bpf_prog *prog,
                                      const struct btf_func_model *model,
                                      void *image, void *image_end);
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
        if (owner == BPF_MODULE_OWNER)
                return bpf_struct_ops_get(data);
        else
                return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
        if (owner == BPF_MODULE_OWNER)
                bpf_struct_ops_put(data);
        else
                module_put(owner);
}

#ifdef CONFIG_NET
/* Define it here to avoid the use of forward declaration */
struct bpf_dummy_ops_state {
        int val;
};

struct bpf_dummy_ops {
        int (*test_1)(struct bpf_dummy_ops_state *cb);
        int (*test_2)(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2,
                      char a3, unsigned long a4);
};

int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
                            union bpf_attr __user *uattr);
#endif
#else
static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
{
        return NULL;
}
static inline void bpf_struct_ops_init(struct btf *btf,
                                       struct bpf_verifier_log *log)
{
}
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
        return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
        module_put(owner);
}
static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
                                                     void *key,
                                                     void *value)
{
        return -EINVAL;
}
#endif

struct bpf_array {
        struct bpf_map map;
        u32 elem_size;
        u32 index_mask;
        struct bpf_array_aux *aux;
        union {
                char value[0] __aligned(8);
                void *ptrs[0] __aligned(8);
                void __percpu *pptrs[0] __aligned(8);
        };
};

#define BPF_COMPLEXITY_LIMIT_INSNS 1000000 /* yes. 1M insns */
#define MAX_TAIL_CALL_CNT 33

#define BPF_F_ACCESS_MASK (BPF_F_RDONLY | \
                           BPF_F_RDONLY_PROG | \
                           BPF_F_WRONLY | \
                           BPF_F_WRONLY_PROG)

#define BPF_MAP_CAN_READ  BIT(0)
#define BPF_MAP_CAN_WRITE BIT(1)

static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
{
        u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);

        /* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is
         * not possible.
         */
        if (access_flags & BPF_F_RDONLY_PROG)
                return BPF_MAP_CAN_READ;
        else if (access_flags & BPF_F_WRONLY_PROG)
                return BPF_MAP_CAN_WRITE;
        else
                return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE;
}

static inline bool bpf_map_flags_access_ok(u32 access_flags)
{
        return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) !=
               (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
}
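
/* For example, a map created with BPF_F_RDONLY_PROG maps to BPF_MAP_CAN_READ
 * only, so the verifier rejects programs that write to it, while passing
 * both BPF_F_RDONLY_PROG and BPF_F_WRONLY_PROG at map creation fails
 * bpf_map_flags_access_ok() and the map cannot be created at all.
 */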

struct bpf_event_entry {
        struct perf_event *event;
        struct file *perf_file;
        struct file *map_file;
        struct rcu_head rcu;
};

bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);

const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void);

typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
                                        unsigned long off, unsigned long len);
typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
                                        const struct bpf_insn *src,
                                        struct bpf_insn *dst,
                                        struct bpf_prog *prog,
                                        u32 *target_size);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
                     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);

/* an array of programs to be executed under rcu_lock.
 *
 * Typical usage:
 * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, bpf_prog_run);
 *
 * the structure returned by bpf_prog_array_alloc() should be populated
 * with program pointers and the last pointer must be NULL.
 * The user has to keep refcnt on the program and make sure the program
 * is removed from the array before bpf_prog_put().
 * The 'struct bpf_prog_array *' should only be replaced with xchg()
 * since other cpus are walking the array of pointers in parallel.
 */
struct bpf_prog_array_item {
        struct bpf_prog *prog;
        union {
                struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
                u64 bpf_cookie;
        };
};

struct bpf_prog_array {
        struct rcu_head rcu;
        struct bpf_prog_array_item items[];
};

struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
void bpf_prog_array_free(struct bpf_prog_array *progs);
int bpf_prog_array_length(struct bpf_prog_array *progs);
bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
                                __u32 __user *prog_ids, u32 cnt);

void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
                                struct bpf_prog *old_prog);
int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index);
int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
                             struct bpf_prog *prog);
int bpf_prog_array_copy_info(struct bpf_prog_array *array,
                             u32 *prog_ids, u32 request_cnt,
                             u32 *prog_cnt);
int bpf_prog_array_copy(struct bpf_prog_array *old_array,
                        struct bpf_prog *exclude_prog,
                        struct bpf_prog *include_prog,
                        u64 bpf_cookie,
                        struct bpf_prog_array **new_array);

struct bpf_run_ctx {};

struct bpf_cg_run_ctx {
        struct bpf_run_ctx run_ctx;
        const struct bpf_prog_array_item *prog_item;
};

struct bpf_trace_run_ctx {
        struct bpf_run_ctx run_ctx;
        u64 bpf_cookie;
};

static inline struct bpf_run_ctx *bpf_set_run_ctx(struct bpf_run_ctx *new_ctx)
{
        struct bpf_run_ctx *old_ctx = NULL;

#ifdef CONFIG_BPF_SYSCALL
        old_ctx = current->bpf_ctx;
        current->bpf_ctx = new_ctx;
#endif
        return old_ctx;
}

static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx)
{
#ifdef CONFIG_BPF_SYSCALL
        current->bpf_ctx = old_ctx;
#endif
}

/* BPF program asks to bypass CAP_NET_BIND_SERVICE in bind. */
#define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE (1 << 0)
/* BPF program asks to set CN on the packet. */
#define BPF_RET_SET_CN (1 << 0)

typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx);

static __always_inline u32
BPF_PROG_RUN_ARRAY_CG_FLAGS(const struct bpf_prog_array __rcu *array_rcu,
                            const void *ctx, bpf_prog_run_fn run_prog,
                            u32 *ret_flags)
{
        const struct bpf_prog_array_item *item;
        const struct bpf_prog *prog;
        const struct bpf_prog_array *array;
        struct bpf_run_ctx *old_run_ctx;
        struct bpf_cg_run_ctx run_ctx;
        u32 ret = 1;
        u32 func_ret;

        migrate_disable();
        rcu_read_lock();
        array = rcu_dereference(array_rcu);
        item = &array->items[0];
        old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
        while ((prog = READ_ONCE(item->prog))) {
                run_ctx.prog_item = item;
                func_ret = run_prog(prog, ctx);
                ret &= (func_ret & 1);
                *(ret_flags) |= (func_ret >> 1);
                item++;
        }
        bpf_reset_run_ctx(old_run_ctx);
        rcu_read_unlock();
        migrate_enable();
        return ret;
}

static __always_inline u32
BPF_PROG_RUN_ARRAY_CG(const struct bpf_prog_array __rcu *array_rcu,
                      const void *ctx, bpf_prog_run_fn run_prog)
{
        const struct bpf_prog_array_item *item;
        const struct bpf_prog *prog;
        const struct bpf_prog_array *array;
        struct bpf_run_ctx *old_run_ctx;
        struct bpf_cg_run_ctx run_ctx;
        u32 ret = 1;

        migrate_disable();
        rcu_read_lock();
        array = rcu_dereference(array_rcu);
        item = &array->items[0];
        old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
        while ((prog = READ_ONCE(item->prog))) {
                run_ctx.prog_item = item;
                ret &= run_prog(prog, ctx);
                item++;
        }
        bpf_reset_run_ctx(old_run_ctx);
        rcu_read_unlock();
        migrate_enable();
        return ret;
}

static __always_inline u32
BPF_PROG_RUN_ARRAY(const struct bpf_prog_array __rcu *array_rcu,
                   const void *ctx, bpf_prog_run_fn run_prog)
{
        const struct bpf_prog_array_item *item;
        const struct bpf_prog *prog;
        const struct bpf_prog_array *array;
        struct bpf_run_ctx *old_run_ctx;
        struct bpf_trace_run_ctx run_ctx;
        u32 ret = 1;

        migrate_disable();
        rcu_read_lock();
        array = rcu_dereference(array_rcu);
        if (unlikely(!array))
                goto out;
        old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
        item = &array->items[0];
        while ((prog = READ_ONCE(item->prog))) {
                run_ctx.bpf_cookie = item->bpf_cookie;
                ret &= run_prog(prog, ctx);
                item++;
        }
        bpf_reset_run_ctx(old_run_ctx);
out:
        rcu_read_unlock();
        migrate_enable();
        return ret;
}
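
/* Note the AND-accumulation above: a single 0 return anywhere in the array
 * makes the whole run return 0 ("reject"). An illustrative caller (the
 * helpers take the __rcu pointer themselves and handle migrate_disable()
 * plus RCU locking internally):
 *
 *	ret = BPF_PROG_RUN_ARRAY(event->prog_array, ctx, bpf_prog_run);
 */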

/* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs
 * so BPF programs can request cwr for TCP packets.
 *
 * Current cgroup skb programs can only return 0 or 1 (0 to drop the
 * packet). This macro changes the behavior so the low order bit
 * indicates whether the packet should be dropped (0) or not (1)
 * and the next bit is a congestion notification bit. This could be
 * used by TCP to call tcp_enter_cwr()
 *
 * Hence, new allowed return values of CGROUP EGRESS BPF programs are:
 *   0: drop packet
 *   1: keep packet
 *   2: drop packet and cn
 *   3: keep packet and cn
 *
 * This macro then converts it to one of the NET_XMIT or an error
 * code that is then interpreted as drop packet (and no cn):
 *   0: NET_XMIT_SUCCESS  skb should be transmitted
 *   1: NET_XMIT_DROP     skb should be dropped and cn
 *   2: NET_XMIT_CN       skb should be transmitted and cn
 *   3: -EPERM            skb should be dropped
 */
#define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func) \
        ({ \
                u32 _flags = 0; \
                bool _cn; \
                u32 _ret; \
                _ret = BPF_PROG_RUN_ARRAY_CG_FLAGS(array, ctx, func, &_flags); \
                _cn = _flags & BPF_RET_SET_CN; \
                if (_ret) \
                        _ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS); \
                else \
                        _ret = (_cn ? NET_XMIT_DROP : -EPERM); \
                _ret; \
        })

#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
extern struct mutex bpf_stats_enabled_mutex;

/*
 * Block execution of BPF programs attached to instrumentation (perf,
 * kprobes, tracepoints) to prevent deadlocks on map operations as any of
 * those might be executed inside a BPF program.
 */
static inline void bpf_disable_instrumentation(void)
{
        migrate_disable();
        this_cpu_inc(bpf_prog_active);
}

static inline void bpf_enable_instrumentation(void)
{
        this_cpu_dec(bpf_prog_active);
        migrate_enable();
}

extern const struct file_operations bpf_map_fops;
extern const struct file_operations bpf_prog_fops;
extern const struct file_operations bpf_iter_fops;

#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
        extern const struct bpf_prog_ops _name ## _prog_ops; \
        extern const struct bpf_verifier_ops _name ## _verifier_ops;
#define BPF_MAP_TYPE(_id, _ops) \
        extern const struct bpf_map_ops _ops;
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE

extern const struct bpf_prog_ops bpf_offload_prog_ops;
extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
extern const struct bpf_verifier_ops xdp_analyzer_ops;

struct bpf_prog *bpf_prog_get(u32 ufd);
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
                                       bool attach_drv);
void bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
void bpf_prog_inc(struct bpf_prog *prog);
struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);

void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);

struct bpf_map *bpf_map_get(u32 ufd);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
void bpf_map_inc(struct bpf_map *map);
void bpf_map_inc_with_uref(struct bpf_map *map);
struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
void *bpf_map_area_alloc(u64 size, int numa_node);
void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
void bpf_map_area_free(void *base);
bool bpf_map_write_active(const struct bpf_map *map);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
int generic_map_lookup_batch(struct bpf_map *map,
                             const union bpf_attr *attr,
                             union bpf_attr __user *uattr);
int generic_map_update_batch(struct bpf_map *map,
                             const union bpf_attr *attr,
                             union bpf_attr __user *uattr);
int generic_map_delete_batch(struct bpf_map *map,
                             const union bpf_attr *attr,
                             union bpf_attr __user *uattr);
struct bpf_map *bpf_map_get_curr_or_next(u32 *id);
struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);

#ifdef CONFIG_MEMCG_KMEM
void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
                           int node);
void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags);
void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
                                    size_t align, gfp_t flags);
#else
static inline void *
bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
                     int node)
{
        return kmalloc_node(size, flags, node);
}

static inline void *
bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
{
        return kzalloc(size, flags);
}

static inline void __percpu *
bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align,
                     gfp_t flags)
{
        return __alloc_percpu_gfp(size, align, flags);
}
#endif

extern int sysctl_unprivileged_bpf_disabled;

static inline bool bpf_allow_ptr_leaks(void)
{
        return perfmon_capable();
}

static inline bool bpf_allow_uninit_stack(void)
{
        return perfmon_capable();
}

static inline bool bpf_allow_ptr_to_map_access(void)
{
        return perfmon_capable();
}

static inline bool bpf_bypass_spec_v1(void)
{
        return perfmon_capable();
}

static inline bool bpf_bypass_spec_v4(void)
{
        return perfmon_capable();
}

int bpf_map_new_fd(struct bpf_map *map, int flags);
int bpf_prog_new_fd(struct bpf_prog *prog);

void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
                   const struct bpf_link_ops *ops, struct bpf_prog *prog);
int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer);
int bpf_link_settle(struct bpf_link_primer *primer);
void bpf_link_cleanup(struct bpf_link_primer *primer);
void bpf_link_inc(struct bpf_link *link);
void bpf_link_put(struct bpf_link *link);
int bpf_link_new_fd(struct bpf_link *link);
struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd);
struct bpf_link *bpf_link_get_from_fd(u32 ufd);

int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
int bpf_obj_get_user(const char __user *pathname, int flags);

#define BPF_ITER_FUNC_PREFIX "bpf_iter_"
#define DEFINE_BPF_ITER_FUNC(target, args...) \
        extern int bpf_iter_ ## target(args); \
        int __init bpf_iter_ ## target(args) { return 0; }
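
/* DEFINE_BPF_ITER_FUNC only exists to give an iterator target a BTF-visible
 * function signature describing the ctx arguments its programs receive; the
 * stub body never runs. Example from the in-tree bpf_map iterator
 * (kernel/bpf/map_iter.c):
 *
 *	DEFINE_BPF_ITER_FUNC(bpf_map, struct bpf_iter_meta *meta,
 *			     struct bpf_map *map)
 */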

struct bpf_iter_aux_info {
        struct bpf_map *map;
};

typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog,
                                        union bpf_iter_link_info *linfo,
                                        struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux,
                                        struct seq_file *seq);
typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux,
                                         struct bpf_link_info *info);
typedef const struct bpf_func_proto *
(*bpf_iter_get_func_proto_t)(enum bpf_func_id func_id,
                             const struct bpf_prog *prog);

enum bpf_iter_feature {
        BPF_ITER_RESCHED = BIT(0),
};

#define BPF_ITER_CTX_ARG_MAX 2
struct bpf_iter_reg {
        const char *target;
        bpf_iter_attach_target_t attach_target;
        bpf_iter_detach_target_t detach_target;
        bpf_iter_show_fdinfo_t show_fdinfo;
        bpf_iter_fill_link_info_t fill_link_info;
        bpf_iter_get_func_proto_t get_func_proto;
        u32 ctx_arg_info_size;
        u32 feature;
        struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX];
        const struct bpf_iter_seq_info *seq_info;
};

struct bpf_iter_meta {
        __bpf_md_ptr(struct seq_file *, seq);
        u64 session_id;
        u64 seq_num;
};

struct bpf_iter__bpf_map_elem {
        __bpf_md_ptr(struct bpf_iter_meta *, meta);
        __bpf_md_ptr(struct bpf_map *, map);
        __bpf_md_ptr(void *, key);
        __bpf_md_ptr(void *, value);
};

int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info);
void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info);
bool bpf_iter_prog_supported(struct bpf_prog *prog);
const struct bpf_func_proto *
bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog);
int bpf_iter_new_fd(struct bpf_link *link);
bool bpf_link_is_iter(struct bpf_link *link);
struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop);
int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx);
void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux,
                              struct seq_file *seq);
int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux,
                                struct bpf_link_info *info);

int map_set_for_each_callback_args(struct bpf_verifier_env *env,
                                   struct bpf_func_state *caller,
                                   struct bpf_func_state *callee);

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
                           u64 flags);
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
                            u64 flags);

int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);

int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
                                 void *key, void *value, u64 map_flags);
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
                                void *key, void *value, u64 map_flags);
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);

int bpf_get_file_flag(int flags);
int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size,
                             size_t actual_size);

/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
 * forced to use 'long' read/writes to try to speed up big memcpy operations.
 *
 * Size is in bytes.
 */
static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
{
        const long *lsrc = src;
        long *ldst = dst;

        size /= sizeof(long);
        while (size--)
                *ldst++ = *lsrc++;
}

/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr);

#ifndef CONFIG_BPF_JIT_ALWAYS_ON
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
#endif

struct btf *bpf_get_btf_vmlinux(void);

/* Map specifics */
struct xdp_frame;
struct sk_buff;
struct bpf_dtab_netdev;
struct bpf_cpu_map_entry;

void __dev_flush(void);
int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
                    struct net_device *dev_rx);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
                    struct net_device *dev_rx);
int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
                          struct bpf_map *map, bool exclude_ingress);
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
                             struct bpf_prog *xdp_prog);
int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
                           struct bpf_prog *xdp_prog, struct bpf_map *map,
                           bool exclude_ingress);

void __cpu_map_flush(void);
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
                    struct net_device *dev_rx);
int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
                             struct sk_buff *skb);

/* Return map's numa specified by userspace */
static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
{
        return (attr->map_flags & BPF_F_NUMA_NODE) ?
                attr->numa_node : NUMA_NO_NODE;
}

struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
int array_map_alloc_check(union bpf_attr *attr);

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
                          union bpf_attr __user *uattr);
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
                          union bpf_attr __user *uattr);
int bpf_prog_test_run_tracing(struct bpf_prog *prog,
                              const union bpf_attr *kattr,
                              union bpf_attr __user *uattr);
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
                                     const union bpf_attr *kattr,
                                     union bpf_attr __user *uattr);
int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
                             const union bpf_attr *kattr,
                             union bpf_attr __user *uattr);
int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
                                const union bpf_attr *kattr,
                                union bpf_attr __user *uattr);
bool bpf_prog_test_check_kfunc_call(u32 kfunc_id, struct module *owner);
bool btf_ctx_access(int off, int size, enum bpf_access_type type,
                    const struct bpf_prog *prog,
                    struct bpf_insn_access_aux *info);

static inline bool bpf_tracing_ctx_access(int off, int size,
                                          enum bpf_access_type type)
{
        if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
                return false;
        if (type != BPF_READ)
                return false;
        if (off % size != 0)
                return false;
        return true;
}

static inline bool bpf_tracing_btf_ctx_access(int off, int size,
                                              enum bpf_access_type type,
                                              const struct bpf_prog *prog,
                                              struct bpf_insn_access_aux *info)
{
        if (!bpf_tracing_ctx_access(off, size, type))
                return false;
        return btf_ctx_access(off, size, type, prog, info);
}

int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
                      const struct btf_type *t, int off, int size,
                      enum bpf_access_type atype,
                      u32 *next_btf_id);
bool btf_struct_ids_match(struct bpf_verifier_log *log,
                          const struct btf *btf, u32 id, int off,
                          const struct btf *need_btf, u32 need_type_id);

int btf_distill_func_proto(struct bpf_verifier_log *log,
                           struct btf *btf,
                           const struct btf_type *func_proto,
                           const char *func_name,
                           struct btf_func_model *m);

struct bpf_reg_state;
int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog,
                                struct bpf_reg_state *regs);
int btf_check_kfunc_arg_match(struct bpf_verifier_env *env,
                              const struct btf *btf, u32 func_id,
                              struct bpf_reg_state *regs);
int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
                          struct bpf_reg_state *reg);
int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
                         struct btf *btf, const struct btf_type *t);

struct bpf_prog *bpf_prog_by_id(u32 id);
struct bpf_link *bpf_link_by_id(u32 id);

const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id);
void bpf_task_storage_free(struct task_struct *task);
bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog);
const struct btf_func_model *
bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
                         const struct bpf_insn *insn);
struct bpf_core_ctx {
        struct bpf_verifier_log *log;
        const struct btf *btf;
};

int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
                   int relo_idx, void *insn);

static inline bool unprivileged_ebpf_enabled(void)
{
        return !sysctl_unprivileged_bpf_disabled;
}

#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
        return ERR_PTR(-EOPNOTSUPP);
}

static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
                                                     enum bpf_prog_type type,
                                                     bool attach_drv)
{
        return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_prog_add(struct bpf_prog *prog, int i)
{
}

static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
{
}

static inline void bpf_prog_put(struct bpf_prog *prog)
{
}

static inline void bpf_prog_inc(struct bpf_prog *prog)
{
}

static inline struct bpf_prog *__must_check
bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
        return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
                                 const struct bpf_link_ops *ops,
                                 struct bpf_prog *prog)
{
}

static inline int bpf_link_prime(struct bpf_link *link,
                                 struct bpf_link_primer *primer)
{
        return -EOPNOTSUPP;
}

static inline int bpf_link_settle(struct bpf_link_primer *primer)
{
        return -EOPNOTSUPP;
}

static inline void bpf_link_cleanup(struct bpf_link_primer *primer)
{
}

static inline void bpf_link_inc(struct bpf_link *link)
{
}

static inline void bpf_link_put(struct bpf_link *link)
{
}

static inline int bpf_obj_get_user(const char __user *pathname, int flags)
{
        return -EOPNOTSUPP;
}

static inline bool dev_map_can_have_prog(struct bpf_map *map)
{
        return false;
}

static inline void __dev_flush(void)
{
}

struct xdp_frame;
struct bpf_dtab_netdev;
struct bpf_cpu_map_entry;

static inline
int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
                    struct net_device *dev_rx)
{
        return 0;
}

static inline
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
                    struct net_device *dev_rx)
{
        return 0;
}

static inline
int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
                          struct bpf_map *map, bool exclude_ingress)
{
        return 0;
}

struct sk_buff;

static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
                                           struct sk_buff *skb,
                                           struct bpf_prog *xdp_prog)
{
        return 0;
}

static inline
int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
                           struct bpf_prog *xdp_prog, struct bpf_map *map,
                           bool exclude_ingress)
{
        return 0;
}

static inline void __cpu_map_flush(void)
{
}

static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
                                  struct xdp_frame *xdpf,
                                  struct net_device *dev_rx)
{
        return 0;
}

static inline int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
                                           struct sk_buff *skb)
{
        return -EOPNOTSUPP;
}

static inline bool cpu_map_prog_allowed(struct bpf_map *map)
{
        return false;
}

static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
                                                      enum bpf_prog_type type)
{
        return ERR_PTR(-EOPNOTSUPP);
}

static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog,
                                        const union bpf_attr *kattr,
                                        union bpf_attr __user *uattr)
{
        return -ENOTSUPP;
}

static inline int bpf_prog_test_run_skb(struct bpf_prog *prog,
                                        const union bpf_attr *kattr,
                                        union bpf_attr __user *uattr)
{
        return -ENOTSUPP;
}

static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog,
                                            const union bpf_attr *kattr,
                                            union bpf_attr __user *uattr)
{
        return -ENOTSUPP;
}

static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
                                                   const union bpf_attr *kattr,
                                                   union bpf_attr __user *uattr)
{
        return -ENOTSUPP;
}

static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
                                              const union bpf_attr *kattr,
                                              union bpf_attr __user *uattr)
{
        return -ENOTSUPP;
}

static inline bool bpf_prog_test_check_kfunc_call(u32 kfunc_id,
                                                  struct module *owner)
{
        return false;
}

static inline void bpf_map_put(struct bpf_map *map)
{
}

static inline struct bpf_prog *bpf_prog_by_id(u32 id)
{
        return ERR_PTR(-ENOTSUPP);
}

static inline const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id)
{
        return NULL;
}

static inline void bpf_task_storage_free(struct task_struct *task)
{
}

static inline bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog)
{
        return false;
}

static inline const struct btf_func_model *
bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
                         const struct bpf_insn *insn)
{
        return NULL;
}

static inline bool unprivileged_ebpf_enabled(void)
{
        return false;
}

#endif /* CONFIG_BPF_SYSCALL */
2026
2027void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
2028 struct btf_mod_pair *used_btfs, u32 len);
2029
2030static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
2031 enum bpf_prog_type type)
2032{
2033 return bpf_prog_get_type_dev(ufd, type, false);
2034}
2035
2036void __bpf_free_used_maps(struct bpf_prog_aux *aux,
2037 struct bpf_map **used_maps, u32 len);
2038
2039bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);
2040
2041int bpf_prog_offload_compile(struct bpf_prog *prog);
2042void bpf_prog_offload_destroy(struct bpf_prog *prog);
2043int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
2044 struct bpf_prog *prog);
2045
2046int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);
2047
2048int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
2049int bpf_map_offload_update_elem(struct bpf_map *map,
2050 void *key, void *value, u64 flags);
2051int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
2052int bpf_map_offload_get_next_key(struct bpf_map *map,
2053 void *key, void *next_key);
2054
2055bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);
2056
2057struct bpf_offload_dev *
2058bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
2059void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
2060void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
2061int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
2062 struct net_device *netdev);
2063void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
2064 struct net_device *netdev);
2065bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);

#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);

static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
	return aux->offload_requested;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return unlikely(map->ops == &bpf_map_offload_ops);
}

struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
void bpf_map_offload_map_free(struct bpf_map *map);
int bpf_prog_test_run_syscall(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr);

int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
void sock_map_unhash(struct sock *sk);
void sock_map_close(struct sock *sk, long timeout);
#else
static inline int bpf_prog_offload_init(struct bpf_prog *prog,
					union bpf_attr *attr)
{
	return -EOPNOTSUPP;
}

static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
	return false;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return false;
}

static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_map_offload_map_free(struct bpf_map *map)
{
}

static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog,
					    const union bpf_attr *kattr,
					    union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

#ifdef CONFIG_BPF_SYSCALL
static inline int sock_map_get_from_fd(const union bpf_attr *attr,
				       struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int sock_map_prog_detach(const union bpf_attr *attr,
				       enum bpf_prog_type ptype)
{
	return -EOPNOTSUPP;
}

static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
					   u64 flags)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_SYSCALL */
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */

#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
void bpf_sk_reuseport_detach(struct sock *sk);
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags);
#else
static inline void bpf_sk_reuseport_detach(struct sock *sk)
{
}

#ifdef CONFIG_BPF_SYSCALL
static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
						     void *key, void *value)
{
	return -EOPNOTSUPP;
}

static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
						     void *key, void *value,
						     u64 map_flags)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_SYSCALL */
#endif /* CONFIG_INET && CONFIG_BPF_SYSCALL */

/* verifier prototypes for helper functions called from eBPF programs */
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
extern const struct bpf_func_proto bpf_map_delete_elem_proto;
extern const struct bpf_func_proto bpf_map_push_elem_proto;
extern const struct bpf_func_proto bpf_map_pop_elem_proto;
extern const struct bpf_func_proto bpf_map_peek_elem_proto;

extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
extern const struct bpf_func_proto bpf_get_stack_proto;
extern const struct bpf_func_proto bpf_get_task_stack_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto_pe;
extern const struct bpf_func_proto bpf_get_stack_proto_pe;
extern const struct bpf_func_proto bpf_sock_map_update_proto;
extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto;
extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
extern const struct bpf_func_proto bpf_spin_lock_proto;
extern const struct bpf_func_proto bpf_spin_unlock_proto;
extern const struct bpf_func_proto bpf_get_local_storage_proto;
extern const struct bpf_func_proto bpf_strtol_proto;
extern const struct bpf_func_proto bpf_strtoul_proto;
extern const struct bpf_func_proto bpf_tcp_sock_proto;
extern const struct bpf_func_proto bpf_jiffies64_proto;
extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_event_output_data_proto;
extern const struct bpf_func_proto bpf_ringbuf_output_proto;
extern const struct bpf_func_proto bpf_ringbuf_reserve_proto;
extern const struct bpf_func_proto bpf_ringbuf_submit_proto;
extern const struct bpf_func_proto bpf_ringbuf_discard_proto;
extern const struct bpf_func_proto bpf_ringbuf_query_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_unix_sock_proto;
extern const struct bpf_func_proto bpf_copy_from_user_proto;
extern const struct bpf_func_proto bpf_snprintf_btf_proto;
extern const struct bpf_func_proto bpf_snprintf_proto;
extern const struct bpf_func_proto bpf_per_cpu_ptr_proto;
extern const struct bpf_func_proto bpf_this_cpu_ptr_proto;
extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto;
extern const struct bpf_func_proto bpf_sock_from_file_proto;
extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto;
extern const struct bpf_func_proto bpf_task_storage_get_proto;
extern const struct bpf_func_proto bpf_task_storage_delete_proto;
extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
extern const struct bpf_func_proto bpf_sk_getsockopt_proto;
extern const struct bpf_func_proto bpf_kallsyms_lookup_name_proto;
extern const struct bpf_func_proto bpf_find_vma_proto;
extern const struct bpf_func_proto bpf_loop_proto;
extern const struct bpf_func_proto bpf_strncmp_proto;

const struct bpf_func_proto *tracing_prog_func_proto(
  enum bpf_func_id func_id, const struct bpf_prog *prog);
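
/*
 * Minimal sketch of how the protos above are handed to the verifier via a
 * program type's ->get_func_proto() callback (foo_func_proto is an
 * illustrative name, not a kernel symbol):
 *
 *	static const struct bpf_func_proto *
 *	foo_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 *	{
 *		switch (func_id) {
 *		case BPF_FUNC_map_lookup_elem:
 *			return &bpf_map_lookup_elem_proto;
 *		case BPF_FUNC_get_prandom_u32:
 *			return &bpf_get_prandom_u32_proto;
 *		default:
 *			return bpf_base_func_proto(func_id);
 *		}
 *	}
 */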

/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

#if defined(CONFIG_NET)
bool bpf_sock_common_is_valid_access(int off, int size,
				     enum bpf_access_type type,
				     struct bpf_insn_access_aux *info);
bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
			      struct bpf_insn_access_aux *info);
u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
				const struct bpf_insn *si,
				struct bpf_insn *insn_buf,
				struct bpf_prog *prog,
				u32 *target_size);
#else
static inline bool bpf_sock_common_is_valid_access(int off, int size,
						   enum bpf_access_type type,
						   struct bpf_insn_access_aux *info)
{
	return false;
}
static inline bool bpf_sock_is_valid_access(int off, int size,
					    enum bpf_access_type type,
					    struct bpf_insn_access_aux *info)
{
	return false;
}
static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
					      const struct bpf_insn *si,
					      struct bpf_insn *insn_buf,
					      struct bpf_prog *prog,
					      u32 *target_size)
{
	return 0;
}
#endif /* CONFIG_NET */
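
/*
 * The *_is_valid_access() and *_convert_ctx_access() hooks in this header
 * are verifier callbacks: the former validates a context load or store,
 * the latter rewrites it into accesses on the underlying kernel object.
 * Sketch of the usual wiring (hypothetical verifier ops, reusing the
 * foo_func_proto sketch above):
 *
 *	const struct bpf_verifier_ops foo_verifier_ops = {
 *		.get_func_proto		= foo_func_proto,
 *		.is_valid_access	= bpf_sock_is_valid_access,
 *		.convert_ctx_access	= bpf_sock_convert_ctx_access,
 *	};
 */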

#ifdef CONFIG_INET
struct sk_reuseport_kern {
	struct sk_buff *skb;
	struct sock *sk;		/* socket in the reuseport group */
	struct sock *selected_sk;	/* socket picked via bpf_sk_select_reuseport() */
	struct sock *migrating_sk;	/* socket being migrated (SELECT_OR_MIGRATE) */
	void *data_end;			/* end of linear packet data */
	u32 hash;			/* hash of the packet four-tuple */
	u32 reuseport_id;
	bool bind_inany;		/* group bound to a wildcard address? */
};
bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);

bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);
#else
static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
static inline bool bpf_xdp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
#endif /* CONFIG_INET */

enum bpf_text_poke_type {
	BPF_MOD_CALL,
	BPF_MOD_JUMP,
};

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *addr1, void *addr2);
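
/*
 * Sketch of the intended semantics, on architectures that implement it:
 * atomically repatch the call or jump site at @ip from target @addr1 to
 * target @addr2, where a NULL target denotes a nop at that site, e.g.:
 *
 *	err = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_fn, new_fn);
 */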

struct btf_id_set;
bool btf_id_set_contains(const struct btf_id_set *set, u32 id);
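
/*
 * BTF ID sets are typically built with the BTF_SET_START() and
 * BTF_SET_END() macros from <linux/btf_ids.h> and then probed with
 * btf_id_set_contains(). Illustrative sketch (foo_allowlist is not a
 * kernel symbol):
 *
 *	BTF_SET_START(foo_allowlist)
 *	BTF_ID(func, bpf_task_storage_get)
 *	BTF_SET_END(foo_allowlist)
 *
 *	if (btf_id_set_contains(&foo_allowlist, btf_id))
 *		...
 */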

/* Max number of format arguments accepted by the bprintf-family helpers */
#define MAX_BPRINTF_VARARGS		12

int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
			u32 **bin_args, u32 num_args);
void bpf_bprintf_cleanup(void);
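
/*
 * Usage pattern, as a sketch mirroring the in-kernel callers: prepare the
 * binary argument buffer, format with bstr_printf(), then release the
 * per-CPU working buffers:
 *
 *	err = bpf_bprintf_prepare(fmt, fmt_size, raw_args, &bin_args, num_args);
 *	if (err < 0)
 *		return err;
 *	ret = bstr_printf(buf, sizeof(buf), fmt, bin_args);
 *	bpf_bprintf_cleanup();
 */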

#endif /* _LINUX_BPF_H */