linux/include/linux/bpf.h
   1/* SPDX-License-Identifier: GPL-2.0-only */
   2/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
   3 */
   4#ifndef _LINUX_BPF_H
   5#define _LINUX_BPF_H 1
   6
   7#include <uapi/linux/bpf.h>
   8
   9#include <linux/workqueue.h>
  10#include <linux/file.h>
  11#include <linux/percpu.h>
  12#include <linux/err.h>
  13#include <linux/rbtree_latch.h>
  14#include <linux/numa.h>
  15#include <linux/mm_types.h>
  16#include <linux/wait.h>
  17#include <linux/u64_stats_sync.h>
  18#include <linux/refcount.h>
  19#include <linux/mutex.h>
  20#include <linux/module.h>
  21#include <linux/kallsyms.h>
  22#include <linux/capability.h>
  23
  24struct bpf_verifier_env;
  25struct bpf_verifier_log;
  26struct perf_event;
  27struct bpf_prog;
  28struct bpf_prog_aux;
  29struct bpf_map;
  30struct sock;
  31struct seq_file;
  32struct btf;
  33struct btf_type;
  34struct exception_table_entry;
  35struct seq_operations;
  36
  37extern struct idr btf_idr;
  38extern spinlock_t btf_idr_lock;
  39
   40/* map is generic key/value storage optionally accessible by eBPF programs */
  41struct bpf_map_ops {
  42        /* funcs callable from userspace (via syscall) */
  43        int (*map_alloc_check)(union bpf_attr *attr);
  44        struct bpf_map *(*map_alloc)(union bpf_attr *attr);
  45        void (*map_release)(struct bpf_map *map, struct file *map_file);
  46        void (*map_free)(struct bpf_map *map);
  47        int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
  48        void (*map_release_uref)(struct bpf_map *map);
  49        void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
  50        int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
  51                                union bpf_attr __user *uattr);
  52        int (*map_lookup_and_delete_batch)(struct bpf_map *map,
  53                                           const union bpf_attr *attr,
  54                                           union bpf_attr __user *uattr);
  55        int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr,
  56                                union bpf_attr __user *uattr);
  57        int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
  58                                union bpf_attr __user *uattr);
  59
  60        /* funcs callable from userspace and from eBPF programs */
  61        void *(*map_lookup_elem)(struct bpf_map *map, void *key);
  62        int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
  63        int (*map_delete_elem)(struct bpf_map *map, void *key);
  64        int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
  65        int (*map_pop_elem)(struct bpf_map *map, void *value);
  66        int (*map_peek_elem)(struct bpf_map *map, void *value);
  67
  68        /* funcs called by prog_array and perf_event_array map */
  69        void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
  70                                int fd);
  71        void (*map_fd_put_ptr)(void *ptr);
  72        u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
  73        u32 (*map_fd_sys_lookup_elem)(void *ptr);
  74        void (*map_seq_show_elem)(struct bpf_map *map, void *key,
  75                                  struct seq_file *m);
  76        int (*map_check_btf)(const struct bpf_map *map,
  77                             const struct btf *btf,
  78                             const struct btf_type *key_type,
  79                             const struct btf_type *value_type);
  80
  81        /* Prog poke tracking helpers. */
  82        int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
  83        void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
  84        void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old,
  85                             struct bpf_prog *new);
  86
  87        /* Direct value access helpers. */
  88        int (*map_direct_value_addr)(const struct bpf_map *map,
  89                                     u64 *imm, u32 off);
  90        int (*map_direct_value_meta)(const struct bpf_map *map,
  91                                     u64 imm, u32 *off);
  92        int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
  93        __poll_t (*map_poll)(struct bpf_map *map, struct file *filp,
  94                             struct poll_table_struct *pts);
  95};
  96
  97struct bpf_map_memory {
  98        u32 pages;
  99        struct user_struct *user;
 100};
 101
 102struct bpf_map {
 103        /* The first two cachelines with read-mostly members of which some
 104         * are also accessed in fast-path (e.g. ops, max_entries).
 105         */
 106        const struct bpf_map_ops *ops ____cacheline_aligned;
 107        struct bpf_map *inner_map_meta;
 108#ifdef CONFIG_SECURITY
 109        void *security;
 110#endif
 111        enum bpf_map_type map_type;
 112        u32 key_size;
 113        u32 value_size;
 114        u32 max_entries;
 115        u32 map_flags;
 116        int spin_lock_off; /* >=0 valid offset, <0 error */
 117        u32 id;
 118        int numa_node;
 119        u32 btf_key_type_id;
 120        u32 btf_value_type_id;
 121        struct btf *btf;
 122        struct bpf_map_memory memory;
 123        char name[BPF_OBJ_NAME_LEN];
 124        u32 btf_vmlinux_value_type_id;
 125        bool bypass_spec_v1;
 126        bool frozen; /* write-once; write-protected by freeze_mutex */
 127        /* 22 bytes hole */
 128
 129        /* The 3rd and 4th cacheline with misc members to avoid false sharing
 130         * particularly with refcounting.
 131         */
 132        atomic64_t refcnt ____cacheline_aligned;
 133        atomic64_t usercnt;
 134        struct work_struct work;
 135        struct mutex freeze_mutex;
 136        u64 writecnt; /* writable mmap cnt; protected by freeze_mutex */
 137};
 138
 139static inline bool map_value_has_spin_lock(const struct bpf_map *map)
 140{
 141        return map->spin_lock_off >= 0;
 142}
 143
 144static inline void check_and_init_map_lock(struct bpf_map *map, void *dst)
 145{
 146        if (likely(!map_value_has_spin_lock(map)))
 147                return;
 148        *(struct bpf_spin_lock *)(dst + map->spin_lock_off) =
 149                (struct bpf_spin_lock){};
 150}
 151
 152/* copy everything but bpf_spin_lock */
 153static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
 154{
 155        if (unlikely(map_value_has_spin_lock(map))) {
 156                u32 off = map->spin_lock_off;
 157
 158                memcpy(dst, src, off);
 159                memcpy(dst + off + sizeof(struct bpf_spin_lock),
 160                       src + off + sizeof(struct bpf_spin_lock),
 161                       map->value_size - off - sizeof(struct bpf_spin_lock));
 162        } else {
 163                memcpy(dst, src, map->value_size);
 164        }
 165}
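/* Illustrative sketch (hypothetical value layout): for a map value such as
 *
 *        struct val {
 *                int a;
 *                struct bpf_spin_lock lock;
 *                int b;
 *        };
 *
 * with map->spin_lock_off == offsetof(struct val, lock), copy_map_value()
 * copies 'a' and 'b' but leaves the destination's 'lock' untouched, so the
 * lock state in dst is preserved across the copy.
 */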
 166void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
 167                           bool lock_src);
 168int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);
 169
 170struct bpf_offload_dev;
 171struct bpf_offloaded_map;
 172
 173struct bpf_map_dev_ops {
 174        int (*map_get_next_key)(struct bpf_offloaded_map *map,
 175                                void *key, void *next_key);
 176        int (*map_lookup_elem)(struct bpf_offloaded_map *map,
 177                               void *key, void *value);
 178        int (*map_update_elem)(struct bpf_offloaded_map *map,
 179                               void *key, void *value, u64 flags);
 180        int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
 181};
 182
 183struct bpf_offloaded_map {
 184        struct bpf_map map;
 185        struct net_device *netdev;
 186        const struct bpf_map_dev_ops *dev_ops;
 187        void *dev_priv;
 188        struct list_head offloads;
 189};
 190
 191static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
 192{
 193        return container_of(map, struct bpf_offloaded_map, map);
 194}
 195
 196static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
 197{
 198        return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
 199}
 200
 201static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
 202{
 203        return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) &&
 204                map->ops->map_seq_show_elem;
 205}
 206
 207int map_check_no_btf(const struct bpf_map *map,
 208                     const struct btf *btf,
 209                     const struct btf_type *key_type,
 210                     const struct btf_type *value_type);
 211
 212extern const struct bpf_map_ops bpf_map_offload_ops;
 213
 214/* function argument constraints */
 215enum bpf_arg_type {
 216        ARG_DONTCARE = 0,       /* unused argument in helper function */
 217
  218        /* the following constraints are used to prototype
 219         * bpf_map_lookup/update/delete_elem() functions
 220         */
 221        ARG_CONST_MAP_PTR,      /* const argument used as pointer to bpf_map */
 222        ARG_PTR_TO_MAP_KEY,     /* pointer to stack used as map key */
 223        ARG_PTR_TO_MAP_VALUE,   /* pointer to stack used as map value */
 224        ARG_PTR_TO_UNINIT_MAP_VALUE,    /* pointer to valid memory used to store a map value */
 225        ARG_PTR_TO_MAP_VALUE_OR_NULL,   /* pointer to stack used as map value or NULL */
 226
  227        /* the following constraints are used to prototype bpf_memcmp() and other
 228         * functions that access data on eBPF program stack
 229         */
 230        ARG_PTR_TO_MEM,         /* pointer to valid memory (stack, packet, map value) */
 231        ARG_PTR_TO_MEM_OR_NULL, /* pointer to valid memory or NULL */
 232        ARG_PTR_TO_UNINIT_MEM,  /* pointer to memory does not need to be initialized,
 233                                 * helper function must fill all bytes or clear
 234                                 * them in error case.
 235                                 */
 236
 237        ARG_CONST_SIZE,         /* number of bytes accessed from memory */
 238        ARG_CONST_SIZE_OR_ZERO, /* number of bytes accessed from memory or 0 */
 239
 240        ARG_PTR_TO_CTX,         /* pointer to context */
 241        ARG_PTR_TO_CTX_OR_NULL, /* pointer to context or NULL */
 242        ARG_ANYTHING,           /* any (initialized) argument is ok */
 243        ARG_PTR_TO_SPIN_LOCK,   /* pointer to bpf_spin_lock */
 244        ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */
 245        ARG_PTR_TO_INT,         /* pointer to int */
 246        ARG_PTR_TO_LONG,        /* pointer to long */
 247        ARG_PTR_TO_SOCKET,      /* pointer to bpf_sock (fullsock) */
 248        ARG_PTR_TO_BTF_ID,      /* pointer to in-kernel struct */
 249        ARG_PTR_TO_ALLOC_MEM,   /* pointer to dynamically allocated memory */
 250        ARG_PTR_TO_ALLOC_MEM_OR_NULL,   /* pointer to dynamically allocated memory or NULL */
 251        ARG_CONST_ALLOC_SIZE_OR_ZERO,   /* number of allocated bytes requested */
 252};
 253
 254/* type of values returned from helper functions */
 255enum bpf_return_type {
 256        RET_INTEGER,                    /* function returns integer */
 257        RET_VOID,                       /* function doesn't return anything */
 258        RET_PTR_TO_MAP_VALUE,           /* returns a pointer to map elem value */
 259        RET_PTR_TO_MAP_VALUE_OR_NULL,   /* returns a pointer to map elem value or NULL */
 260        RET_PTR_TO_SOCKET_OR_NULL,      /* returns a pointer to a socket or NULL */
 261        RET_PTR_TO_TCP_SOCK_OR_NULL,    /* returns a pointer to a tcp_sock or NULL */
 262        RET_PTR_TO_SOCK_COMMON_OR_NULL, /* returns a pointer to a sock_common or NULL */
 263        RET_PTR_TO_ALLOC_MEM_OR_NULL,   /* returns a pointer to dynamically allocated memory or NULL */
 264};
 265
 266/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
 267 * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
 268 * instructions after verifying
 269 */
 270struct bpf_func_proto {
 271        u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
 272        bool gpl_only;
 273        bool pkt_access;
 274        enum bpf_return_type ret_type;
 275        union {
 276                struct {
 277                        enum bpf_arg_type arg1_type;
 278                        enum bpf_arg_type arg2_type;
 279                        enum bpf_arg_type arg3_type;
 280                        enum bpf_arg_type arg4_type;
 281                        enum bpf_arg_type arg5_type;
 282                };
 283                enum bpf_arg_type arg_type[5];
 284        };
 285        int *btf_id; /* BTF ids of arguments */
 286};
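/* Illustrative sketch of how a helper prototype is typically described with
 * this struct (the helper name here is hypothetical; in-tree helpers define
 * the function body via the BPF_CALL_*() wrappers):
 *
 *        const struct bpf_func_proto bpf_example_lookup_proto = {
 *                .func           = bpf_example_lookup,
 *                .gpl_only       = false,
 *                .ret_type       = RET_PTR_TO_MAP_VALUE_OR_NULL,
 *                .arg1_type      = ARG_CONST_MAP_PTR,
 *                .arg2_type      = ARG_PTR_TO_MAP_KEY,
 *        };
 */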
 287
 288/* bpf_context is intentionally undefined structure. Pointer to bpf_context is
 289 * the first argument to eBPF programs.
 290 * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
 291 */
 292struct bpf_context;
 293
 294enum bpf_access_type {
 295        BPF_READ = 1,
 296        BPF_WRITE = 2
 297};
 298
 299/* types of values stored in eBPF registers */
 300/* Pointer types represent:
 301 * pointer
 302 * pointer + imm
 303 * pointer + (u16) var
 304 * pointer + (u16) var + imm
 305 * if (range > 0) then [ptr, ptr + range - off) is safe to access
 306 * if (id > 0) means that some 'var' was added
 307 * if (off > 0) means that 'imm' was added
 308 */
 309enum bpf_reg_type {
 310        NOT_INIT = 0,            /* nothing was written into register */
 311        SCALAR_VALUE,            /* reg doesn't contain a valid pointer */
 312        PTR_TO_CTX,              /* reg points to bpf_context */
 313        CONST_PTR_TO_MAP,        /* reg points to struct bpf_map */
 314        PTR_TO_MAP_VALUE,        /* reg points to map element value */
 315        PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
 316        PTR_TO_STACK,            /* reg == frame_pointer + offset */
 317        PTR_TO_PACKET_META,      /* skb->data - meta_len */
 318        PTR_TO_PACKET,           /* reg points to skb->data */
 319        PTR_TO_PACKET_END,       /* skb->data + headlen */
 320        PTR_TO_FLOW_KEYS,        /* reg points to bpf_flow_keys */
 321        PTR_TO_SOCKET,           /* reg points to struct bpf_sock */
 322        PTR_TO_SOCKET_OR_NULL,   /* reg points to struct bpf_sock or NULL */
 323        PTR_TO_SOCK_COMMON,      /* reg points to sock_common */
 324        PTR_TO_SOCK_COMMON_OR_NULL, /* reg points to sock_common or NULL */
 325        PTR_TO_TCP_SOCK,         /* reg points to struct tcp_sock */
 326        PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */
 327        PTR_TO_TP_BUFFER,        /* reg points to a writable raw tp's buffer */
 328        PTR_TO_XDP_SOCK,         /* reg points to struct xdp_sock */
 329        PTR_TO_BTF_ID,           /* reg points to kernel struct */
 330        PTR_TO_BTF_ID_OR_NULL,   /* reg points to kernel struct or NULL */
 331        PTR_TO_MEM,              /* reg points to valid memory region */
 332        PTR_TO_MEM_OR_NULL,      /* reg points to valid memory region or NULL */
 333};
 334
 335/* The information passed from prog-specific *_is_valid_access
 336 * back to the verifier.
 337 */
 338struct bpf_insn_access_aux {
 339        enum bpf_reg_type reg_type;
 340        union {
 341                int ctx_field_size;
 342                u32 btf_id;
 343        };
 344        struct bpf_verifier_log *log; /* for verbose logs */
 345};
 346
 347static inline void
 348bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
 349{
 350        aux->ctx_field_size = size;
 351}
 352
 353struct bpf_prog_ops {
 354        int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
 355                        union bpf_attr __user *uattr);
 356};
 357
 358struct bpf_verifier_ops {
 359        /* return eBPF function prototype for verification */
 360        const struct bpf_func_proto *
 361        (*get_func_proto)(enum bpf_func_id func_id,
 362                          const struct bpf_prog *prog);
 363
 364        /* return true if 'size' wide access at offset 'off' within bpf_context
 365         * with 'type' (read or write) is allowed
 366         */
 367        bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
 368                                const struct bpf_prog *prog,
 369                                struct bpf_insn_access_aux *info);
 370        int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
 371                            const struct bpf_prog *prog);
 372        int (*gen_ld_abs)(const struct bpf_insn *orig,
 373                          struct bpf_insn *insn_buf);
 374        u32 (*convert_ctx_access)(enum bpf_access_type type,
 375                                  const struct bpf_insn *src,
 376                                  struct bpf_insn *dst,
 377                                  struct bpf_prog *prog, u32 *target_size);
 378        int (*btf_struct_access)(struct bpf_verifier_log *log,
 379                                 const struct btf_type *t, int off, int size,
 380                                 enum bpf_access_type atype,
 381                                 u32 *next_btf_id);
 382};
 383
 384struct bpf_prog_offload_ops {
 385        /* verifier basic callbacks */
 386        int (*insn_hook)(struct bpf_verifier_env *env,
 387                         int insn_idx, int prev_insn_idx);
 388        int (*finalize)(struct bpf_verifier_env *env);
 389        /* verifier optimization callbacks (called after .finalize) */
 390        int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
 391                            struct bpf_insn *insn);
 392        int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
 393        /* program management callbacks */
 394        int (*prepare)(struct bpf_prog *prog);
 395        int (*translate)(struct bpf_prog *prog);
 396        void (*destroy)(struct bpf_prog *prog);
 397};
 398
 399struct bpf_prog_offload {
 400        struct bpf_prog         *prog;
 401        struct net_device       *netdev;
 402        struct bpf_offload_dev  *offdev;
 403        void                    *dev_priv;
 404        struct list_head        offloads;
 405        bool                    dev_state;
 406        bool                    opt_failed;
 407        void                    *jited_image;
 408        u32                     jited_len;
 409};
 410
 411enum bpf_cgroup_storage_type {
 412        BPF_CGROUP_STORAGE_SHARED,
 413        BPF_CGROUP_STORAGE_PERCPU,
 414        __BPF_CGROUP_STORAGE_MAX
 415};
 416
 417#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
 418
 419/* The longest tracepoint has 12 args.
 420 * See include/trace/bpf_probe.h
 421 */
 422#define MAX_BPF_FUNC_ARGS 12
 423
 424struct bpf_prog_stats {
 425        u64 cnt;
 426        u64 nsecs;
 427        struct u64_stats_sync syncp;
 428} __aligned(2 * sizeof(u64));
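/* Readers of these per-CPU stats typically take a consistent snapshot with
 * the u64_stats seqcount helpers (a sketch; 'st' is a hypothetical pointer
 * to one CPU's bpf_prog_stats):
 *
 *        unsigned int start;
 *        u64 cnt, nsecs;
 *
 *        do {
 *                start = u64_stats_fetch_begin_irq(&st->syncp);
 *                cnt   = st->cnt;
 *                nsecs = st->nsecs;
 *        } while (u64_stats_fetch_retry_irq(&st->syncp, start));
 */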
 429
 430struct btf_func_model {
 431        u8 ret_size;
 432        u8 nr_args;
 433        u8 arg_size[MAX_BPF_FUNC_ARGS];
 434};
 435
 436/* Restore arguments before returning from trampoline to let original function
 437 * continue executing. This flag is used for fentry progs when there are no
 438 * fexit progs.
 439 */
 440#define BPF_TRAMP_F_RESTORE_REGS        BIT(0)
 441/* Call original function after fentry progs, but before fexit progs.
 442 * Makes sense for fentry/fexit, normal calls and indirect calls.
 443 */
 444#define BPF_TRAMP_F_CALL_ORIG           BIT(1)
 445/* Skip current frame and return to parent.  Makes sense for fentry/fexit
 446 * programs only. Should not be used with normal calls and indirect calls.
 447 */
 448#define BPF_TRAMP_F_SKIP_FRAME          BIT(2)
 449
 450/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
 451 * bytes on x86.  Pick a number to fit into BPF_IMAGE_SIZE / 2
 452 */
 453#define BPF_MAX_TRAMP_PROGS 40
 454
 455struct bpf_tramp_progs {
 456        struct bpf_prog *progs[BPF_MAX_TRAMP_PROGS];
 457        int nr_progs;
 458};
 459
 460/* Different use cases for BPF trampoline:
 461 * 1. replace nop at the function entry (kprobe equivalent)
 462 *    flags = BPF_TRAMP_F_RESTORE_REGS
 463 *    fentry = a set of programs to run before returning from trampoline
 464 *
 465 * 2. replace nop at the function entry (kprobe + kretprobe equivalent)
 466 *    flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME
 467 *    orig_call = fentry_ip + MCOUNT_INSN_SIZE
  468 *    fentry = a set of programs to run before calling original function
  469 *    fexit = a set of programs to run after original function
 470 *
 471 * 3. replace direct call instruction anywhere in the function body
 472 *    or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid)
 473 *    With flags = 0
 474 *      fentry = a set of programs to run before returning from trampoline
 475 *    With flags = BPF_TRAMP_F_CALL_ORIG
 476 *      orig_call = original callback addr or direct function addr
  477 *      fentry = a set of programs to run before calling original function
  478 *      fexit = a set of programs to run after original function
 479 */
 480int arch_prepare_bpf_trampoline(void *image, void *image_end,
 481                                const struct btf_func_model *m, u32 flags,
 482                                struct bpf_tramp_progs *tprogs,
 483                                void *orig_call);
 484/* these two functions are called from generated trampoline */
 485u64 notrace __bpf_prog_enter(void);
 486void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
 487
 488struct bpf_ksym {
 489        unsigned long            start;
 490        unsigned long            end;
 491        char                     name[KSYM_NAME_LEN];
 492        struct list_head         lnode;
 493        struct latch_tree_node   tnode;
 494        bool                     prog;
 495};
 496
 497enum bpf_tramp_prog_type {
 498        BPF_TRAMP_FENTRY,
 499        BPF_TRAMP_FEXIT,
 500        BPF_TRAMP_MODIFY_RETURN,
 501        BPF_TRAMP_MAX,
 502        BPF_TRAMP_REPLACE, /* more than MAX */
 503};
 504
 505struct bpf_trampoline {
 506        /* hlist for trampoline_table */
 507        struct hlist_node hlist;
 508        /* serializes access to fields of this trampoline */
 509        struct mutex mutex;
 510        refcount_t refcnt;
 511        u64 key;
 512        struct {
 513                struct btf_func_model model;
 514                void *addr;
 515                bool ftrace_managed;
 516        } func;
 517        /* if !NULL this is BPF_PROG_TYPE_EXT program that extends another BPF
 518         * program by replacing one of its functions. func.addr is the address
 519         * of the function it replaced.
 520         */
 521        struct bpf_prog *extension_prog;
 522        /* list of BPF programs using this trampoline */
 523        struct hlist_head progs_hlist[BPF_TRAMP_MAX];
 524        /* Number of attached programs. A counter per kind. */
 525        int progs_cnt[BPF_TRAMP_MAX];
 526        /* Executable image of trampoline */
 527        void *image;
 528        u64 selector;
 529        struct bpf_ksym ksym;
 530};
 531
 532#define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */
 533
 534struct bpf_dispatcher_prog {
 535        struct bpf_prog *prog;
 536        refcount_t users;
 537};
 538
 539struct bpf_dispatcher {
 540        /* dispatcher mutex */
 541        struct mutex mutex;
 542        void *func;
 543        struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
 544        int num_progs;
 545        void *image;
 546        u32 image_off;
 547        struct bpf_ksym ksym;
 548};
 549
 550static __always_inline unsigned int bpf_dispatcher_nop_func(
 551        const void *ctx,
 552        const struct bpf_insn *insnsi,
 553        unsigned int (*bpf_func)(const void *,
 554                                 const struct bpf_insn *))
 555{
 556        return bpf_func(ctx, insnsi);
 557}
 558#ifdef CONFIG_BPF_JIT
 559struct bpf_trampoline *bpf_trampoline_lookup(u64 key);
 560int bpf_trampoline_link_prog(struct bpf_prog *prog);
 561int bpf_trampoline_unlink_prog(struct bpf_prog *prog);
 562void bpf_trampoline_put(struct bpf_trampoline *tr);
 563#define BPF_DISPATCHER_INIT(_name) {                            \
 564        .mutex = __MUTEX_INITIALIZER(_name.mutex),              \
 565        .func = &_name##_func,                                  \
 566        .progs = {},                                            \
 567        .num_progs = 0,                                         \
 568        .image = NULL,                                          \
 569        .image_off = 0,                                         \
 570        .ksym = {                                               \
 571                .name  = #_name,                                \
 572                .lnode = LIST_HEAD_INIT(_name.ksym.lnode),      \
 573        },                                                      \
 574}
 575
 576#define DEFINE_BPF_DISPATCHER(name)                                     \
 577        noinline unsigned int bpf_dispatcher_##name##_func(             \
 578                const void *ctx,                                        \
 579                const struct bpf_insn *insnsi,                          \
 580                unsigned int (*bpf_func)(const void *,                  \
 581                                         const struct bpf_insn *))      \
 582        {                                                               \
 583                return bpf_func(ctx, insnsi);                           \
 584        }                                                               \
 585        EXPORT_SYMBOL(bpf_dispatcher_##name##_func);                    \
 586        struct bpf_dispatcher bpf_dispatcher_##name =                   \
 587                BPF_DISPATCHER_INIT(bpf_dispatcher_##name);
 588#define DECLARE_BPF_DISPATCHER(name)                                    \
 589        unsigned int bpf_dispatcher_##name##_func(                      \
 590                const void *ctx,                                        \
 591                const struct bpf_insn *insnsi,                          \
 592                unsigned int (*bpf_func)(const void *,                  \
 593                                         const struct bpf_insn *));     \
 594        extern struct bpf_dispatcher bpf_dispatcher_##name;
 595#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
 596#define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
 597void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
 598                                struct bpf_prog *to);
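/* Sketch of how a dispatcher is typically wired up (modelled on the XDP
 * dispatcher; treat the 'xdp' name as an example):
 *
 *        DECLARE_BPF_DISPATCHER(xdp);        // in a shared header
 *        DEFINE_BPF_DISPATCHER(xdp);         // in exactly one .c file
 *
 *        // run a program through the dispatcher trampoline:
 *        ret = BPF_DISPATCHER_FUNC(xdp)(ctx, insnsi, bpf_func);
 *
 *        // re-patch the dispatcher when the attached program changes:
 *        bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(xdp), old_prog, new_prog);
 */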
 599/* Called only from JIT-enabled code, so there's no need for stubs. */
 600void *bpf_jit_alloc_exec_page(void);
 601void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
 602void bpf_image_ksym_del(struct bpf_ksym *ksym);
 603void bpf_ksym_add(struct bpf_ksym *ksym);
 604void bpf_ksym_del(struct bpf_ksym *ksym);
 605#else
 606static inline struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
 607{
 608        return NULL;
 609}
 610static inline int bpf_trampoline_link_prog(struct bpf_prog *prog)
 611{
 612        return -ENOTSUPP;
 613}
 614static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog)
 615{
 616        return -ENOTSUPP;
 617}
 618static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
 619#define DEFINE_BPF_DISPATCHER(name)
 620#define DECLARE_BPF_DISPATCHER(name)
 621#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nop_func
 622#define BPF_DISPATCHER_PTR(name) NULL
 623static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
 624                                              struct bpf_prog *from,
 625                                              struct bpf_prog *to) {}
 626static inline bool is_bpf_image_address(unsigned long address)
 627{
 628        return false;
 629}
 630#endif
 631
 632struct bpf_func_info_aux {
 633        u16 linkage;
 634        bool unreliable;
 635};
 636
 637enum bpf_jit_poke_reason {
 638        BPF_POKE_REASON_TAIL_CALL,
 639};
 640
 641/* Descriptor of pokes pointing /into/ the JITed image. */
 642struct bpf_jit_poke_descriptor {
 643        void *ip;
 644        union {
 645                struct {
 646                        struct bpf_map *map;
 647                        u32 key;
 648                } tail_call;
 649        };
 650        bool ip_stable;
 651        u8 adj_off;
 652        u16 reason;
 653};
 654
 655/* reg_type info for ctx arguments */
 656struct bpf_ctx_arg_aux {
 657        u32 offset;
 658        enum bpf_reg_type reg_type;
 659};
 660
 661struct bpf_prog_aux {
 662        atomic64_t refcnt;
 663        u32 used_map_cnt;
 664        u32 max_ctx_offset;
 665        u32 max_pkt_offset;
 666        u32 max_tp_access;
 667        u32 stack_depth;
 668        u32 id;
 669        u32 func_cnt; /* used by non-func prog as the number of func progs */
 670        u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
 671        u32 attach_btf_id; /* in-kernel BTF type id to attach to */
 672        u32 ctx_arg_info_size;
 673        const struct bpf_ctx_arg_aux *ctx_arg_info;
 674        struct bpf_prog *linked_prog;
  675        bool verifier_zext; /* Zero extensions have been inserted by the verifier. */
 676        bool offload_requested;
 677        bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
 678        bool func_proto_unreliable;
 679        enum bpf_tramp_prog_type trampoline_prog_type;
 680        struct bpf_trampoline *trampoline;
 681        struct hlist_node tramp_hlist;
 682        /* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
 683        const struct btf_type *attach_func_proto;
 684        /* function name for valid attach_btf_id */
 685        const char *attach_func_name;
 686        struct bpf_prog **func;
 687        void *jit_data; /* JIT specific data. arch dependent */
 688        struct bpf_jit_poke_descriptor *poke_tab;
 689        u32 size_poke_tab;
 690        struct bpf_ksym ksym;
 691        const struct bpf_prog_ops *ops;
 692        struct bpf_map **used_maps;
 693        struct bpf_prog *prog;
 694        struct user_struct *user;
 695        u64 load_time; /* ns since boottime */
 696        struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
 697        char name[BPF_OBJ_NAME_LEN];
 698#ifdef CONFIG_SECURITY
 699        void *security;
 700#endif
 701        struct bpf_prog_offload *offload;
 702        struct btf *btf;
 703        struct bpf_func_info *func_info;
 704        struct bpf_func_info_aux *func_info_aux;
 705        /* bpf_line_info loaded from userspace.  linfo->insn_off
 706         * has the xlated insn offset.
 707         * Both the main and sub prog share the same linfo.
 708         * The subprog can access its first linfo by
 709         * using the linfo_idx.
 710         */
 711        struct bpf_line_info *linfo;
 712        /* jited_linfo is the jited addr of the linfo.  It has a
 713         * one to one mapping to linfo:
 714         * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
 715         * Both the main and sub prog share the same jited_linfo.
 716         * The subprog can access its first jited_linfo by
 717         * using the linfo_idx.
 718         */
 719        void **jited_linfo;
 720        u32 func_info_cnt;
 721        u32 nr_linfo;
 722        /* subprog can use linfo_idx to access its first linfo and
 723         * jited_linfo.
 724         * main prog always has linfo_idx == 0
 725         */
 726        u32 linfo_idx;
 727        u32 num_exentries;
 728        struct exception_table_entry *extable;
 729        struct bpf_prog_stats __percpu *stats;
 730        union {
 731                struct work_struct work;
 732                struct rcu_head rcu;
 733        };
 734};
 735
 736struct bpf_array_aux {
 737        /* 'Ownership' of prog array is claimed by the first program that
  738         * is going to use this map or by the first program whose FD is
 739         * stored in the map to make sure that all callers and callees have
 740         * the same prog type and JITed flag.
 741         */
 742        enum bpf_prog_type type;
 743        bool jited;
 744        /* Programs with direct jumps into programs part of this array. */
 745        struct list_head poke_progs;
 746        struct bpf_map *map;
 747        struct mutex poke_mutex;
 748        struct work_struct work;
 749};
 750
 751struct bpf_struct_ops_value;
 752struct btf_type;
 753struct btf_member;
 754
 755#define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
 756struct bpf_struct_ops {
 757        const struct bpf_verifier_ops *verifier_ops;
 758        int (*init)(struct btf *btf);
 759        int (*check_member)(const struct btf_type *t,
 760                            const struct btf_member *member);
 761        int (*init_member)(const struct btf_type *t,
 762                           const struct btf_member *member,
 763                           void *kdata, const void *udata);
 764        int (*reg)(void *kdata);
 765        void (*unreg)(void *kdata);
 766        const struct btf_type *type;
 767        const struct btf_type *value_type;
 768        const char *name;
 769        struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
 770        u32 type_id;
 771        u32 value_id;
 772};
 773
 774#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
 775#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
 776const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id);
 777void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log);
 778bool bpf_struct_ops_get(const void *kdata);
 779void bpf_struct_ops_put(const void *kdata);
 780int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
 781                                       void *value);
 782static inline bool bpf_try_module_get(const void *data, struct module *owner)
 783{
 784        if (owner == BPF_MODULE_OWNER)
 785                return bpf_struct_ops_get(data);
 786        else
 787                return try_module_get(owner);
 788}
 789static inline void bpf_module_put(const void *data, struct module *owner)
 790{
 791        if (owner == BPF_MODULE_OWNER)
 792                bpf_struct_ops_put(data);
 793        else
 794                module_put(owner);
 795}
 796#else
 797static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
 798{
 799        return NULL;
 800}
 801static inline void bpf_struct_ops_init(struct btf *btf,
 802                                       struct bpf_verifier_log *log)
 803{
 804}
 805static inline bool bpf_try_module_get(const void *data, struct module *owner)
 806{
 807        return try_module_get(owner);
 808}
 809static inline void bpf_module_put(const void *data, struct module *owner)
 810{
 811        module_put(owner);
 812}
 813static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
 814                                                     void *key,
 815                                                     void *value)
 816{
 817        return -EINVAL;
 818}
 819#endif
 820
 821struct bpf_array {
 822        struct bpf_map map;
 823        u32 elem_size;
 824        u32 index_mask;
 825        struct bpf_array_aux *aux;
 826        union {
 827                char value[0] __aligned(8);
 828                void *ptrs[0] __aligned(8);
 829                void __percpu *pptrs[0] __aligned(8);
 830        };
 831};
 832
 833#define BPF_COMPLEXITY_LIMIT_INSNS      1000000 /* yes. 1M insns */
 834#define MAX_TAIL_CALL_CNT 32
 835
 836#define BPF_F_ACCESS_MASK       (BPF_F_RDONLY |         \
 837                                 BPF_F_RDONLY_PROG |    \
 838                                 BPF_F_WRONLY |         \
 839                                 BPF_F_WRONLY_PROG)
 840
 841#define BPF_MAP_CAN_READ        BIT(0)
 842#define BPF_MAP_CAN_WRITE       BIT(1)
 843
 844static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
 845{
 846        u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
 847
 848        /* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is
 849         * not possible.
 850         */
 851        if (access_flags & BPF_F_RDONLY_PROG)
 852                return BPF_MAP_CAN_READ;
 853        else if (access_flags & BPF_F_WRONLY_PROG)
 854                return BPF_MAP_CAN_WRITE;
 855        else
 856                return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE;
 857}
 858
 859static inline bool bpf_map_flags_access_ok(u32 access_flags)
 860{
 861        return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) !=
 862               (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
 863}
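/* For example (sketch):
 *        bpf_map_flags_access_ok(BPF_F_RDONLY_PROG)                      -> true
 *        bpf_map_flags_access_ok(BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)  -> false
 * i.e. a map may be read-only or write-only for programs, but not both at once.
 */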
 864
 865struct bpf_event_entry {
 866        struct perf_event *event;
 867        struct file *perf_file;
 868        struct file *map_file;
 869        struct rcu_head rcu;
 870};
 871
 872bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
 873int bpf_prog_calc_tag(struct bpf_prog *fp);
 874const char *kernel_type_name(u32 btf_type_id);
 875
 876const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
 877
 878typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
 879                                        unsigned long off, unsigned long len);
 880typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
 881                                        const struct bpf_insn *src,
 882                                        struct bpf_insn *dst,
 883                                        struct bpf_prog *prog,
 884                                        u32 *target_size);
 885
 886u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
 887                     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);
 888
 889/* an array of programs to be executed under rcu_lock.
 890 *
 891 * Typical usage:
 892 * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, BPF_PROG_RUN);
 893 *
 894 * the structure returned by bpf_prog_array_alloc() should be populated
 895 * with program pointers and the last pointer must be NULL.
 896 * The user has to keep refcnt on the program and make sure the program
 897 * is removed from the array before bpf_prog_put().
 898 * The 'struct bpf_prog_array *' should only be replaced with xchg()
 899 * since other cpus are walking the array of pointers in parallel.
 900 */
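/* A minimal sketch of the pattern described above (error handling omitted;
 * 'attached' is a hypothetical RCU-protected pointer to the active array):
 *
 *        struct bpf_prog_array *old_array, *new_array;
 *
 *        new_array = bpf_prog_array_alloc(1, GFP_KERNEL);
 *        new_array->items[0].prog = prog;      // trailing item stays NULL
 *        old_array = xchg(&attached, new_array);
 *        // old_array is released only after current readers are done
 *        bpf_prog_array_free(old_array);
 */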
 901struct bpf_prog_array_item {
 902        struct bpf_prog *prog;
 903        struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
 904};
 905
 906struct bpf_prog_array {
 907        struct rcu_head rcu;
 908        struct bpf_prog_array_item items[];
 909};
 910
 911struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
 912void bpf_prog_array_free(struct bpf_prog_array *progs);
 913int bpf_prog_array_length(struct bpf_prog_array *progs);
 914bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
 915int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
 916                                __u32 __user *prog_ids, u32 cnt);
 917
 918void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
 919                                struct bpf_prog *old_prog);
 920int bpf_prog_array_copy_info(struct bpf_prog_array *array,
 921                             u32 *prog_ids, u32 request_cnt,
 922                             u32 *prog_cnt);
 923int bpf_prog_array_copy(struct bpf_prog_array *old_array,
 924                        struct bpf_prog *exclude_prog,
 925                        struct bpf_prog *include_prog,
 926                        struct bpf_prog_array **new_array);
 927
 928#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null)  \
 929        ({                                              \
 930                struct bpf_prog_array_item *_item;      \
 931                struct bpf_prog *_prog;                 \
 932                struct bpf_prog_array *_array;          \
 933                u32 _ret = 1;                           \
 934                migrate_disable();                      \
 935                rcu_read_lock();                        \
 936                _array = rcu_dereference(array);        \
 937                if (unlikely(check_non_null && !_array))\
 938                        goto _out;                      \
 939                _item = &_array->items[0];              \
 940                while ((_prog = READ_ONCE(_item->prog))) {              \
 941                        bpf_cgroup_storage_set(_item->cgroup_storage);  \
 942                        _ret &= func(_prog, ctx);       \
 943                        _item++;                        \
 944                }                                       \
 945_out:                                                   \
 946                rcu_read_unlock();                      \
 947                migrate_enable();                       \
 948                _ret;                                   \
 949         })
 950
 951/* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs
 952 * so BPF programs can request cwr for TCP packets.
 953 *
  954 * Current cgroup skb programs can only return 0 or 1 (0 to drop the
  955 * packet, 1 to keep it). This macro changes the behavior so the low order bit
 956 * indicates whether the packet should be dropped (0) or not (1)
 957 * and the next bit is a congestion notification bit. This could be
 958 * used by TCP to call tcp_enter_cwr()
 959 *
 960 * Hence, new allowed return values of CGROUP EGRESS BPF programs are:
 961 *   0: drop packet
 962 *   1: keep packet
 963 *   2: drop packet and cn
 964 *   3: keep packet and cn
 965 *
  966 * This macro then converts it to one of the NET_XMIT values or to an
  967 * error code that is then interpreted as drop packet (and no cn):
 968 *   0: NET_XMIT_SUCCESS  skb should be transmitted
 969 *   1: NET_XMIT_DROP     skb should be dropped and cn
 970 *   2: NET_XMIT_CN       skb should be transmitted and cn
 971 *   3: -EPERM            skb should be dropped
 972 */
 973#define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func)         \
 974        ({                                              \
 975                struct bpf_prog_array_item *_item;      \
 976                struct bpf_prog *_prog;                 \
 977                struct bpf_prog_array *_array;          \
 978                u32 ret;                                \
 979                u32 _ret = 1;                           \
 980                u32 _cn = 0;                            \
 981                migrate_disable();                      \
 982                rcu_read_lock();                        \
 983                _array = rcu_dereference(array);        \
 984                _item = &_array->items[0];              \
 985                while ((_prog = READ_ONCE(_item->prog))) {              \
 986                        bpf_cgroup_storage_set(_item->cgroup_storage);  \
 987                        ret = func(_prog, ctx);         \
 988                        _ret &= (ret & 1);              \
 989                        _cn |= (ret & 2);               \
 990                        _item++;                        \
 991                }                                       \
 992                rcu_read_unlock();                      \
 993                migrate_enable();                       \
 994                if (_ret)                               \
 995                        _ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS);  \
 996                else                                    \
 997                        _ret = (_cn ? NET_XMIT_DROP : -EPERM);          \
 998                _ret;                                   \
 999        })
1000
1001#define BPF_PROG_RUN_ARRAY(array, ctx, func)            \
1002        __BPF_PROG_RUN_ARRAY(array, ctx, func, false)
1003
1004#define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func)      \
1005        __BPF_PROG_RUN_ARRAY(array, ctx, func, true)
1006
1007#ifdef CONFIG_BPF_SYSCALL
1008DECLARE_PER_CPU(int, bpf_prog_active);
1009extern struct mutex bpf_stats_enabled_mutex;
1010
1011/*
1012 * Block execution of BPF programs attached to instrumentation (perf,
1013 * kprobes, tracepoints) to prevent deadlocks on map operations as any of
1014 * these events can happen inside a region which holds a map bucket lock
1015 * and can deadlock on it.
1016 *
1017 * Use the preemption safe inc/dec variants on RT because migrate disable
1018 * is preemptible on RT and preemption in the middle of the RMW operation
1019 * might lead to inconsistent state. Use the raw variants for non RT
1020 * kernels as migrate_disable() maps to preempt_disable() so the slightly
1021 * more expensive save operation can be avoided.
1022 */
1023static inline void bpf_disable_instrumentation(void)
1024{
1025        migrate_disable();
1026        if (IS_ENABLED(CONFIG_PREEMPT_RT))
1027                this_cpu_inc(bpf_prog_active);
1028        else
1029                __this_cpu_inc(bpf_prog_active);
1030}
1031
1032static inline void bpf_enable_instrumentation(void)
1033{
1034        if (IS_ENABLED(CONFIG_PREEMPT_RT))
1035                this_cpu_dec(bpf_prog_active);
1036        else
1037                __this_cpu_dec(bpf_prog_active);
1038        migrate_enable();
1039}
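/* Typical usage (sketch): bracket sections that take map bucket locks so
 * that instrumentation-attached programs cannot recurse into them:
 *
 *        bpf_disable_instrumentation();
 *        // ... touch map internals that tracing programs could also reach ...
 *        bpf_enable_instrumentation();
 */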
1040
1041extern const struct file_operations bpf_map_fops;
1042extern const struct file_operations bpf_prog_fops;
1043extern const struct file_operations bpf_iter_fops;
1044
1045#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
1046        extern const struct bpf_prog_ops _name ## _prog_ops; \
1047        extern const struct bpf_verifier_ops _name ## _verifier_ops;
1048#define BPF_MAP_TYPE(_id, _ops) \
1049        extern const struct bpf_map_ops _ops;
1050#define BPF_LINK_TYPE(_id, _name)
1051#include <linux/bpf_types.h>
1052#undef BPF_PROG_TYPE
1053#undef BPF_MAP_TYPE
1054#undef BPF_LINK_TYPE
1055
1056extern const struct bpf_prog_ops bpf_offload_prog_ops;
1057extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
1058extern const struct bpf_verifier_ops xdp_analyzer_ops;
1059
1060struct bpf_prog *bpf_prog_get(u32 ufd);
1061struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
1062                                       bool attach_drv);
1063void bpf_prog_add(struct bpf_prog *prog, int i);
1064void bpf_prog_sub(struct bpf_prog *prog, int i);
1065void bpf_prog_inc(struct bpf_prog *prog);
1066struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
1067void bpf_prog_put(struct bpf_prog *prog);
1068int __bpf_prog_charge(struct user_struct *user, u32 pages);
1069void __bpf_prog_uncharge(struct user_struct *user, u32 pages);
1070void __bpf_free_used_maps(struct bpf_prog_aux *aux,
1071                          struct bpf_map **used_maps, u32 len);
1072
1073void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
1074void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);
1075
1076struct bpf_map *bpf_map_get(u32 ufd);
1077struct bpf_map *bpf_map_get_with_uref(u32 ufd);
1078struct bpf_map *__bpf_map_get(struct fd f);
1079void bpf_map_inc(struct bpf_map *map);
1080void bpf_map_inc_with_uref(struct bpf_map *map);
1081struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
1082void bpf_map_put_with_uref(struct bpf_map *map);
1083void bpf_map_put(struct bpf_map *map);
1084int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
1085void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
1086int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size);
1087void bpf_map_charge_finish(struct bpf_map_memory *mem);
1088void bpf_map_charge_move(struct bpf_map_memory *dst,
1089                         struct bpf_map_memory *src);
1090void *bpf_map_area_alloc(u64 size, int numa_node);
1091void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
1092void bpf_map_area_free(void *base);
1093void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
1094int  generic_map_lookup_batch(struct bpf_map *map,
1095                              const union bpf_attr *attr,
1096                              union bpf_attr __user *uattr);
1097int  generic_map_update_batch(struct bpf_map *map,
1098                              const union bpf_attr *attr,
1099                              union bpf_attr __user *uattr);
1100int  generic_map_delete_batch(struct bpf_map *map,
1101                              const union bpf_attr *attr,
1102                              union bpf_attr __user *uattr);
1103struct bpf_map *bpf_map_get_curr_or_next(u32 *id);
1104
1105extern int sysctl_unprivileged_bpf_disabled;
1106
1107static inline bool bpf_allow_ptr_leaks(void)
1108{
1109        return perfmon_capable();
1110}
1111
1112static inline bool bpf_bypass_spec_v1(void)
1113{
1114        return perfmon_capable();
1115}
1116
1117static inline bool bpf_bypass_spec_v4(void)
1118{
1119        return perfmon_capable();
1120}
1121
1122int bpf_map_new_fd(struct bpf_map *map, int flags);
1123int bpf_prog_new_fd(struct bpf_prog *prog);
1124
1125struct bpf_link {
1126        atomic64_t refcnt;
1127        u32 id;
1128        enum bpf_link_type type;
1129        const struct bpf_link_ops *ops;
1130        struct bpf_prog *prog;
1131        struct work_struct work;
1132};
1133
1134struct bpf_link_primer {
1135        struct bpf_link *link;
1136        struct file *file;
1137        int fd;
1138        u32 id;
1139};
1140
1141struct bpf_link_ops {
1142        void (*release)(struct bpf_link *link);
1143        void (*dealloc)(struct bpf_link *link);
1144        int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
1145                           struct bpf_prog *old_prog);
1146        void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq);
1147        int (*fill_link_info)(const struct bpf_link *link,
1148                              struct bpf_link_info *info);
1149};
1150
1151void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
1152                   const struct bpf_link_ops *ops, struct bpf_prog *prog);
1153int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer);
1154int bpf_link_settle(struct bpf_link_primer *primer);
1155void bpf_link_cleanup(struct bpf_link_primer *primer);
1156void bpf_link_inc(struct bpf_link *link);
1157void bpf_link_put(struct bpf_link *link);
1158int bpf_link_new_fd(struct bpf_link *link);
1159struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd);
1160struct bpf_link *bpf_link_get_from_fd(u32 ufd);
1161
1162int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
1163int bpf_obj_get_user(const char __user *pathname, int flags);
1164
1165#define BPF_ITER_FUNC_PREFIX "bpf_iter_"
1166#define DEFINE_BPF_ITER_FUNC(target, args...)                   \
1167        extern int bpf_iter_ ## target(args);                   \
1168        int __init bpf_iter_ ## target(args) { return 0; }
1169
1170typedef int (*bpf_iter_init_seq_priv_t)(void *private_data);
1171typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
1172
1173#define BPF_ITER_CTX_ARG_MAX 2
1174struct bpf_iter_reg {
1175        const char *target;
1176        const struct seq_operations *seq_ops;
1177        bpf_iter_init_seq_priv_t init_seq_private;
1178        bpf_iter_fini_seq_priv_t fini_seq_private;
1179        u32 seq_priv_size;
1180        u32 ctx_arg_info_size;
1181        struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX];
1182};
1183
1184struct bpf_iter_meta {
1185        __bpf_md_ptr(struct seq_file *, seq);
1186        u64 session_id;
1187        u64 seq_num;
1188};
1189
1190int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info);
1191void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info);
1192bool bpf_iter_prog_supported(struct bpf_prog *prog);
1193int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
1194int bpf_iter_new_fd(struct bpf_link *link);
1195bool bpf_link_is_iter(struct bpf_link *link);
1196struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop);
1197int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx);
1198
1199int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
1200int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
1201int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
1202                           u64 flags);
1203int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
1204                            u64 flags);
1205
1206int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
1207
1208int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
1209                                 void *key, void *value, u64 map_flags);
1210int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
1211int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
1212                                void *key, void *value, u64 map_flags);
1213int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
1214
1215int bpf_get_file_flag(int flags);
1216int bpf_check_uarg_tail_zero(void __user *uaddr, size_t expected_size,
1217                             size_t actual_size);
1218
1219/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
1220 * forced to use 'long' read/writes to try to atomically copy long counters.
1221 * Best-effort only.  No barriers here, since it _will_ race with concurrent
1222 * updates from BPF programs. Called from bpf syscall and mostly used with
1223 * size 8 or 16 bytes, so ask compiler to inline it.
1224 */
1225static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
1226{
1227        const long *lsrc = src;
1228        long *ldst = dst;
1229
1230        size /= sizeof(long);
1231        while (size--)
1232                *ldst++ = *lsrc++;
1233}
1234
1235/* verify correctness of eBPF program */
1236int bpf_check(struct bpf_prog **fp, union bpf_attr *attr,
1237              union bpf_attr __user *uattr);
1238void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
1239
1240/* Map specifics */
1241struct xdp_buff;
1242struct sk_buff;
1243
1244struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
1245struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key);
1246void __dev_flush(void);
1247int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
1248                    struct net_device *dev_rx);
1249int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
1250                    struct net_device *dev_rx);
1251int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
1252                             struct bpf_prog *xdp_prog);
1253bool dev_map_can_have_prog(struct bpf_map *map);
1254
1255struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
1256void __cpu_map_flush(void);
1257int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
1258                    struct net_device *dev_rx);
1259
1260/* Return map's numa specified by userspace */
1261static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
1262{
1263        return (attr->map_flags & BPF_F_NUMA_NODE) ?
1264                attr->numa_node : NUMA_NO_NODE;
1265}
1266
1267struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
1268int array_map_alloc_check(union bpf_attr *attr);
1269
1270int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
1271                          union bpf_attr __user *uattr);
1272int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
1273                          union bpf_attr __user *uattr);
1274int bpf_prog_test_run_tracing(struct bpf_prog *prog,
1275                              const union bpf_attr *kattr,
1276                              union bpf_attr __user *uattr);
1277int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
1278                                     const union bpf_attr *kattr,
1279                                     union bpf_attr __user *uattr);
1280bool btf_ctx_access(int off, int size, enum bpf_access_type type,
1281                    const struct bpf_prog *prog,
1282                    struct bpf_insn_access_aux *info);
1283int btf_struct_access(struct bpf_verifier_log *log,
1284                      const struct btf_type *t, int off, int size,
1285                      enum bpf_access_type atype,
1286                      u32 *next_btf_id);
1287int btf_resolve_helper_id(struct bpf_verifier_log *log,
1288                          const struct bpf_func_proto *fn, int arg);
1289
1290int btf_distill_func_proto(struct bpf_verifier_log *log,
1291                           struct btf *btf,
1292                           const struct btf_type *func_proto,
1293                           const char *func_name,
1294                           struct btf_func_model *m);
1295
1296struct bpf_reg_state;
1297int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog,
1298                             struct bpf_reg_state *regs);
1299int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
1300                          struct bpf_reg_state *reg);
1301int btf_check_type_match(struct bpf_verifier_env *env, struct bpf_prog *prog,
1302                         struct btf *btf, const struct btf_type *t);
1303
1304struct bpf_prog *bpf_prog_by_id(u32 id);
1305
1306const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id);
1307#else /* !CONFIG_BPF_SYSCALL */
1308static inline struct bpf_prog *bpf_prog_get(u32 ufd)
1309{
1310        return ERR_PTR(-EOPNOTSUPP);
1311}
1312
1313static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
1314                                                     enum bpf_prog_type type,
1315                                                     bool attach_drv)
1316{
1317        return ERR_PTR(-EOPNOTSUPP);
1318}
1319
1320static inline void bpf_prog_add(struct bpf_prog *prog, int i)
1321{
1322}
1323
1324static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
1325{
1326}
1327
1328static inline void bpf_prog_put(struct bpf_prog *prog)
1329{
1330}
1331
1332static inline void bpf_prog_inc(struct bpf_prog *prog)
1333{
1334}
1335
1336static inline struct bpf_prog *__must_check
1337bpf_prog_inc_not_zero(struct bpf_prog *prog)
1338{
1339        return ERR_PTR(-EOPNOTSUPP);
1340}
1341
1342static inline int __bpf_prog_charge(struct user_struct *user, u32 pages)
1343{
1344        return 0;
1345}
1346
1347static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
1348{
1349}
1350
1351static inline int bpf_obj_get_user(const char __user *pathname, int flags)
1352{
1353        return -EOPNOTSUPP;
1354}
1355
1356static inline struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map,
1357                                                            u32 key)
1358{
1359        return NULL;
1360}
1361
1362static inline struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map,
1363                                                                 u32 key)
1364{
1365        return NULL;
1366}

1367static inline bool dev_map_can_have_prog(struct bpf_map *map)
1368{
1369        return false;
1370}
1371
1372static inline void __dev_flush(void)
1373{
1374}
1375
1376struct xdp_buff;
1377struct bpf_dtab_netdev;
1378
1379static inline
1380int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
1381                    struct net_device *dev_rx)
1382{
1383        return 0;
1384}
1385
1386static inline
1387int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
1388                    struct net_device *dev_rx)
1389{
1390        return 0;
1391}
1392
1393struct sk_buff;
1394
1395static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
1396                                           struct sk_buff *skb,
1397                                           struct bpf_prog *xdp_prog)
1398{
1399        return 0;
1400}
1401
1402static inline
1403struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
1404{
1405        return NULL;
1406}
1407
1408static inline void __cpu_map_flush(void)
1409{
1410}
1411
1412static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
1413                                  struct xdp_buff *xdp,
1414                                  struct net_device *dev_rx)
1415{
1416        return 0;
1417}
1418
1419static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
1420                                enum bpf_prog_type type)
1421{
1422        return ERR_PTR(-EOPNOTSUPP);
1423}
1424
1425static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog,
1426                                        const union bpf_attr *kattr,
1427                                        union bpf_attr __user *uattr)
1428{
1429        return -ENOTSUPP;
1430}
1431
1432static inline int bpf_prog_test_run_skb(struct bpf_prog *prog,
1433                                        const union bpf_attr *kattr,
1434                                        union bpf_attr __user *uattr)
1435{
1436        return -ENOTSUPP;
1437}
1438
1439static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog,
1440                                            const union bpf_attr *kattr,
1441                                            union bpf_attr __user *uattr)
1442{
1443        return -ENOTSUPP;
1444}
1445
1446static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
1447                                                   const union bpf_attr *kattr,
1448                                                   union bpf_attr __user *uattr)
1449{
1450        return -ENOTSUPP;
1451}
1452
1453static inline void bpf_map_put(struct bpf_map *map)
1454{
1455}
1456
1457static inline struct bpf_prog *bpf_prog_by_id(u32 id)
1458{
1459        return ERR_PTR(-ENOTSUPP);
1460}
1461
1462static inline const struct bpf_func_proto *
1463bpf_base_func_proto(enum bpf_func_id func_id)
1464{
1465        return NULL;
1466}
1467#endif /* CONFIG_BPF_SYSCALL */
1468
1469static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
1470                                                 enum bpf_prog_type type)
1471{
1472        return bpf_prog_get_type_dev(ufd, type, false);
1473}
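
/* Illustrative sketch (an assumption): a subsystem that accepts only one
 * program type can take a user-supplied fd like this.  'ufd' is a
 * hypothetical file descriptor received from userspace.
 *
 *	struct bpf_prog *prog;
 *
 *	prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_XDP);
 *	if (IS_ERR(prog))
 *		return PTR_ERR(prog);
 *	// ... attach or use the program ...
 *	bpf_prog_put(prog);	// drop the reference taken above
 */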
1474
1475bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);
1476
1477int bpf_prog_offload_compile(struct bpf_prog *prog);
1478void bpf_prog_offload_destroy(struct bpf_prog *prog);
1479int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
1480                               struct bpf_prog *prog);
1481
1482int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);
1483
1484int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
1485int bpf_map_offload_update_elem(struct bpf_map *map,
1486                                void *key, void *value, u64 flags);
1487int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
1488int bpf_map_offload_get_next_key(struct bpf_map *map,
1489                                 void *key, void *next_key);
1490
1491bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);
1492
1493struct bpf_offload_dev *
1494bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
1495void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
1496void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
1497int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
1498                                    struct net_device *netdev);
1499void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
1500                                       struct net_device *netdev);
1501bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);
1502
1503#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
1504int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
1505
1506static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
1507{
1508        return aux->offload_requested;
1509}
1510
1511static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
1512{
1513        return unlikely(map->ops == &bpf_map_offload_ops);
1514}
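
/* Illustrative sketch (an assumption, modelled on the syscall-side pattern):
 * callers that support device-bound maps dispatch on bpf_map_is_dev_bound()
 * before using the regular map ops.  'example_lookup()' is a hypothetical
 * wrapper.
 *
 *	static int example_lookup(struct bpf_map *map, void *key, void *value)
 *	{
 *		if (bpf_map_is_dev_bound(map))
 *			return bpf_map_offload_lookup_elem(map, key, value);
 *		// ... otherwise fall through to the in-kernel lookup path ...
 *		return -ENOTSUPP;
 *	}
 */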
1515
1516struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
1517void bpf_map_offload_map_free(struct bpf_map *map);
1518#else
1519static inline int bpf_prog_offload_init(struct bpf_prog *prog,
1520                                        union bpf_attr *attr)
1521{
1522        return -EOPNOTSUPP;
1523}
1524
1525static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
1526{
1527        return false;
1528}
1529
1530static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
1531{
1532        return false;
1533}
1534
1535static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
1536{
1537        return ERR_PTR(-EOPNOTSUPP);
1538}
1539
1540static inline void bpf_map_offload_map_free(struct bpf_map *map)
1541{
1542}
1543#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
1544
1545#if defined(CONFIG_BPF_STREAM_PARSER)
1546int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
1547                         struct bpf_prog *old, u32 which);
1548int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
1549int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
1550void sock_map_unhash(struct sock *sk);
1551void sock_map_close(struct sock *sk, long timeout);
1552#else
1553static inline int sock_map_prog_update(struct bpf_map *map,
1554                                       struct bpf_prog *prog,
1555                                       struct bpf_prog *old, u32 which)
1556{
1557        return -EOPNOTSUPP;
1558}
1559
1560static inline int sock_map_get_from_fd(const union bpf_attr *attr,
1561                                       struct bpf_prog *prog)
1562{
1563        return -EINVAL;
1564}
1565
1566static inline int sock_map_prog_detach(const union bpf_attr *attr,
1567                                       enum bpf_prog_type ptype)
1568{
1569        return -EOPNOTSUPP;
1570}
1571#endif /* CONFIG_BPF_STREAM_PARSER */
1572
1573#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
1574void bpf_sk_reuseport_detach(struct sock *sk);
1575int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
1576                                       void *value);
1577int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
1578                                       void *value, u64 map_flags);
1579#else
1580static inline void bpf_sk_reuseport_detach(struct sock *sk)
1581{
1582}
1583
1584#ifdef CONFIG_BPF_SYSCALL
1585static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
1586                                                     void *key, void *value)
1587{
1588        return -EOPNOTSUPP;
1589}
1590
1591static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
1592                                                     void *key, void *value,
1593                                                     u64 map_flags)
1594{
1595        return -EOPNOTSUPP;
1596}
1597#endif /* CONFIG_BPF_SYSCALL */
1598#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */
1599
1600/* verifier prototypes for helper functions called from eBPF programs */
1601extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
1602extern const struct bpf_func_proto bpf_map_update_elem_proto;
1603extern const struct bpf_func_proto bpf_map_delete_elem_proto;
1604extern const struct bpf_func_proto bpf_map_push_elem_proto;
1605extern const struct bpf_func_proto bpf_map_pop_elem_proto;
1606extern const struct bpf_func_proto bpf_map_peek_elem_proto;
1607
1608extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
1609extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
1610extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
1611extern const struct bpf_func_proto bpf_tail_call_proto;
1612extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
1613extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto;
1614extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
1615extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
1616extern const struct bpf_func_proto bpf_get_current_comm_proto;
1617extern const struct bpf_func_proto bpf_get_stackid_proto;
1618extern const struct bpf_func_proto bpf_get_stack_proto;
1619extern const struct bpf_func_proto bpf_sock_map_update_proto;
1620extern const struct bpf_func_proto bpf_sock_hash_update_proto;
1621extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
1622extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto;
1623extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
1624extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
1625extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
1626extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
1627extern const struct bpf_func_proto bpf_spin_lock_proto;
1628extern const struct bpf_func_proto bpf_spin_unlock_proto;
1629extern const struct bpf_func_proto bpf_get_local_storage_proto;
1630extern const struct bpf_func_proto bpf_strtol_proto;
1631extern const struct bpf_func_proto bpf_strtoul_proto;
1632extern const struct bpf_func_proto bpf_tcp_sock_proto;
1633extern const struct bpf_func_proto bpf_jiffies64_proto;
1634extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto;
1635extern const struct bpf_func_proto bpf_event_output_data_proto;
1636extern const struct bpf_func_proto bpf_ringbuf_output_proto;
1637extern const struct bpf_func_proto bpf_ringbuf_reserve_proto;
1638extern const struct bpf_func_proto bpf_ringbuf_submit_proto;
1639extern const struct bpf_func_proto bpf_ringbuf_discard_proto;
1640extern const struct bpf_func_proto bpf_ringbuf_query_proto;
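
/* Illustrative sketch (an assumption): a program type's .get_func_proto()
 * callback typically maps helper IDs onto the prototypes declared above and
 * defers to bpf_base_func_proto() for the generic helpers.
 * 'example_func_proto()' is a hypothetical callback.
 *
 *	static const struct bpf_func_proto *
 *	example_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 *	{
 *		switch (func_id) {
 *		case BPF_FUNC_map_lookup_elem:
 *			return &bpf_map_lookup_elem_proto;
 *		case BPF_FUNC_get_prandom_u32:
 *			return &bpf_get_prandom_u32_proto;
 *		default:
 *			return bpf_base_func_proto(func_id);
 *		}
 *	}
 */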
1641
1642const struct bpf_func_proto *bpf_tracing_func_proto(
1643        enum bpf_func_id func_id, const struct bpf_prog *prog);
1644
1645const struct bpf_func_proto *tracing_prog_func_proto(
1646        enum bpf_func_id func_id, const struct bpf_prog *prog);
1647
1648/* Shared helpers among cBPF and eBPF. */
1649void bpf_user_rnd_init_once(void);
1650u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
1651u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
1652
1653#if defined(CONFIG_NET)
1654bool bpf_sock_common_is_valid_access(int off, int size,
1655                                     enum bpf_access_type type,
1656                                     struct bpf_insn_access_aux *info);
1657bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
1658                              struct bpf_insn_access_aux *info);
1659u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
1660                                const struct bpf_insn *si,
1661                                struct bpf_insn *insn_buf,
1662                                struct bpf_prog *prog,
1663                                u32 *target_size);
1664#else
1665static inline bool bpf_sock_common_is_valid_access(int off, int size,
1666                                                   enum bpf_access_type type,
1667                                                   struct bpf_insn_access_aux *info)
1668{
1669        return false;
1670}
1671static inline bool bpf_sock_is_valid_access(int off, int size,
1672                                            enum bpf_access_type type,
1673                                            struct bpf_insn_access_aux *info)
1674{
1675        return false;
1676}
1677static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
1678                                              const struct bpf_insn *si,
1679                                              struct bpf_insn *insn_buf,
1680                                              struct bpf_prog *prog,
1681                                              u32 *target_size)
1682{
1683        return 0;
1684}
1685#endif
1686
1687#ifdef CONFIG_INET
1688struct sk_reuseport_kern {
1689        struct sk_buff *skb;
1690        struct sock *sk;
1691        struct sock *selected_sk;
1692        void *data_end;
1693        u32 hash;
1694        u32 reuseport_id;
1695        bool bind_inany;
1696};
1697bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
1698                                  struct bpf_insn_access_aux *info);
1699
1700u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
1701                                    const struct bpf_insn *si,
1702                                    struct bpf_insn *insn_buf,
1703                                    struct bpf_prog *prog,
1704                                    u32 *target_size);
1705
1706bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
1707                                  struct bpf_insn_access_aux *info);
1708
1709u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
1710                                    const struct bpf_insn *si,
1711                                    struct bpf_insn *insn_buf,
1712                                    struct bpf_prog *prog,
1713                                    u32 *target_size);
1714#else
1715static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
1716                                                enum bpf_access_type type,
1717                                                struct bpf_insn_access_aux *info)
1718{
1719        return false;
1720}
1721
1722static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
1723                                                  const struct bpf_insn *si,
1724                                                  struct bpf_insn *insn_buf,
1725                                                  struct bpf_prog *prog,
1726                                                  u32 *target_size)
1727{
1728        return 0;
1729}
1730static inline bool bpf_xdp_sock_is_valid_access(int off, int size,
1731                                                enum bpf_access_type type,
1732                                                struct bpf_insn_access_aux *info)
1733{
1734        return false;
1735}
1736
1737static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
1738                                                  const struct bpf_insn *si,
1739                                                  struct bpf_insn *insn_buf,
1740                                                  struct bpf_prog *prog,
1741                                                  u32 *target_size)
1742{
1743        return 0;
1744}
1745#endif /* CONFIG_INET */
1746
1747enum bpf_text_poke_type {
1748        BPF_MOD_CALL,
1749        BPF_MOD_JUMP,
1750};
1751
1752int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
1753                       void *addr1, void *addr2);
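
/* Illustrative sketch (an assumption, modelled on trampoline updates):
 * repointing a direct call site from old_addr to new_addr; a NULL old or
 * new address means "no call before/after the poke".  'ip', 'old_addr' and
 * 'new_addr' are hypothetical local variables.
 *
 *	err = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, new_addr);
 *	if (err)
 *		return err;	// site did not contain the expected old target
 */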
1754
1755#endif /* _LINUX_BPF_H */
1756