linux/include/linux/bpf-cgroup.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/bpf.h>
#include <linux/bpf-cgroup-defs.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/rbtree.h>
#include <uapi/linux/bpf.h>

struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_map;
struct bpf_prog;
struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;
struct ctl_table;
struct ctl_table_header;
struct task_struct;

#ifdef CONFIG_CGROUP_BPF

#define CGROUP_ATYPE(type) \
        case BPF_##type: return type

static inline enum cgroup_bpf_attach_type
to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type)
{
        switch (attach_type) {
        CGROUP_ATYPE(CGROUP_INET_INGRESS);
        CGROUP_ATYPE(CGROUP_INET_EGRESS);
        CGROUP_ATYPE(CGROUP_INET_SOCK_CREATE);
        CGROUP_ATYPE(CGROUP_SOCK_OPS);
        CGROUP_ATYPE(CGROUP_DEVICE);
        CGROUP_ATYPE(CGROUP_INET4_BIND);
        CGROUP_ATYPE(CGROUP_INET6_BIND);
        CGROUP_ATYPE(CGROUP_INET4_CONNECT);
        CGROUP_ATYPE(CGROUP_INET6_CONNECT);
        CGROUP_ATYPE(CGROUP_INET4_POST_BIND);
        CGROUP_ATYPE(CGROUP_INET6_POST_BIND);
        CGROUP_ATYPE(CGROUP_UDP4_SENDMSG);
        CGROUP_ATYPE(CGROUP_UDP6_SENDMSG);
        CGROUP_ATYPE(CGROUP_SYSCTL);
        CGROUP_ATYPE(CGROUP_UDP4_RECVMSG);
        CGROUP_ATYPE(CGROUP_UDP6_RECVMSG);
        CGROUP_ATYPE(CGROUP_GETSOCKOPT);
        CGROUP_ATYPE(CGROUP_SETSOCKOPT);
        CGROUP_ATYPE(CGROUP_INET4_GETPEERNAME);
        CGROUP_ATYPE(CGROUP_INET6_GETPEERNAME);
        CGROUP_ATYPE(CGROUP_INET4_GETSOCKNAME);
        CGROUP_ATYPE(CGROUP_INET6_GETSOCKNAME);
        CGROUP_ATYPE(CGROUP_INET_SOCK_RELEASE);
        default:
                return CGROUP_BPF_ATTACH_TYPE_INVALID;
        }
}
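
/* Usage sketch (illustrative): callers map the uapi attach type onto the
 * internal per-cgroup array index and must reject everything else:
 *
 *	enum cgroup_bpf_attach_type atype;
 *
 *	atype = to_cgroup_bpf_attach_type(attr->attach_type);
 *	if (atype == CGROUP_BPF_ATTACH_TYPE_INVALID)
 *		return -EINVAL;
 */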

#undef CGROUP_ATYPE

extern struct static_key_false cgroup_bpf_enabled_key[MAX_CGROUP_BPF_ATTACH_TYPE];
#define cgroup_bpf_enabled(atype) static_branch_unlikely(&cgroup_bpf_enabled_key[atype])

#define for_each_cgroup_storage_type(stype) \
        for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)

struct bpf_cgroup_storage_map;

struct bpf_storage_buffer {
        struct rcu_head rcu;
        char data[];
};

struct bpf_cgroup_storage {
        union {
                struct bpf_storage_buffer *buf;
                void __percpu *percpu_buf;
        };
        struct bpf_cgroup_storage_map *map;
        struct bpf_cgroup_storage_key key;
        struct list_head list_map;
        struct list_head list_cg;
        struct rb_node node;
        struct rcu_head rcu;
};

struct bpf_cgroup_link {
        struct bpf_link link;
        struct cgroup *cgroup;
        enum bpf_attach_type type;
};

struct bpf_prog_list {
        struct list_head node;
        struct bpf_prog *prog;
        struct bpf_cgroup_link *link;
        struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

int cgroup_bpf_inherit(struct cgroup *cgrp);
void cgroup_bpf_offline(struct cgroup *cgrp);

int __cgroup_bpf_run_filter_skb(struct sock *sk,
                                struct sk_buff *skb,
                                enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
                               enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
                                      struct sockaddr *uaddr,
                                      enum cgroup_bpf_attach_type atype,
                                      void *t_ctx,
                                      u32 *flags);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
                                     struct bpf_sock_ops_kern *sock_ops,
                                     enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
                                      short access, enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
                                   struct ctl_table *table, int write,
                                   char **buf, size_t *pcount, loff_t *ppos,
                                   enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
                                       int *optname, char __user *optval,
                                       int *optlen, char **kernel_optval);
int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
                                       int optname, char __user *optval,
                                       int __user *optlen, int max_optlen,
                                       int retval);

int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
                                            int optname, void *optval,
                                            int *optlen, int retval);

static inline enum bpf_cgroup_storage_type cgroup_storage_type(
        struct bpf_map *map)
{
        if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
                return BPF_CGROUP_STORAGE_PERCPU;

        return BPF_CGROUP_STORAGE_SHARED;
}

struct bpf_cgroup_storage *
cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
                      void *key, bool locked);
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
                                        enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
                             struct cgroup *cgroup,
                             enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);

int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
                                     void *value, u64 flags);
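
/* BPF-side sketch (assumes libbpf conventions and bpf_helpers.h; names are
 * illustrative): a program declares a BPF_MAP_TYPE_CGROUP_STORAGE map and
 * reads its per-cgroup slot with the bpf_get_local_storage() helper. The
 * kernel pre-binds that slot through bpf_cgroup_storage_assign() and
 * bpf_cgroup_storage_link() above, so the lookup cannot fail at run time.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
 *		__type(key, struct bpf_cgroup_storage_key);
 *		__type(value, __u64);
 *	} cg_storage SEC(".maps");
 *
 *	SEC("cgroup_skb/egress")
 *	int count_egress_bytes(struct __sk_buff *skb)
 *	{
 *		__u64 *bytes = bpf_get_local_storage(&cg_storage, 0);
 *
 *		__sync_fetch_and_add(bytes, skb->len);
 *		return 1;
 *	}
 */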

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)                             \
({                                                                            \
        int __ret = 0;                                                        \
        if (cgroup_bpf_enabled(CGROUP_INET_INGRESS))                          \
                __ret = __cgroup_bpf_run_filter_skb(sk, skb,                  \
                                                    CGROUP_INET_INGRESS);     \
                                                                              \
        __ret;                                                                \
})

#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)                              \
({                                                                            \
        int __ret = 0;                                                        \
        if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) && sk && sk == skb->sk) {  \
                typeof(sk) __sk = sk_to_full_sk(sk);                          \
                if (sk_fullsock(__sk))                                        \
                        __ret = __cgroup_bpf_run_filter_skb(__sk, skb,        \
                                                    CGROUP_INET_EGRESS);      \
        }                                                                     \
        __ret;                                                                \
})
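
/* Caller sketch (illustrative; the real call sites live in the network
 * stack's receive and IP output paths): a non-zero wrapper result is
 * propagated as an errno, and the static key keeps the whole check a
 * patched-out branch while no program is attached:
 *
 *	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
 *	if (err)
 *		return err;
 *
 * A prog verdict of 0 surfaces to the caller as -EPERM; 1 lets the skb
 * continue.
 */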

#define BPF_CGROUP_RUN_SK_PROG(sk, atype)                                     \
({                                                                            \
        int __ret = 0;                                                        \
        if (cgroup_bpf_enabled(atype)) {                                      \
                __ret = __cgroup_bpf_run_filter_sk(sk, atype);                \
        }                                                                     \
        __ret;                                                                \
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)                                     \
        BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk)                             \
        BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_RELEASE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk)                               \
        BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk)                               \
        BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET6_POST_BIND)
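
/* BPF-side sketch (libbpf conventions; the program name and policy are
 * illustrative): a BPF_PROG_TYPE_CGROUP_SOCK program run by
 * BPF_CGROUP_RUN_PROG_INET_SOCK(); a 0 verdict makes socket creation fail
 * with -EPERM in the wrapped __cgroup_bpf_run_filter_sk():
 *
 *	SEC("cgroup/sock")
 *	int deny_raw_sockets(struct bpf_sock *sk)
 *	{
 *		if (sk->type == SOCK_RAW)
 *			return 0;
 *		return 1;
 *	}
 */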

#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype)                              \
({                                                                            \
        u32 __unused_flags;                                                   \
        int __ret = 0;                                                        \
        if (cgroup_bpf_enabled(atype))                                        \
                __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype,   \
                                                          NULL,               \
                                                          &__unused_flags);   \
        __ret;                                                                \
})

#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx)                  \
({                                                                            \
        u32 __unused_flags;                                                   \
        int __ret = 0;                                                        \
        if (cgroup_bpf_enabled(atype)) {                                      \
                lock_sock(sk);                                                \
                __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype,   \
                                                          t_ctx,              \
                                                          &__unused_flags);   \
                release_sock(sk);                                             \
        }                                                                     \
        __ret;                                                                \
})
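
/* BPF-side sketch (addresses and ports are illustrative): a
 * BPF_PROG_TYPE_CGROUP_SOCK_ADDR program run by the connect4 wrappers
 * below may rewrite the destination through the bpf_sock_addr ctx before
 * the kernel continues with the (possibly modified) address:
 *
 *	SEC("cgroup/connect4")
 *	int redirect_connect4(struct bpf_sock_addr *ctx)
 *	{
 *		if (ctx->user_port == bpf_htons(1234)) {
 *			ctx->user_ip4 = bpf_htonl(0x7f000001);
 *			ctx->user_port = bpf_htons(4321);
 *		}
 *		return 1;
 *	}
 */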

/* BPF_CGROUP_INET4_BIND and BPF_CGROUP_INET6_BIND programs can return extra
 * flags in the upper bits of their return code. The only flag currently
 * supported (bit position 0 of the extracted flags) indicates that the
 * CAP_NET_BIND_SERVICE capability check should be bypassed
 * (BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE).
 */
#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, bind_flags)      \
({                                                                            \
        u32 __flags = 0;                                                      \
        int __ret = 0;                                                        \
        if (cgroup_bpf_enabled(atype)) {                                      \
                lock_sock(sk);                                                \
                __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype,   \
                                                          NULL, &__flags);    \
                release_sock(sk);                                             \
                if (__flags & BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE)           \
                        *bind_flags |= BIND_NO_CAP_NET_BIND_SERVICE;          \
        }                                                                     \
        __ret;                                                                \
})
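
/* BPF-side sketch of the return encoding (an assumption to verify against
 * the verifier's allowed return range, not a definitive reference): bit 0
 * of the prog's return value is the verdict and the remaining bits are
 * shifted down into the flags word consumed above, so returning 3 both
 * allows the bind and sets BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE, letting
 * an unprivileged process bind to a low port:
 *
 *	SEC("cgroup/bind4")
 *	int allow_unpriv_port80(struct bpf_sock_addr *ctx)
 *	{
 *		if (ctx->user_port == bpf_htons(80))
 *			return 3;
 *		return 1;
 *	}
 */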

#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk)                                    \
        ((cgroup_bpf_enabled(CGROUP_INET4_CONNECT) ||                         \
          cgroup_bpf_enabled(CGROUP_INET6_CONNECT)) &&                        \
         (sk)->sk_prot->pre_connect)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr)                          \
        BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr)                          \
        BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr)                     \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr)                     \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx)               \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx)               \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr)                      \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr)                      \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_RECVMSG, NULL)

/* The SOCK_OPS"_SK" macro should be used when sock_ops->sk is not a
 * fullsock and its parent fullsock cannot be reached through
 * sk_to_full_sk().
 *
 * For example, sock_ops->sk may be a request_sock serviced in syncookie
 * mode, whose listener socket is not attached via rsk_listener. In that
 * case the caller holds the (unlocked) listener socket, sets
 * sock_ops->sk to the request_sock, and calls this "_SK" variant with
 * the listener socket so that the listener's cgroup bpf programs are
 * run.
 *
 * Syncookie mode or not, calling bpf_setsockopt() on a listener socket
 * would not make sense anyway, so passing the request_sock as
 * sock_ops->sk to the bpf prog is appropriate here.
 */
#define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk)                   \
({                                                                      \
        int __ret = 0;                                                  \
        if (cgroup_bpf_enabled(CGROUP_SOCK_OPS))                        \
                __ret = __cgroup_bpf_run_filter_sock_ops(sk,            \
                                                         sock_ops,      \
                                                         CGROUP_SOCK_OPS); \
        __ret;                                                          \
})

#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)                                \
({                                                                            \
        int __ret = 0;                                                        \
        if (cgroup_bpf_enabled(CGROUP_SOCK_OPS) && (sock_ops)->sk) {          \
                typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk);              \
                if (__sk && sk_fullsock(__sk))                                \
                        __ret = __cgroup_bpf_run_filter_sock_ops(__sk,        \
                                                                 sock_ops,    \
                                                         CGROUP_SOCK_OPS);    \
        }                                                                     \
        __ret;                                                                \
})
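
/* BPF-side sketch (patterned on the kernel's tcp sample programs; the
 * value is illustrative): a BPF_PROG_TYPE_SOCK_OPS program dispatched by
 * the wrappers above answers an op through sock_ops->reply:
 *
 *	SEC("sockops")
 *	int set_initial_rwnd(struct bpf_sock_ops *skops)
 *	{
 *		if (skops->op == BPF_SOCK_OPS_RWND_INIT)
 *			skops->reply = 40;
 *		return 1;
 *	}
 */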

#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access)        \
({                                                                            \
        int __ret = 0;                                                        \
        if (cgroup_bpf_enabled(CGROUP_DEVICE))                                \
                __ret = __cgroup_bpf_check_dev_permission(atype, major, minor, \
                                                          access,             \
                                                          CGROUP_DEVICE);     \
                                                                              \
        __ret;                                                                \
})
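
/* BPF-side sketch (closely follows the dev_cgroup selftest): the ctx packs
 * the device type into the low 16 bits of access_type and the access mask
 * into the upper bits; this policy admits only char device 1:3
 * (/dev/null):
 *
 *	SEC("cgroup/dev")
 *	int allow_null_only(struct bpf_cgroup_dev_ctx *ctx)
 *	{
 *		short type = ctx->access_type & 0xFFFF;
 *
 *		if (type == BPF_DEVCG_DEV_CHAR &&
 *		    ctx->major == 1 && ctx->minor == 3)
 *			return 1;
 *		return 0;
 *	}
 */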

#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos)       \
({                                                                            \
        int __ret = 0;                                                        \
        if (cgroup_bpf_enabled(CGROUP_SYSCTL))                                \
                __ret = __cgroup_bpf_run_filter_sysctl(head, table, write,    \
                                                       buf, count, pos,       \
                                                       CGROUP_SYSCTL);        \
        __ret;                                                                \
})
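
/* BPF-side sketch (illustrative policy): a BPF_PROG_TYPE_CGROUP_SYSCTL
 * program; a 0 verdict makes the wrapped helper fail the access with
 * -EPERM, here turning every sysctl in the cgroup read-only:
 *
 *	SEC("cgroup/sysctl")
 *	int sysctl_read_only(struct bpf_sysctl *ctx)
 *	{
 *		return ctx->write ? 0 : 1;
 *	}
 */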

#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen,  \
                                       kernel_optval)                         \
({                                                                            \
        int __ret = 0;                                                        \
        if (cgroup_bpf_enabled(CGROUP_SETSOCKOPT))                            \
                __ret = __cgroup_bpf_run_filter_setsockopt(sock, level,       \
                                                           optname, optval,   \
                                                           optlen,            \
                                                           kernel_optval);    \
        __ret;                                                                \
})
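
/* BPF-side sketch (illustrative policy): a BPF_PROG_TYPE_CGROUP_SOCKOPT
 * program attached at BPF_CGROUP_SETSOCKOPT; 0 rejects the syscall with
 * -EPERM, 1 lets the (possibly rewritten) option continue into the
 * kernel:
 *
 *	SEC("cgroup/setsockopt")
 *	int block_ip_options(struct bpf_sockopt *ctx)
 *	{
 *		if (ctx->level == IPPROTO_IP && ctx->optname == IP_OPTIONS)
 *			return 0;
 *		return 1;
 *	}
 */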

#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen)                              \
({                                                                            \
        int __ret = 0;                                                        \
        if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT))                            \
                get_user(__ret, optlen);                                      \
        __ret;                                                                \
})

#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen,  \
                                       max_optlen, retval)                    \
({                                                                            \
        int __ret = retval;                                                   \
        if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT))                            \
                if (!(sock)->sk_prot->bpf_bypass_getsockopt ||                \
                    !INDIRECT_CALL_INET_1((sock)->sk_prot->bpf_bypass_getsockopt, \
                                          tcp_bpf_bypass_getsockopt,          \
                                          level, optname))                    \
                        __ret = __cgroup_bpf_run_filter_getsockopt(           \
                                sock, level, optname, optval, optlen,         \
                                max_optlen, retval);                          \
        __ret;                                                                \
})

#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval,     \
                                            optlen, retval)                   \
({                                                                            \
        int __ret = retval;                                                   \
        if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT))                            \
                __ret = __cgroup_bpf_run_filter_getsockopt_kern(              \
                        sock, level, optname, optval, optlen, retval);        \
        __ret;                                                                \
})
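
/* BPF-side sketch (illustrative): the getsockopt flavor additionally sees
 * the kernel's result in ctx->retval and may inspect or override the
 * value copied back to user space:
 *
 *	SEC("cgroup/getsockopt")
 *	int audit_sndbuf_reads(struct bpf_sockopt *ctx)
 *	{
 *		if (ctx->level == SOL_SOCKET && ctx->optname == SO_SNDBUF)
 *			bpf_printk("SO_SNDBUF read, retval %d", ctx->retval);
 *		return 1;
 *	}
 */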

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
                           enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
                           enum bpf_prog_type ptype);
int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
                          union bpf_attr __user *uattr);
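
/* User-space sketch (libbpf; the cgroup path and fds are illustrative):
 * these are the syscall backends that bpf_prog_attach() and
 * bpf_link_create() reach for cgroup targets:
 *
 *	int cg_fd = open("/sys/fs/cgroup/mygroup", O_RDONLY);
 *
 *	err = bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_INET_INGRESS, 0);
 *
 * or, link-based, which keeps the attachment alive via an fd:
 *
 *	link_fd = bpf_link_create(prog_fd, cg_fd, BPF_CGROUP_INET_INGRESS,
 *				  NULL);
 */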
#else

static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
static inline void cgroup_bpf_offline(struct cgroup *cgrp) {}

static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
                                         enum bpf_prog_type ptype,
                                         struct bpf_prog *prog)
{
        return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
                                         enum bpf_prog_type ptype)
{
        return -EINVAL;
}

static inline int cgroup_bpf_link_attach(const union bpf_attr *attr,
                                         struct bpf_prog *prog)
{
        return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
                                        union bpf_attr __user *uattr)
{
        return -EINVAL;
}

static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
                                            struct bpf_map *map) { return 0; }
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
        struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
static inline void bpf_cgroup_storage_free(
        struct bpf_cgroup_storage *storage) {}
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
                                                 void *value)
{
        return 0;
}
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
                                                   void *key, void *value,
                                                   u64 flags)
{
        return 0;
}

#define cgroup_bpf_enabled(atype) (0)
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype) ({ 0; })
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, flags) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) ({ 0; })
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
                                       optlen, max_optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
                                            optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
                                       kernel_optval) ({ 0; })

#define for_each_cgroup_storage_type(stype) for (; false; )

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */