linux/include/linux/bpf-cgroup.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/bpf.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/percpu-refcount.h>
#include <linux/rbtree.h>
#include <uapi/linux/bpf.h>

struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_map;
struct bpf_prog;
struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;
struct ctl_table;
struct ctl_table_header;

#ifdef CONFIG_CGROUP_BPF

extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
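
/*
 * Hedged sketch (not part of this header): the static key above is
 * expected to be bumped when a program is attached and dropped on detach,
 * so that every BPF_CGROUP_RUN_*() hook below compiles down to a
 * patched-out branch while no cgroup-bpf program is attached anywhere:
 *
 *	static_branch_inc(&cgroup_bpf_enabled_key);	// on attach
 *	static_branch_dec(&cgroup_bpf_enabled_key);	// on detach
 */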

DECLARE_PER_CPU(struct bpf_cgroup_storage*,
		bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);

#define for_each_cgroup_storage_type(stype) \
	for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
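
/*
 * Hedged usage sketch: a typical walk over both storage flavours, e.g.
 * when allocating per-type storage for a program (the variable names are
 * illustrative, not taken from any particular call site):
 *
 *	enum bpf_cgroup_storage_type stype;
 *
 *	for_each_cgroup_storage_type(stype)
 *		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
 */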

struct bpf_cgroup_storage_map;

struct bpf_storage_buffer {
	struct rcu_head rcu;
	char data[];
};
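
/*
 * Illustrative only: "data" is a flexible array member, so a buffer sized
 * for a given map value is expected to come from a single allocation,
 * roughly:
 *
 *	struct bpf_storage_buffer *buf;
 *
 *	buf = kmalloc(sizeof(*buf) + map->value_size, GFP_KERNEL);
 */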

struct bpf_cgroup_storage {
	union {
		struct bpf_storage_buffer *buf;
		void __percpu *percpu_buf;
	};
	struct bpf_cgroup_storage_map *map;
	struct bpf_cgroup_storage_key key;
	struct list_head list;
	struct rb_node node;
	struct rcu_head rcu;
};

struct bpf_prog_list {
	struct list_head node;
	struct bpf_prog *prog;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

struct bpf_prog_array;

struct cgroup_bpf {
	/* array of effective progs in this cgroup */
	struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];

	/* progs attached to this cgroup, plus their attach flags;
	 * when flags is 0 or BPF_F_ALLOW_OVERRIDE, the progs list holds
	 * either zero or one element;
	 * with BPF_F_ALLOW_MULTI it can hold up to BPF_CGROUP_MAX_PROGS
	 */
	struct list_head progs[MAX_BPF_ATTACH_TYPE];
	u32 flags[MAX_BPF_ATTACH_TYPE];

	/* temp storage for effective prog array used by prog_attach/detach */
	struct bpf_prog_array *inactive;

	/* reference counter used to detach bpf programs after cgroup removal */
	struct percpu_ref refcnt;

	/* cgroup_bpf is released using a work queue */
	struct work_struct release_work;
};
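
/*
 * Hedged sketch of the flag semantics documented above, as driven from
 * userspace via bpf(2) (the fd values are placeholders):
 *
 *	union bpf_attr attr = {
 *		.target_fd	= cgroup_fd,
 *		.attach_bpf_fd	= prog_fd,
 *		.attach_type	= BPF_CGROUP_INET_EGRESS,
 *		.attach_flags	= BPF_F_ALLOW_MULTI,	// or BPF_F_ALLOW_OVERRIDE, or 0
 *	};
 *
 *	syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */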

int cgroup_bpf_inherit(struct cgroup *cgrp);
void cgroup_bpf_offline(struct cgroup *cgrp);

int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type);
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr);

/* Wrappers for __cgroup_bpf_*() protected by cgroup_mutex */
int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		     union bpf_attr __user *uattr);
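
/*
 * Illustrative shape of one such wrapper (assumption: the double-underscore
 * variants require cgroup_mutex to already be held by the caller):
 *
 *	int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
 *			      enum bpf_attach_type type, u32 flags)
 *	{
 *		int ret;
 *
 *		mutex_lock(&cgroup_mutex);
 *		ret = __cgroup_bpf_attach(cgrp, prog, type, flags);
 *		mutex_unlock(&cgroup_mutex);
 *		return ret;
 *	}
 */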

int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type,
				      void *t_ctx);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   void __user *buf, size_t *pcount,
				   loff_t *ppos, void **new_buf,
				   enum bpf_attach_type type);

int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
				       int *optname, char __user *optval,
				       int *optlen, char **kernel_optval);
int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
				       int optname, char __user *optval,
				       int __user *optlen, int max_optlen,
				       int retval);

static inline enum bpf_cgroup_storage_type cgroup_storage_type(
	struct bpf_map *map)
{
	if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		return BPF_CGROUP_STORAGE_PERCPU;

	return BPF_CGROUP_STORAGE_SHARED;
}

static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage
					  *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		this_cpu_write(bpf_cgroup_storage[stype], storage[stype]);
}
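
/*
 * Hedged sketch: callers are expected to publish a prog's storage pointers
 * right before invoking it, along the lines of what the prog-array runner
 * does (field names as in struct bpf_prog_array_item):
 *
 *	bpf_cgroup_storage_set(item->cgroup_storage);
 *	ret = BPF_PROG_RUN(item->prog, ctx);
 */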

struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
					enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
			     struct cgroup *cgroup,
			     enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map);
void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map);

int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
				     void *value, u64 flags);

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled)						      \
		__ret = __cgroup_bpf_run_filter_skb(sk, skb,		      \
						    BPF_CGROUP_INET_INGRESS); \
									      \
	__ret;								      \
})
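
/*
 * Illustrative call site (assumption: this is how the ingress hook is
 * consumed on the receive path, e.g. from sk_filter_trim_cap()):
 *
 *	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
 *	if (err)
 *		return err;
 */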

#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && sk && sk == skb->sk) {		       \
		typeof(sk) __sk = sk_to_full_sk(sk);			       \
		if (sk_fullsock(__sk))					       \
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb,	       \
						      BPF_CGROUP_INET_EGRESS); \
	}								       \
	__ret;								       \
})
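
/*
 * Hedged note with an illustrative caller: the sk == skb->sk check skips
 * forwarded skbs that merely borrow an output socket, and sk_to_full_sk()
 * maps request/timewait sockets to the full listener socket. A consumer on
 * the IPv4 output path might look roughly like:
 *
 *	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
 *	if (ret)
 *		kfree_skb(skb);
 */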

#define BPF_CGROUP_RUN_SK_PROG(sk, type)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled) {					       \
		__ret = __cgroup_bpf_run_filter_sk(sk, type);		       \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)

#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled)						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,     \
							  NULL);	       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx)		       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled) {					       \
		lock_sock(sk);						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,     \
							  t_ctx);	       \
		release_sock(sk);					       \
	}								       \
	__ret;								       \
})
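
/*
 * Hedged sketch: the _LOCK variant is for paths that enter without the
 * socket lock held; t_ctx carries optional per-hook context (NULL when
 * unused). An unlocked connect path would use it roughly as:
 *
 *	err = BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr,
 *					  BPF_CGROUP_INET4_CONNECT, NULL);
 *	if (err)
 *		return err;
 */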

#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND)

#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled &&	       \
					    (sk)->sk_prot->pre_connect)
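
/*
 * Illustrative caller (assumption: mirrors how a stream connect path gates
 * the protocol's pre_connect step on this macro):
 *
 *	if (BPF_CGROUP_PRE_CONNECT_ENABLED(sk)) {
 *		err = sk->sk_prot->pre_connect(sk, uaddr, addr_len);
 *		if (err)
 *			return err;
 *	}
 */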

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && (sock_ops)->sk) {			       \
		struct sock *__sk = sk_to_full_sk((sock_ops)->sk);	       \
		if (__sk && sk_fullsock(__sk))				       \
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk,	       \
								 sock_ops,     \
							 BPF_CGROUP_SOCK_OPS); \
	}								       \
	__ret;								       \
})
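
/*
 * Illustrative caller (assumption: modelled on how TCP dispatches sock_ops
 * programs via a bpf_sock_ops_kern on the stack):
 *
 *	struct bpf_sock_ops_kern sock_ops;
 *
 *	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
 *	sock_ops.sk = sk;
 *	sock_ops.op = op;
 *	ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
 */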

#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access)	      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled)						      \
		__ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
							  access,	      \
							  BPF_CGROUP_DEVICE); \
									      \
	__ret;								      \
})
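/*
 * Hedged example: a device-cgroup permission check would feed the device
 * type and access mask through this hook and turn a nonzero result into
 * -EPERM, e.g.:
 *
 *	if (BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(BPF_DEVCG_DEV_CHAR,
 *					      major, minor,
 *					      BPF_DEVCG_ACC_READ))
 *		return -EPERM;
 */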
#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos, nbuf)  \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled)						       \
		__ret = __cgroup_bpf_run_filter_sysctl(head, table, write,     \
						       buf, count, pos, nbuf,  \
						       BPF_CGROUP_SYSCTL);     \
	__ret;								       \
})
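
/*
 * Illustrative caller (assumption: modelled on the /proc/sys read/write
 * handler, which lets the prog veto the access or substitute a fresh
 * buffer via nbuf):
 *
 *	error = BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf,
 *					   &count, ppos, &new_buf);
 */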

#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen,   \
				       kernel_optval)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled)						       \
		__ret = __cgroup_bpf_run_filter_setsockopt(sock, level,	       \
							   optname, optval,    \
							   optlen,	       \
							   kernel_optval);     \
	__ret;								       \
})
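
/*
 * Illustrative caller (assumption: mirrors the setsockopt syscall path,
 * where the prog may rewrite level/optname/optval and hand back a kernel
 * buffer through kernel_optval):
 *
 *	err = BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock->sk, &level, &optname,
 *					     optval, &optlen, &kernel_optval);
 */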

#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled)						       \
		get_user(__ret, optlen);				       \
	__ret;								       \
})
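
/*
 * Hedged sketch: the caller snapshots the user-supplied optlen before any
 * prog can shrink it, roughly as the getsockopt syscall path would:
 *
 *	int max_optlen = BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen);
 */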

#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen,   \
				       max_optlen, retval)		       \
({									       \
	int __ret = retval;						       \
	if (cgroup_bpf_enabled)						       \
		__ret = __cgroup_bpf_run_filter_getsockopt(sock, level,	       \
							   optname, optval,    \
							   optlen, max_optlen, \
							   retval);	       \
	__ret;								       \
})
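
/*
 * Illustrative caller (assumption: mirrors the getsockopt syscall path;
 * retval feeds the protocol handler's result through the prog, which may
 * override both the value and the result code):
 *
 *	err = BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock->sk, level, optname,
 *					     optval, optlen, max_optlen,
 *					     err);
 */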

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr);
#else

struct bpf_prog;
struct cgroup_bpf {};
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
static inline void cgroup_bpf_offline(struct cgroup *cgrp) {}

static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
					union bpf_attr __user *uattr)
{
	return -EINVAL;
}

static inline void bpf_cgroup_storage_set(
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {}
static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog,
					    struct bpf_map *map) { return 0; }
static inline void bpf_cgroup_storage_release(struct bpf_prog *prog,
					      struct bpf_map *map) {}
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
	struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
static inline void bpf_cgroup_storage_free(
	struct bpf_cgroup_storage *storage) {}
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
						 void *value)
{
	return 0;
}
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
						   void *key, void *value,
						   u64 flags)
{
	return 0;
}

#define cgroup_bpf_enabled (0)
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos, nbuf) ({ 0; })
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
				       optlen, max_optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
				       kernel_optval) ({ 0; })

#define for_each_cgroup_storage_type(stype) for (; false; )

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */