linux/include/linux/bpf-cgroup.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/bpf.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/rbtree.h>
#include <uapi/linux/bpf.h>

struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_map;
struct bpf_prog;
struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;
struct ctl_table;
struct ctl_table_header;

#ifdef CONFIG_CGROUP_BPF

extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
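
/*
 * cgroup_bpf_enabled_key is switched on when the first cgroup-bpf program
 * is attached, so on systems that never attach one the BPF_CGROUP_RUN_*
 * wrappers below reduce to a statically patched, never-taken branch.
 */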

DECLARE_PER_CPU(struct bpf_cgroup_storage*,
                bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);

#define for_each_cgroup_storage_type(stype) \
        for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)

struct bpf_cgroup_storage_map;

struct bpf_storage_buffer {
        struct rcu_head rcu;
        char data[];
};

struct bpf_cgroup_storage {
        union {
                struct bpf_storage_buffer *buf;
                void __percpu *percpu_buf;
        };
        struct bpf_cgroup_storage_map *map;
        struct bpf_cgroup_storage_key key;
        struct list_head list;
        struct rb_node node;
        struct rcu_head rcu;
};
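
/*
 * The union above holds either the shared buffer (BPF_CGROUP_STORAGE_SHARED)
 * or the per-cpu allocation (BPF_CGROUP_STORAGE_PERCPU); which member is
 * live follows from the owning map's type, see cgroup_storage_type() below.
 */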

struct bpf_prog_list {
        struct list_head node;
        struct bpf_prog *prog;
        struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

struct bpf_prog_array;

struct cgroup_bpf {
        /* array of effective progs in this cgroup */
        struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];

        /* Programs attached directly to this cgroup, plus their attach
         * flags. When flags is 0 or BPF_F_ALLOW_OVERRIDE, the progs list
         * holds at most one element; with BPF_F_ALLOW_MULTI it can hold
         * up to BPF_CGROUP_MAX_PROGS.
         */
        struct list_head progs[MAX_BPF_ATTACH_TYPE];
        u32 flags[MAX_BPF_ATTACH_TYPE];

        /* temp storage for effective prog array used by prog_attach/detach */
        struct bpf_prog_array __rcu *inactive;
};
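
/*
 * Attach-flag semantics (see the BPF_F_ALLOW_* description in
 * uapi/linux/bpf.h): with no flags, no other program may be attached in
 * the subtree; with BPF_F_ALLOW_OVERRIDE, a program attached to a
 * sub-cgroup takes precedence over this one; with BPF_F_ALLOW_MULTI,
 * sub-cgroup programs run in addition to this one.
 */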

void cgroup_bpf_put(struct cgroup *cgrp);
int cgroup_bpf_inherit(struct cgroup *cgrp);

int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
                        enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
                        enum bpf_attach_type type);
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
                       union bpf_attr __user *uattr);

/* Wrappers for __cgroup_bpf_*(), protected by cgroup_mutex */
int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
                      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
                      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
                     union bpf_attr __user *uattr);

int __cgroup_bpf_run_filter_skb(struct sock *sk,
                                struct sk_buff *skb,
                                enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
                               enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
                                      struct sockaddr *uaddr,
                                      enum bpf_attach_type type,
                                      void *t_ctx);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
                                     struct bpf_sock_ops_kern *sock_ops,
                                     enum bpf_attach_type type);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
                                      short access, enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
                                   struct ctl_table *table, int write,
                                   void __user *buf, size_t *pcount,
                                   loff_t *ppos, void **new_buf,
                                   enum bpf_attach_type type);

static inline enum bpf_cgroup_storage_type cgroup_storage_type(
        struct bpf_map *map)
{
        if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
                return BPF_CGROUP_STORAGE_PERCPU;

        return BPF_CGROUP_STORAGE_SHARED;
}

static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage
                                          *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
{
        enum bpf_cgroup_storage_type stype;

        for_each_cgroup_storage_type(stype)
                this_cpu_write(bpf_cgroup_storage[stype], storage[stype]);
}
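
/*
 * bpf_cgroup_storage_set() publishes the storage pointers of the program
 * about to run in the per-cpu array declared above; the BPF_PROG_RUN_ARRAY
 * machinery in linux/bpf.h calls it before each program so that the
 * bpf_get_local_storage() helper finds the right buffer for the current
 * program and cpu.
 */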

struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
                                        enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
                             struct cgroup *cgroup,
                             enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map);
void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map);

int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
                                     void *value, u64 flags);

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)                             \
({                                                                            \
        int __ret = 0;                                                        \
        if (cgroup_bpf_enabled)                                               \
                __ret = __cgroup_bpf_run_filter_skb(sk, skb,                  \
                                                    BPF_CGROUP_INET_INGRESS); \
                                                                              \
        __ret;                                                                \
})
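
/*
 * The ingress wrapper runs from the socket receive path; e.g.
 * sk_filter_trim_cap() in net/core/filter.c does, roughly:
 *
 *	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
 *	if (err)
 *		return err;
 *
 * so a non-zero return drops the packet before it reaches the socket.
 */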

#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)                               \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled && sk && sk == skb->sk) {                       \
                typeof(sk) __sk = sk_to_full_sk(sk);                           \
                if (sk_fullsock(__sk))                                         \
                        __ret = __cgroup_bpf_run_filter_skb(__sk, skb,         \
                                                      BPF_CGROUP_INET_EGRESS); \
        }                                                                      \
        __ret;                                                                 \
})
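
/*
 * On egress the programs only run for traffic originated by a local full
 * socket: the sk == skb->sk test skips forwarded packets, and
 * sk_to_full_sk() maps a request socket to its listener so the cgroup
 * lookup always operates on a full socket.
 */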

#define BPF_CGROUP_RUN_SK_PROG(sk, type)                                       \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled) {                                              \
                __ret = __cgroup_bpf_run_filter_sk(sk, type);                  \
        }                                                                      \
        __ret;                                                                 \
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)                                      \
        BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk)                                \
        BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk)                                \
        BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)
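
/*
 * These wrappers run at socket-lifecycle points; e.g. inet_create() in
 * net/ipv4/af_inet.c runs BPF_CGROUP_RUN_PROG_INET_SOCK() once a new
 * AF_INET socket is set up, and __inet_bind()/__inet6_bind() run the
 * POST_BIND variants after a local port has been assigned.
 */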

#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type)                                \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled)                                                \
                __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,     \
                                                          NULL);               \
        __ret;                                                                 \
})

#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx)                    \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled) {                                              \
                lock_sock(sk);                                                 \
                __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,     \
                                                          t_ctx);              \
                release_sock(sk);                                              \
        }                                                                      \
        __ret;                                                                 \
})
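
/*
 * The _LOCK variant takes the socket lock around the program run, since
 * sock_addr programs may inspect and rewrite socket and address state;
 * it is meant for call sites that reach this point without the lock held,
 * e.g. udp_sendmsg() for the UDP4_SENDMSG hook below.
 */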

#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr)                              \
        BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr)                              \
        BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND)

#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \
                                            sk->sk_prot->pre_connect)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr)                           \
        BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr)                           \
        BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr)                      \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr)                      \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx)                \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx)                \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr)                       \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr)                       \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)                                 \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled && (sock_ops)->sk) {                            \
                struct sock *__sk = sk_to_full_sk((sock_ops)->sk);             \
                if (__sk && sk_fullsock(__sk))                                 \
                        __ret = __cgroup_bpf_run_filter_sock_ops(__sk,         \
                                                                 sock_ops,     \
                                                         BPF_CGROUP_SOCK_OPS); \
        }                                                                      \
        __ret;                                                                 \
})
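
/*
 * sock_ops callbacks can fire while only a request or timewait socket is
 * at hand; sk_to_full_sk() maps a request socket to its listener and the
 * sk_fullsock() test skips anything that still is not a full socket.
 * __sk is declared as a plain struct sock * because, unlike the skb
 * wrappers above, this macro has no sk argument to apply typeof() to.
 */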

#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access)         \
({                                                                            \
        int __ret = 0;                                                        \
        if (cgroup_bpf_enabled)                                               \
                __ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
                                                          access,             \
                                                          BPF_CGROUP_DEVICE); \
                                                                              \
        __ret;                                                                \
})
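
/*
 * The device-cgroup permission checks (the devcgroup_check_permission()
 * path) consult this wrapper; a BPF_PROG_TYPE_CGROUP_DEVICE program
 * returning 0 denies the access.
 */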

#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos, nbuf)  \
({                                                                             \
        int __ret = 0;                                                         \
        if (cgroup_bpf_enabled)                                                \
                __ret = __cgroup_bpf_run_filter_sysctl(head, table, write,     \
                                                       buf, count, pos, nbuf,  \
                                                       BPF_CGROUP_SYSCTL);     \
        __ret;                                                                 \
})
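
/*
 * This runs from the procfs sysctl handler (proc_sys_call_handler() in
 * fs/proc/proc_sysctl.c); BPF_CGROUP_SYSCTL programs can observe the
 * access, veto it, or, on writes, supply a replacement value through the
 * new_buf argument.
 */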

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
                           enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
                           enum bpf_prog_type ptype);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
                          union bpf_attr __user *uattr);
#else

struct bpf_prog;
struct cgroup_bpf {};
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }

static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
                                         enum bpf_prog_type ptype,
                                         struct bpf_prog *prog)
{
        return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
                                         enum bpf_prog_type ptype)
{
        return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
                                        union bpf_attr __user *uattr)
{
        return -EINVAL;
}

static inline void bpf_cgroup_storage_set(
        struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {}
static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog,
                                            struct bpf_map *map) { return 0; }
static inline void bpf_cgroup_storage_release(struct bpf_prog *prog,
                                              struct bpf_map *map) {}
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
        struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
static inline void bpf_cgroup_storage_free(
        struct bpf_cgroup_storage *storage) {}
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
                                                 void *value)
{
        return 0;
}
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
                                                   void *key, void *value,
                                                   u64 flags)
{
        return 0;
}

#define cgroup_bpf_enabled (0)
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos, nbuf) ({ 0; })

#define for_each_cgroup_storage_type(stype) for (; false; )

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */