1
2
3
4
5
6
7
8
9
10
11#include <linux/kernel.h>
12#include <linux/atomic.h>
13#include <linux/cgroup.h>
14#include <linux/slab.h>
15#include <linux/bpf.h>
16#include <linux/bpf-cgroup.h>
17#include <net/sock.h>
18
/* Flipped on while at least one bpf program is attached to some cgroup:
 * incremented once per attached program in __cgroup_bpf_update() and
 * decremented on detach there and in cgroup_bpf_put() below, so the
 * run-filter hooks can be gated cheaply when cgroup-bpf is unused.
 */
DEFINE_STATIC_KEY_FALSE(cgroup_bpf_enabled_key);
EXPORT_SYMBOL(cgroup_bpf_enabled_key);
21
22
23
24
25
26void cgroup_bpf_put(struct cgroup *cgrp)
27{
28 unsigned int type;
29
30 for (type = 0; type < ARRAY_SIZE(cgrp->bpf.prog); type++) {
31 struct bpf_prog *prog = cgrp->bpf.prog[type];
32
33 if (prog) {
34 bpf_prog_put(prog);
35 static_branch_dec(&cgroup_bpf_enabled_key);
36 }
37 }
38}
39
40
41
42
43
44
45void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent)
46{
47 unsigned int type;
48
49 for (type = 0; type < ARRAY_SIZE(cgrp->bpf.effective); type++) {
50 struct bpf_prog *e;
51
52 e = rcu_dereference_protected(parent->bpf.effective[type],
53 lockdep_is_held(&cgroup_mutex));
54 rcu_assign_pointer(cgrp->bpf.effective[type], e);
55 cgrp->bpf.disallow_override[type] = parent->bpf.disallow_override[type];
56 }
57}
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
/**
 * __cgroup_bpf_update() - Update the pinned program of a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @parent: The parent of @cgrp, or %NULL if it has none
 * @prog: A new program to pin, or %NULL to unpin the current program
 * @type: The type of pinning operation (ingress/egress/sock-create/...)
 * @new_overridable: whether descendants are allowed to override @prog
 *
 * Each cgroup keeps, per attach type, both the program pinned to it
 * ("prog") and the program that actually runs for it ("effective") —
 * either its own or one inherited from an ancestor.  This helper pins
 * (or unpins) @prog on @cgrp and rewrites the effective pointers of the
 * whole subtree, stopping at descendants that pin their own program.
 *
 * Must be called with cgroup_mutex held (see the
 * rcu_dereference_protected/lockdep assertion below).
 */
int __cgroup_bpf_update(struct cgroup *cgrp, struct cgroup *parent,
			struct bpf_prog *prog, enum bpf_attach_type type,
			bool new_overridable)
{
	struct bpf_prog *old_prog, *effective = NULL;
	struct cgroup_subsys_state *pos;
	bool overridable = true;

	if (parent) {
		overridable = !parent->bpf.disallow_override[type];
		effective = rcu_dereference_protected(parent->bpf.effective[type],
						      lockdep_is_held(&cgroup_mutex));
	}

	if (prog && effective && !overridable)
		/* if parent has non-overridable prog attached, disallow
		 * attaching new programs to descendent cgroup
		 */
		return -EPERM;

	if (prog && effective && overridable != new_overridable)
		/* if parent has overridable prog attached, only allow
		 * overridable programs in descendent cgroup
		 */
		return -EPERM;

	old_prog = cgrp->bpf.prog[type];

	if (prog) {
		overridable = new_overridable;
		effective = prog;
		if (old_prog &&
		    cgrp->bpf.disallow_override[type] == new_overridable)
			/* disallow attaching non-overridable on top
			 * of existing overridable in this cgroup
			 * and vice versa
			 */
			return -EPERM;
	}

	if (!prog && !old_prog)
		/* report error when trying to detach and nothing is attached */
		return -ENOENT;

	cgrp->bpf.prog[type] = prog;

	css_for_each_descendant_pre(pos, &cgrp->self) {
		struct cgroup *desc = container_of(pos, struct cgroup, self);

		/* skip the subtree if the descendant has its own program;
		 * its effective pointer (and everything below it) must not
		 * be touched by this update
		 */
		if (desc->bpf.prog[type] && desc != cgrp) {
			pos = css_rightmost_descendant(pos);
		} else {
			rcu_assign_pointer(desc->bpf.effective[type],
					   effective);
			desc->bpf.disallow_override[type] = !overridable;
		}
	}

	if (prog)
		static_branch_inc(&cgroup_bpf_enabled_key);

	if (old_prog) {
		bpf_prog_put(old_prog);
		static_branch_dec(&cgroup_bpf_enabled_key);
	}
	return 0;
}
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
/**
 * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
 * @sk: The socket sending or receiving traffic
 * @skb: The skb that is being sent or received
 * @type: The type of program to be executed
 *
 * If no socket is passed, or the socket is not of type INET or INET6,
 * this function does nothing and returns 0.
 *
 * The program type passed in via @type must be suitable for network
 * filtering. No further check is performed to assert that.
 *
 * Return: %-EPERM if an effective program was found and returned != 1
 * during execution; 0 in all other cases.
 */
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type)
{
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret = 0;

	if (!sk || !sk_fullsock(sk))
		return 0;

	if (sk->sk_family != AF_INET &&
	    sk->sk_family != AF_INET6)
		return 0;

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);

	rcu_read_lock();

	prog = rcu_dereference(cgrp->bpf.effective[type]);
	if (prog) {
		/* run the program with skb->data at the network header,
		 * then restore the original data pointer for the caller
		 */
		unsigned int offset = skb->data - skb_network_header(skb);

		__skb_push(skb, offset);
		ret = bpf_prog_run_save_cb(prog, skb) == 1 ? 0 : -EPERM;
		__skb_pull(skb, offset);
	}

	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217int __cgroup_bpf_run_filter_sk(struct sock *sk,
218 enum bpf_attach_type type)
219{
220 struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
221 struct bpf_prog *prog;
222 int ret = 0;
223
224
225 rcu_read_lock();
226
227 prog = rcu_dereference(cgrp->bpf.effective[type]);
228 if (prog)
229 ret = BPF_PROG_RUN(prog, sk) == 1 ? 0 : -EPERM;
230
231 rcu_read_unlock();
232
233 return ret;
234}
235EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);
236