#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/filter.h>
#include <linux/bpf.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>

#include <linux/tc_act/tc_bpf.h>
#include <net/tc_act/tc_bpf.h>

#define BPF_TAB_MASK		15
#define ACT_BPF_NAME_LEN	256

struct tcf_bpf_cfg {
	struct bpf_prog *filter;
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	u32 bpf_fd;
	u16 bpf_num_ops;
	bool is_ebpf;
};

static int bpf_net_id;
static struct tc_action_ops act_bpf_ops;
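
/* Packet-path entry point of the action: run the attached classic or
 * extended BPF program on the skb and map its return code onto a TC
 * verdict.
 */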
static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act,
		   struct tcf_result *res)
{
	struct tcf_bpf *prog = to_bpf(act);
	struct bpf_prog *filter;
	int action, filter_res;
	bool at_ingress = G_TC_AT(skb->tc_verd) & AT_INGRESS;

	if (unlikely(!skb_mac_header_was_set(skb)))
		return TC_ACT_UNSPEC;

	tcf_lastuse_update(&prog->tcf_tm);
	bstats_cpu_update(this_cpu_ptr(prog->common.cpu_bstats), skb);

	rcu_read_lock();
	filter = rcu_dereference(prog->filter);
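	/* At ingress the skb has already been pulled past the MAC header,
	 * but TC BPF programs expect skb->data to point at it, so push it
	 * back for the duration of the program run.
	 */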
	if (at_ingress) {
		__skb_push(skb, skb->mac_len);
		bpf_compute_data_end(skb);
		filter_res = BPF_PROG_RUN(filter, skb);
		__skb_pull(skb, skb->mac_len);
	} else {
		bpf_compute_data_end(skb);
		filter_res = BPF_PROG_RUN(filter, skb);
	}
	rcu_read_unlock();
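
	/* The program's return code selects the verdict: well-known TC_ACT_*
	 * opcodes are used as-is, TC_ACT_UNSPEC falls back to the default
	 * action configured from tc, and any unknown value degrades to
	 * TC_ACT_UNSPEC.
	 */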
	switch (filter_res) {
	case TC_ACT_PIPE:
	case TC_ACT_RECLASSIFY:
	case TC_ACT_OK:
	case TC_ACT_REDIRECT:
		action = filter_res;
		break;
	case TC_ACT_SHOT:
		action = filter_res;
		qstats_drop_inc(this_cpu_ptr(prog->common.cpu_qstats));
		break;
	case TC_ACT_UNSPEC:
		action = prog->tcf_action;
		break;
	default:
		action = TC_ACT_UNSPEC;
		break;
	}

	return action;
}

static bool tcf_bpf_is_ebpf(const struct tcf_bpf *prog)
{
	return !prog->bpf_ops;
}

static int tcf_bpf_dump_bpf_info(const struct tcf_bpf *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_ACT_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_ACT_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int tcf_bpf_dump_ebpf_info(const struct tcf_bpf *prog,
				  struct sk_buff *skb)
{
	if (nla_put_u32(skb, TCA_ACT_BPF_FD, prog->bpf_fd))
		return -EMSGSIZE;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_ACT_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	return 0;
}

static int tcf_bpf_dump(struct sk_buff *skb, struct tc_action *act,
			int bind, int ref)
{
	unsigned char *tp = skb_tail_pointer(skb);
	struct tcf_bpf *prog = to_bpf(act);
	struct tc_act_bpf opt = {
		.index   = prog->tcf_index,
		.refcnt  = prog->tcf_refcnt - ref,
		.bindcnt = prog->tcf_bindcnt - bind,
		.action  = prog->tcf_action,
	};
	struct tcf_t tm;
	int ret;

	if (nla_put(skb, TCA_ACT_BPF_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	if (tcf_bpf_is_ebpf(prog))
		ret = tcf_bpf_dump_ebpf_info(prog, skb);
	else
		ret = tcf_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	tcf_tm_dump(&tm, &prog->tcf_tm);
	if (nla_put_64bit(skb, TCA_ACT_BPF_TM, sizeof(tm), &tm,
			  TCA_ACT_BPF_PAD))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, tp);
	return -1;
}

static const struct nla_policy act_bpf_policy[TCA_ACT_BPF_MAX + 1] = {
	[TCA_ACT_BPF_PARMS]	= { .len = sizeof(struct tc_act_bpf) },
	[TCA_ACT_BPF_FD]	= { .type = NLA_U32 },
	[TCA_ACT_BPF_NAME]	= { .type = NLA_NUL_STRING,
				    .len = ACT_BPF_NAME_LEN },
	[TCA_ACT_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_ACT_BPF_OPS]	= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

static int tcf_bpf_init_from_ops(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_ACT_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_ACT_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	memcpy(bpf_ops, nla_data(tb[TCA_ACT_BPF_OPS]), bpf_size);

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	cfg->bpf_ops = bpf_ops;
	cfg->bpf_num_ops = bpf_num_ops;
	cfg->filter = fp;
	cfg->is_ebpf = false;

	return 0;
}

static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
{
	struct bpf_prog *fp;
	char *name = NULL;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_ACT_BPF_FD]);

	fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_ACT);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_ACT_BPF_NAME]) {
		name = kmemdup(nla_data(tb[TCA_ACT_BPF_NAME]),
			       nla_len(tb[TCA_ACT_BPF_NAME]),
			       GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	cfg->bpf_fd = bpf_fd;
	cfg->bpf_name = name;
	cfg->filter = fp;
	cfg->is_ebpf = true;

	return 0;
}

static void tcf_bpf_cfg_cleanup(const struct tcf_bpf_cfg *cfg)
{
	if (cfg->is_ebpf)
		bpf_prog_put(cfg->filter);
	else
		bpf_prog_destroy(cfg->filter);

	kfree(cfg->bpf_ops);
	kfree(cfg->bpf_name);
}

static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog,
				  struct tcf_bpf_cfg *cfg)
{
	cfg->is_ebpf = tcf_bpf_is_ebpf(prog);
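	/* prog->filter only changes under the RTNL lock or during final
	 * cleanup, so it is safe to dereference it here without holding
	 * rcu_read_lock().
	 */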
	cfg->filter = rcu_dereference_protected(prog->filter, 1);

	cfg->bpf_ops = prog->bpf_ops;
	cfg->bpf_name = prog->bpf_name;
}

static int tcf_bpf_init(struct net *net, struct nlattr *nla,
			struct nlattr *est, struct tc_action **act,
			int replace, int bind)
{
	struct tc_action_net *tn = net_generic(net, bpf_net_id);
	struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
	struct tcf_bpf_cfg cfg, old;
	struct tc_act_bpf *parm;
	struct tcf_bpf *prog;
	bool is_bpf, is_ebpf;
	int ret, res = 0;

	if (!nla)
		return -EINVAL;

	ret = nla_parse_nested(tb, TCA_ACT_BPF_MAX, nla, act_bpf_policy);
	if (ret < 0)
		return ret;

	if (!tb[TCA_ACT_BPF_PARMS])
		return -EINVAL;

	parm = nla_data(tb[TCA_ACT_BPF_PARMS]);

	if (!tcf_hash_check(tn, parm->index, act, bind)) {
		ret = tcf_hash_create(tn, parm->index, est, act,
				      &act_bpf_ops, bind, true);
		if (ret < 0)
			return ret;

		res = ACT_P_CREATED;
	} else {
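		/* Existing action: when only binding to it, leave its
		 * current configuration untouched.
		 */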
		if (bind)
			return 0;

		tcf_hash_release(*act, bind);
		if (!replace)
			return -EEXIST;
	}

	is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS];
	is_ebpf = tb[TCA_ACT_BPF_FD];

	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf)) {
		ret = -EINVAL;
		goto out;
	}

	memset(&cfg, 0, sizeof(cfg));

	ret = is_bpf ? tcf_bpf_init_from_ops(tb, &cfg) :
		       tcf_bpf_init_from_efd(tb, &cfg);
	if (ret < 0)
		goto out;

	prog = to_bpf(*act);
	ASSERT_RTNL();

	if (res != ACT_P_CREATED)
		tcf_bpf_prog_fill_cfg(prog, &old);

	prog->bpf_ops = cfg.bpf_ops;
	prog->bpf_name = cfg.bpf_name;

	if (cfg.bpf_num_ops)
		prog->bpf_num_ops = cfg.bpf_num_ops;
	if (cfg.bpf_fd)
		prog->bpf_fd = cfg.bpf_fd;

	prog->tcf_action = parm->action;
	rcu_assign_pointer(prog->filter, cfg.filter);

	if (res == ACT_P_CREATED) {
		tcf_hash_insert(tn, *act);
	} else {
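		/* Wait for packet-path readers that may still be running
		 * the old program before tearing it down.
		 */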
		synchronize_rcu();
		tcf_bpf_cfg_cleanup(&old);
	}

	return res;
out:
	if (res == ACT_P_CREATED)
		tcf_hash_cleanup(*act, est);

	return ret;
}
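
/* Called when the action instance is destroyed: release whichever program
 * representation (classic ops or eBPF reference) is currently attached.
 */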
static void tcf_bpf_cleanup(struct tc_action *act, int bind)
{
	struct tcf_bpf_cfg tmp;

	tcf_bpf_prog_fill_cfg(to_bpf(act), &tmp);
	tcf_bpf_cfg_cleanup(&tmp);
}

static int tcf_bpf_walker(struct net *net, struct sk_buff *skb,
			  struct netlink_callback *cb, int type,
			  const struct tc_action_ops *ops)
{
	struct tc_action_net *tn = net_generic(net, bpf_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops);
}

static int tcf_bpf_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, bpf_net_id);

	return tcf_hash_search(tn, a, index);
}

static struct tc_action_ops act_bpf_ops __read_mostly = {
	.kind		= "bpf",
	.type		= TCA_ACT_BPF,
	.owner		= THIS_MODULE,
	.act		= tcf_bpf,
	.dump		= tcf_bpf_dump,
	.cleanup	= tcf_bpf_cleanup,
	.init		= tcf_bpf_init,
	.walk		= tcf_bpf_walker,
	.lookup		= tcf_bpf_search,
	.size		= sizeof(struct tcf_bpf),
};

static __net_init int bpf_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, bpf_net_id);

	return tc_action_net_init(tn, &act_bpf_ops, BPF_TAB_MASK);
}

static void __net_exit bpf_exit_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, bpf_net_id);

	tc_action_net_exit(tn);
}

static struct pernet_operations bpf_net_ops = {
	.init = bpf_init_net,
	.exit = bpf_exit_net,
	.id   = &bpf_net_id,
	.size = sizeof(struct tc_action_net),
};

static int __init bpf_init_module(void)
{
	return tcf_register_action(&act_bpf_ops, &bpf_net_ops);
}

static void __exit bpf_cleanup_module(void)
{
	tcf_unregister_action(&act_bpf_ops, &bpf_net_ops);
}

module_init(bpf_init_module);
module_exit(bpf_cleanup_module);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("TC BPF based action");
MODULE_LICENSE("GPL v2");