/*
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/filter.h>
#include <linux/bpf.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>

#include <linux/tc_act/tc_bpf.h>
#include <net/tc_act/tc_bpf.h>

#define BPF_TAB_MASK		15
#define ACT_BPF_NAME_LEN	256

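/* Scratch configuration built while parsing the netlink attributes; on
 * success its contents are moved into struct tcf_bpf, otherwise (and for
 * the program being replaced) tcf_bpf_cfg_cleanup() releases them.
 */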
struct tcf_bpf_cfg {
	struct bpf_prog *filter;
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	u32 bpf_fd;
	u16 bpf_num_ops;
	bool is_ebpf;
};

static int bpf_net_id;
static struct tc_action_ops act_bpf_ops;

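/* Per-packet handler: run the attached (e)BPF program on the skb under
 * RCU and map its return code to a TC verdict.  On ingress the MAC
 * header is pushed back first so the program sees the same data layout
 * as on egress.
 */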
static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act,
		   struct tcf_result *res)
{
	bool at_ingress = skb_at_tc_ingress(skb);
	struct tcf_bpf *prog = to_bpf(act);
	struct bpf_prog *filter;
	int action, filter_res;

	tcf_lastuse_update(&prog->tcf_tm);
	bstats_cpu_update(this_cpu_ptr(prog->common.cpu_bstats), skb);

	rcu_read_lock();
	filter = rcu_dereference(prog->filter);
	if (at_ingress) {
		__skb_push(skb, skb->mac_len);
		bpf_compute_data_end(skb);
		filter_res = BPF_PROG_RUN(filter, skb);
		__skb_pull(skb, skb->mac_len);
	} else {
		bpf_compute_data_end(skb);
		filter_res = BPF_PROG_RUN(filter, skb);
	}
	rcu_read_unlock();

	/* A BPF program may overwrite the default action opcode.
	 * Similarly as in cls_bpf, if filter_res == -1 we use the
	 * default action specified from tc.
	 *
	 * N.B. prog->filter contains mini compiled BPF previously
	 *      translated into BPF ops and prog->orig_prog (if
	 *      present) contains the original BPF code.
	 */

	switch (filter_res) {
	case TC_ACT_PIPE:
	case TC_ACT_RECLASSIFY:
	case TC_ACT_OK:
	case TC_ACT_REDIRECT:
		action = filter_res;
		break;
	case TC_ACT_SHOT:
		action = filter_res;
		qstats_drop_inc(this_cpu_ptr(prog->common.cpu_qstats));
		break;
	case TC_ACT_UNSPEC:
		action = prog->tcf_action;
		break;
	default:
		action = TC_ACT_UNSPEC;
		break;
	}

	return action;
}

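/* Classic BPF keeps the original sock_filter ops around for dumping;
 * eBPF programs only carry an fd/name, so a NULL bpf_ops marks eBPF.
 */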
static bool tcf_bpf_is_ebpf(const struct tcf_bpf *prog)
{
	return !prog->bpf_ops;
}

static int tcf_bpf_dump_bpf_info(const struct tcf_bpf *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_ACT_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_ACT_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int tcf_bpf_dump_ebpf_info(const struct tcf_bpf *prog,
				  struct sk_buff *skb)
{
	if (nla_put_u32(skb, TCA_ACT_BPF_FD, prog->bpf_fd))
		return -EMSGSIZE;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_ACT_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	return 0;
}

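/* Dump the action's netlink attributes: parameters, the program itself
 * (ops for classic BPF, fd/name for eBPF) and the usage timestamps.
 */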
static int tcf_bpf_dump(struct sk_buff *skb, struct tc_action *act,
			int bind, int ref)
{
	unsigned char *tp = skb_tail_pointer(skb);
	struct tcf_bpf *prog = to_bpf(act);
	struct tc_act_bpf opt = {
		.index = prog->tcf_index,
		.refcnt = prog->tcf_refcnt - ref,
		.bindcnt = prog->tcf_bindcnt - bind,
		.action = prog->tcf_action,
	};
	struct tcf_t tm;
	int ret;

	if (nla_put(skb, TCA_ACT_BPF_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	if (tcf_bpf_is_ebpf(prog))
		ret = tcf_bpf_dump_ebpf_info(prog, skb);
	else
		ret = tcf_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	tcf_tm_dump(&tm, &prog->tcf_tm);
	if (nla_put_64bit(skb, TCA_ACT_BPF_TM, sizeof(tm), &tm,
			  TCA_ACT_BPF_PAD))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, tp);
	return -1;
}

static const struct nla_policy act_bpf_policy[TCA_ACT_BPF_MAX + 1] = {
	[TCA_ACT_BPF_PARMS]	= { .len = sizeof(struct tc_act_bpf) },
	[TCA_ACT_BPF_FD]	= { .type = NLA_U32 },
	[TCA_ACT_BPF_NAME]	= { .type = NLA_NUL_STRING,
				    .len = ACT_BPF_NAME_LEN },
	[TCA_ACT_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_ACT_BPF_OPS]	= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

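/* Classic BPF setup: copy the sock_filter ops from TCA_ACT_BPF_OPS and
 * translate them into an executable program with bpf_prog_create().
 */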
static int tcf_bpf_init_from_ops(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_ACT_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_ACT_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	memcpy(bpf_ops, nla_data(tb[TCA_ACT_BPF_OPS]), bpf_size);

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	cfg->bpf_ops = bpf_ops;
	cfg->bpf_num_ops = bpf_num_ops;
	cfg->filter = fp;
	cfg->is_ebpf = false;

	return 0;
}

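/* eBPF setup: take a reference on the program behind TCA_ACT_BPF_FD,
 * which must have been loaded as BPF_PROG_TYPE_SCHED_ACT; the optional
 * name is kept only for dumping.
 */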
static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
{
	struct bpf_prog *fp;
	char *name = NULL;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_ACT_BPF_FD]);

	fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_ACT);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_ACT_BPF_NAME]) {
		name = kmemdup(nla_data(tb[TCA_ACT_BPF_NAME]),
			       nla_len(tb[TCA_ACT_BPF_NAME]),
			       GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	cfg->bpf_fd = bpf_fd;
	cfg->bpf_name = name;
	cfg->filter = fp;
	cfg->is_ebpf = true;

	return 0;
}

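/* Release whatever a config holds: eBPF programs are reference counted,
 * classic programs (and their copied ops) are destroyed outright.
 */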
static void tcf_bpf_cfg_cleanup(const struct tcf_bpf_cfg *cfg)
{
	if (cfg->is_ebpf)
		bpf_prog_put(cfg->filter);
	else
		bpf_prog_destroy(cfg->filter);

	kfree(cfg->bpf_ops);
	kfree(cfg->bpf_name);
}

static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog,
				  struct tcf_bpf_cfg *cfg)
{
	cfg->is_ebpf = tcf_bpf_is_ebpf(prog);
	/* updates to prog->filter are prevented, since it's called either
	 * with rtnl lock or during final cleanup in rcu callback
	 */
	cfg->filter = rcu_dereference_protected(prog->filter, 1);

	cfg->bpf_ops = prog->bpf_ops;
	cfg->bpf_name = prog->bpf_name;
}

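/* .init callback: create a new bpf action or replace the program of an
 * existing one.  On replace the old config is saved first and only freed
 * after an RCU grace period, once the new filter has been published.
 */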
static int tcf_bpf_init(struct net *net, struct nlattr *nla,
			struct nlattr *est, struct tc_action **act,
			int replace, int bind)
{
	struct tc_action_net *tn = net_generic(net, bpf_net_id);
	struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
	struct tcf_bpf_cfg cfg, old;
	struct tc_act_bpf *parm;
	struct tcf_bpf *prog;
	bool is_bpf, is_ebpf;
	int ret, res = 0;

	if (!nla)
		return -EINVAL;

	ret = nla_parse_nested(tb, TCA_ACT_BPF_MAX, nla, act_bpf_policy);
	if (ret < 0)
		return ret;

	if (!tb[TCA_ACT_BPF_PARMS])
		return -EINVAL;

	parm = nla_data(tb[TCA_ACT_BPF_PARMS]);

	if (!tcf_hash_check(tn, parm->index, act, bind)) {
		ret = tcf_hash_create(tn, parm->index, est, act,
				      &act_bpf_ops, bind, true);
		if (ret < 0)
			return ret;

		res = ACT_P_CREATED;
	} else {
		/* Don't override defaults. */
		if (bind)
			return 0;

		tcf_hash_release(*act, bind);
		if (!replace)
			return -EEXIST;
	}

	is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS];
	is_ebpf = tb[TCA_ACT_BPF_FD];

	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf)) {
		ret = -EINVAL;
		goto out;
	}

	memset(&cfg, 0, sizeof(cfg));

	ret = is_bpf ? tcf_bpf_init_from_ops(tb, &cfg) :
		       tcf_bpf_init_from_efd(tb, &cfg);
	if (ret < 0)
		goto out;

	prog = to_bpf(*act);
	ASSERT_RTNL();

	if (res != ACT_P_CREATED)
		tcf_bpf_prog_fill_cfg(prog, &old);

	prog->bpf_ops = cfg.bpf_ops;
	prog->bpf_name = cfg.bpf_name;

	if (cfg.bpf_num_ops)
		prog->bpf_num_ops = cfg.bpf_num_ops;
	if (cfg.bpf_fd)
		prog->bpf_fd = cfg.bpf_fd;

	prog->tcf_action = parm->action;
	rcu_assign_pointer(prog->filter, cfg.filter);

	if (res == ACT_P_CREATED) {
		tcf_hash_insert(tn, *act);
	} else {
		/* make sure the program being replaced is no longer executing */
		synchronize_rcu();
		tcf_bpf_cfg_cleanup(&old);
	}

	return res;
out:
	if (res == ACT_P_CREATED)
		tcf_hash_cleanup(*act, est);

	return ret;
}

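/* .cleanup callback: drop the program when the action itself goes away. */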
static void tcf_bpf_cleanup(struct tc_action *act, int bind)
{
	struct tcf_bpf_cfg tmp;

	tcf_bpf_prog_fill_cfg(to_bpf(act), &tmp);
	tcf_bpf_cfg_cleanup(&tmp);
}

static int tcf_bpf_walker(struct net *net, struct sk_buff *skb,
			  struct netlink_callback *cb, int type,
			  const struct tc_action_ops *ops)
{
	struct tc_action_net *tn = net_generic(net, bpf_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops);
}

static int tcf_bpf_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, bpf_net_id);

	return tcf_hash_search(tn, a, index);
}

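/* Ties the callbacks above to the "bpf" action kind. */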
static struct tc_action_ops act_bpf_ops __read_mostly = {
	.kind		= "bpf",
	.type		= TCA_ACT_BPF,
	.owner		= THIS_MODULE,
	.act		= tcf_bpf,
	.dump		= tcf_bpf_dump,
	.cleanup	= tcf_bpf_cleanup,
	.init		= tcf_bpf_init,
	.walk		= tcf_bpf_walker,
	.lookup		= tcf_bpf_search,
	.size		= sizeof(struct tcf_bpf),
};

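/* Per-network-namespace setup/teardown of the action table. */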
static __net_init int bpf_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, bpf_net_id);

	return tc_action_net_init(tn, &act_bpf_ops, BPF_TAB_MASK);
}

static void __net_exit bpf_exit_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, bpf_net_id);

	tc_action_net_exit(tn);
}

static struct pernet_operations bpf_net_ops = {
	.init = bpf_init_net,
	.exit = bpf_exit_net,
	.id   = &bpf_net_id,
	.size = sizeof(struct tc_action_net),
};

static int __init bpf_init_module(void)
{
	return tcf_register_action(&act_bpf_ops, &bpf_net_ops);
}

static void __exit bpf_cleanup_module(void)
{
	tcf_unregister_action(&act_bpf_ops, &bpf_net_ops);
}

module_init(bpf_init_module);
module_exit(bpf_cleanup_module);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("TC BPF based action");
MODULE_LICENSE("GPL v2");
433