#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/filter.h>
#include <linux/bpf.h>

#include <net/netlink.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

#include <linux/tc_act/tc_bpf.h>
#include <net/tc_act/tc_bpf.h>

#include <linux/rh_features.h>

#define ACT_BPF_NAME_LEN	256

struct tcf_bpf_cfg {
	struct bpf_prog *filter;
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	u16 bpf_num_ops;
	bool is_ebpf;
};

static unsigned int bpf_net_id;
static struct tc_action_ops act_bpf_ops;

/* Per-packet handler: run the attached (e)BPF program on @skb and translate
 * its return value into a TC action verdict.
 */
static int tcf_bpf_act(struct sk_buff *skb, const struct tc_action *act,
		       struct tcf_result *res)
{
	bool at_ingress = skb_at_tc_ingress(skb);
	struct tcf_bpf *prog = to_bpf(act);
	struct bpf_prog *filter;
	int action, filter_res;

	tcf_lastuse_update(&prog->tcf_tm);
	bstats_cpu_update(this_cpu_ptr(prog->common.cpu_bstats), skb);

	rcu_read_lock();
	filter = rcu_dereference(prog->filter);
	if (at_ingress) {
		__skb_push(skb, skb->mac_len);
		bpf_compute_data_pointers(skb);
		filter_res = BPF_PROG_RUN(filter, skb);
		__skb_pull(skb, skb->mac_len);
	} else {
		bpf_compute_data_pointers(skb);
		filter_res = BPF_PROG_RUN(filter, skb);
	}
	if (skb_sk_is_prefetched(skb) && filter_res != TC_ACT_OK)
		skb_orphan(skb);
	rcu_read_unlock();

	/* The BPF program may overwrite the default action: if it returns
	 * one of the well-known TC_ACT_* opcodes handled below, that value
	 * is used directly; TC_ACT_UNSPEC falls back to the action
	 * configured from userspace (prog->tcf_action); anything else is
	 * treated as unspecified.
	 */
	switch (filter_res) {
	case TC_ACT_PIPE:
	case TC_ACT_RECLASSIFY:
	case TC_ACT_OK:
	case TC_ACT_REDIRECT:
		action = filter_res;
		break;
	case TC_ACT_SHOT:
		action = filter_res;
		qstats_drop_inc(this_cpu_ptr(prog->common.cpu_qstats));
		break;
	case TC_ACT_UNSPEC:
		action = prog->tcf_action;
		break;
	default:
		action = TC_ACT_UNSPEC;
		break;
	}

	return action;
}

static bool tcf_bpf_is_ebpf(const struct tcf_bpf *prog)
{
	return !prog->bpf_ops;
}

static int tcf_bpf_dump_bpf_info(const struct tcf_bpf *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_ACT_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_ACT_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int tcf_bpf_dump_ebpf_info(const struct tcf_bpf *prog,
				  struct sk_buff *skb)
{
	struct nlattr *nla;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_ACT_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	if (nla_put_u32(skb, TCA_ACT_BPF_ID, prog->filter->aux->id))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_ACT_BPF_TAG, sizeof(prog->filter->tag));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

	return 0;
}

static int tcf_bpf_dump(struct sk_buff *skb, struct tc_action *act,
			int bind, int ref)
{
	unsigned char *tp = skb_tail_pointer(skb);
	struct tcf_bpf *prog = to_bpf(act);
	struct tc_act_bpf opt = {
		.index   = prog->tcf_index,
		.refcnt  = refcount_read(&prog->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&prog->tcf_bindcnt) - bind,
	};
	struct tcf_t tm;
	int ret;

	spin_lock_bh(&prog->tcf_lock);
	opt.action = prog->tcf_action;
	if (nla_put(skb, TCA_ACT_BPF_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	if (tcf_bpf_is_ebpf(prog))
		ret = tcf_bpf_dump_ebpf_info(prog, skb);
	else
		ret = tcf_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	tcf_tm_dump(&tm, &prog->tcf_tm);
	if (nla_put_64bit(skb, TCA_ACT_BPF_TM, sizeof(tm), &tm,
			  TCA_ACT_BPF_PAD))
		goto nla_put_failure;

	spin_unlock_bh(&prog->tcf_lock);
	return skb->len;

nla_put_failure:
	spin_unlock_bh(&prog->tcf_lock);
	nlmsg_trim(skb, tp);
	return -1;
}

static const struct nla_policy act_bpf_policy[TCA_ACT_BPF_MAX + 1] = {
	[TCA_ACT_BPF_PARMS]	= { .len = sizeof(struct tc_act_bpf) },
	[TCA_ACT_BPF_FD]	= { .type = NLA_U32 },
	[TCA_ACT_BPF_NAME]	= { .type = NLA_NUL_STRING,
				    .len = ACT_BPF_NAME_LEN },
	[TCA_ACT_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_ACT_BPF_OPS]	= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};
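
/* For illustration only (not part of the original source): the attributes
 * above are normally filled in by iproute2.  Assuming an ingress/clsact
 * qdisc is already installed, a classic-BPF and an eBPF instance of this
 * action can be attached roughly as follows (exact tc syntax may vary by
 * iproute2 version):
 *
 *	# classic BPF: bytecode lands in TCA_ACT_BPF_OPS/TCA_ACT_BPF_OPS_LEN
 *	tc filter add dev eth0 ingress matchall \
 *		action bpf bytecode '1,6 0 0 0'
 *
 *	# eBPF: a program fd is passed via TCA_ACT_BPF_FD, its name via
 *	# TCA_ACT_BPF_NAME
 *	tc filter add dev eth0 ingress matchall \
 *		action bpf obj prog.o sec action
 *
 * Installed instances can then be inspected with "tc actions ls action bpf",
 * which is served by tcf_bpf_dump() below.
 */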

/* Build a config from a classic BPF program passed as raw sock_filter
 * instructions (TCA_ACT_BPF_OPS/TCA_ACT_BPF_OPS_LEN).
 */
static int tcf_bpf_init_from_ops(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_ACT_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_ACT_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kmemdup(nla_data(tb[TCA_ACT_BPF_OPS]), bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	cfg->bpf_ops = bpf_ops;
	cfg->bpf_num_ops = bpf_num_ops;
	cfg->filter = fp;
	cfg->is_ebpf = false;

	return 0;
}
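
/* Illustrative sketch (not part of the original source): the payload of
 * TCA_ACT_BPF_OPS that tcf_bpf_init_from_ops() expects is a raw array of
 * struct sock_filter.  A single-instruction program whose return value maps
 * to TC_ACT_OK in tcf_bpf_act() would be encoded by userspace as:
 *
 *	struct sock_filter ops[] = {
 *		{ BPF_RET | BPF_K, 0, 0, TC_ACT_OK },
 *	};
 *
 * with TCA_ACT_BPF_OPS_LEN set to 1 and TCA_ACT_BPF_OPS carrying
 * sizeof(ops) bytes.
 */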

/* Build a config from an already-loaded eBPF program referenced by fd
 * (TCA_ACT_BPF_FD), optionally carrying a user-visible name.
 */
static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
{
	struct bpf_prog *fp;
	char *name = NULL;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_ACT_BPF_FD]);

	fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_ACT);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_ACT_BPF_NAME]) {
		name = nla_memdup(tb[TCA_ACT_BPF_NAME], GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	rh_mark_used_feature("eBPF/act");

	cfg->bpf_name = name;
	cfg->filter = fp;
	cfg->is_ebpf = true;

	return 0;
}
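
/* Illustrative sketch (not part of the original source): the fd consumed by
 * tcf_bpf_init_from_efd() refers to a program loaded by userspace with type
 * BPF_PROG_TYPE_SCHED_ACT.  A minimal program that drops every packet, which
 * tcf_bpf_act() maps to TC_ACT_SHOT and counts as a qdisc drop, could look
 * like this (the section name is up to the loader; "action" is the common
 * iproute2 convention):
 *
 *	SEC("action")
 *	int drop_all(struct __sk_buff *skb)
 *	{
 *		return TC_ACT_SHOT;
 *	}
 */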

static void tcf_bpf_cfg_cleanup(const struct tcf_bpf_cfg *cfg)
{
	struct bpf_prog *filter = cfg->filter;

	if (filter) {
		if (cfg->is_ebpf)
			bpf_prog_put(filter);
		else
			bpf_prog_destroy(filter);
	}

	kfree(cfg->bpf_ops);
	kfree(cfg->bpf_name);
}

static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog,
				  struct tcf_bpf_cfg *cfg)
{
	cfg->is_ebpf = tcf_bpf_is_ebpf(prog);
	/* Updates to prog->filter cannot race with us here: callers either
	 * hold the tcf lock or run from the final cleanup path, so a plain
	 * rcu_dereference_protected() is sufficient.
	 */
	cfg->filter = rcu_dereference_protected(prog->filter, 1);

	cfg->bpf_ops = prog->bpf_ops;
	cfg->bpf_name = prog->bpf_name;
}

/* Create a new bpf action or replace the program of an existing one, based
 * on the netlink attributes validated by act_bpf_policy.
 */
static int tcf_bpf_init(struct net *net, struct nlattr *nla,
			struct nlattr *est, struct tc_action **act,
			int replace, int bind, bool rtnl_held,
			struct tcf_proto *tp, u32 flags,
			struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, bpf_net_id);
	struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tcf_bpf_cfg cfg, old;
	struct tc_act_bpf *parm;
	struct tcf_bpf *prog;
	bool is_bpf, is_ebpf;
	int ret, res = 0;
	u32 index;

	if (!nla)
		return -EINVAL;

	ret = nla_parse_nested_deprecated(tb, TCA_ACT_BPF_MAX, nla,
					  act_bpf_policy, NULL);
	if (ret < 0)
		return ret;

	if (!tb[TCA_ACT_BPF_PARMS])
		return -EINVAL;

	parm = nla_data(tb[TCA_ACT_BPF_PARMS]);
	index = parm->index;
	ret = tcf_idr_check_alloc(tn, &index, act, bind);
	if (!ret) {
		ret = tcf_idr_create(tn, index, est, act,
				     &act_bpf_ops, bind, true, 0);
		if (ret < 0) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}

		res = ACT_P_CREATED;
	} else if (ret > 0) {
		/* The action already exists; when only binding to it, leave
		 * its configuration untouched.
		 */
		if (bind)
			return 0;

		if (!replace) {
			tcf_idr_release(*act, bind);
			return -EEXIST;
		}
	} else {
		return ret;
	}

	ret = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (ret < 0)
		goto release_idr;

	is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS];
	is_ebpf = tb[TCA_ACT_BPF_FD];

	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf)) {
		ret = -EINVAL;
		goto put_chain;
	}

	memset(&cfg, 0, sizeof(cfg));

	ret = is_bpf ? tcf_bpf_init_from_ops(tb, &cfg) :
		       tcf_bpf_init_from_efd(tb, &cfg);
	if (ret < 0)
		goto put_chain;

	prog = to_bpf(*act);

	spin_lock_bh(&prog->tcf_lock);
	if (res != ACT_P_CREATED)
		tcf_bpf_prog_fill_cfg(prog, &old);

	prog->bpf_ops = cfg.bpf_ops;
	prog->bpf_name = cfg.bpf_name;

	if (cfg.bpf_num_ops)
		prog->bpf_num_ops = cfg.bpf_num_ops;

	goto_ch = tcf_action_set_ctrlact(*act, parm->action, goto_ch);
	rcu_assign_pointer(prog->filter, cfg.filter);
	spin_unlock_bh(&prog->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	if (res != ACT_P_CREATED) {
		/* Make sure no packet is still running the program being
		 * replaced before tearing it down.
		 */
		synchronize_rcu();
		tcf_bpf_cfg_cleanup(&old);
	}

	return res;

put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

release_idr:
	tcf_idr_release(*act, bind);
	return ret;
}

static void tcf_bpf_cleanup(struct tc_action *act)
{
	struct tcf_bpf_cfg tmp;

	tcf_bpf_prog_fill_cfg(to_bpf(act), &tmp);
	tcf_bpf_cfg_cleanup(&tmp);
}

static int tcf_bpf_walker(struct net *net, struct sk_buff *skb,
			  struct netlink_callback *cb, int type,
			  const struct tc_action_ops *ops,
			  struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, bpf_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_bpf_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, bpf_net_id);

	return tcf_idr_search(tn, a, index);
}

static struct tc_action_ops act_bpf_ops __read_mostly = {
	.kind		= "bpf",
	.id		= TCA_ID_BPF,
	.owner		= THIS_MODULE,
	.act		= tcf_bpf_act,
	.dump		= tcf_bpf_dump,
	.cleanup	= tcf_bpf_cleanup,
	.init		= tcf_bpf_init,
	.walk		= tcf_bpf_walker,
	.lookup		= tcf_bpf_search,
	.size		= sizeof(struct tcf_bpf),
};

static __net_init int bpf_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, bpf_net_id);

	return tc_action_net_init(net, tn, &act_bpf_ops);
}

static void __net_exit bpf_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, bpf_net_id);
}

static struct pernet_operations bpf_net_ops = {
	.init = bpf_init_net,
	.exit_batch = bpf_exit_net,
	.id   = &bpf_net_id,
	.size = sizeof(struct tc_action_net),
};

static int __init bpf_init_module(void)
{
	return tcf_register_action(&act_bpf_ops, &bpf_net_ops);
}

static void __exit bpf_cleanup_module(void)
{
	tcf_unregister_action(&act_bpf_ops, &bpf_net_ops);
}

module_init(bpf_init_module);
module_exit(bpf_cleanup_module);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("TC BPF based action");
MODULE_LICENSE("GPL v2");