/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <net/flow_offload.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_offload.h>
#include <net/pkt_cls.h>

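/* Allocate a nft_flow_rule with room for @num_actions actions and point the
 * flow_rule match at the nftables-private dissector/key/mask storage.
 */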
static struct nft_flow_rule *nft_flow_rule_alloc(int num_actions)
{
	struct nft_flow_rule *flow;

	flow = kzalloc(sizeof(struct nft_flow_rule), GFP_KERNEL);
	if (!flow)
		return NULL;

	flow->rule = flow_rule_alloc(num_actions);
	if (!flow->rule) {
		kfree(flow);
		return NULL;
	}

	flow->rule->match.dissector = &flow->match.dissector;
	flow->rule->match.mask = &flow->match.mask;
	flow->rule->match.key = &flow->match.key;

	return flow;
}

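/* Set the control key address type, unless a control key has already been
 * registered in the dissector.
 */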
void nft_flow_rule_set_addr_type(struct nft_flow_rule *flow,
				 enum flow_dissector_key_id addr_type)
{
	struct nft_flow_match *match = &flow->match;
	struct nft_flow_key *mask = &match->mask;
	struct nft_flow_key *key = &match->key;

	if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL))
		return;

	key->control.addr_type = addr_type;
	mask->control.addr_type = 0xffff;
	match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL);
	match->dissector.offset[FLOW_DISSECTOR_KEY_CONTROL] =
		offsetof(struct nft_flow_key, control);
}

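/* Translate an nftables rule into a flow_rule: count the offloadable actions,
 * then let each expression's ->offload() callback fill in the match and action
 * entries. Returns an ERR_PTR() on failure.
 */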
struct nft_flow_rule *nft_flow_rule_create(struct net *net,
					   const struct nft_rule *rule)
{
	struct nft_offload_ctx *ctx;
	struct nft_flow_rule *flow;
	int num_actions = 0, err;
	struct nft_expr *expr;

	expr = nft_expr_first(rule);
	while (expr->ops && expr != nft_expr_last(rule)) {
		if (expr->ops->offload_flags & NFT_OFFLOAD_F_ACTION)
			num_actions++;

		expr = nft_expr_next(expr);
	}

	if (num_actions == 0)
		return ERR_PTR(-EOPNOTSUPP);

	flow = nft_flow_rule_alloc(num_actions);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	expr = nft_expr_first(rule);

	ctx = kzalloc(sizeof(struct nft_offload_ctx), GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto err_out;
	}
	ctx->net = net;
	ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC;

	while (expr->ops && expr != nft_expr_last(rule)) {
		if (!expr->ops->offload) {
			err = -EOPNOTSUPP;
			goto err_out;
		}
		err = expr->ops->offload(ctx, flow, expr);
		if (err < 0)
			goto err_out;

		expr = nft_expr_next(expr);
	}
	flow->proto = ctx->dep.l3num;
	kfree(ctx);

	return flow;
err_out:
	kfree(ctx);
	nft_flow_rule_destroy(flow);

	return ERR_PTR(err);
}

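/* Release a flow rule, dropping the device references taken by redirect and
 * mirror actions.
 */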
void nft_flow_rule_destroy(struct nft_flow_rule *flow)
{
	struct flow_action_entry *entry;
	int i;

	flow_action_for_each(i, entry, &flow->rule->action) {
		switch (entry->id) {
		case FLOW_ACTION_REDIRECT:
		case FLOW_ACTION_MIRRED:
			dev_put(entry->dev);
			break;
		default:
			break;
		}
	}
	kfree(flow->rule);
	kfree(flow);
}

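/* Record the dependency type that the next call to
 * nft_offload_update_dependency() is expected to fill in.
 */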
void nft_offload_set_dependency(struct nft_offload_ctx *ctx,
				enum nft_offload_dep_type type)
{
	ctx->dep.type = type;
}

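/* Store the pending network (l3num) or transport (protonum) protocol from the
 * match data and reset the dependency state.
 */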
void nft_offload_update_dependency(struct nft_offload_ctx *ctx,
				   const void *data, u32 len)
{
	switch (ctx->dep.type) {
	case NFT_OFFLOAD_DEP_NETWORK:
		WARN_ON(len != sizeof(__u16));
		memcpy(&ctx->dep.l3num, data, sizeof(__u16));
		break;
	case NFT_OFFLOAD_DEP_TRANSPORT:
		WARN_ON(len != sizeof(__u8));
		memcpy(&ctx->dep.protonum, data, sizeof(__u8));
		break;
	default:
		break;
	}
	ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC;
}

static void nft_flow_offload_common_init(struct flow_cls_common_offload *common,
					 __be16 proto, int priority,
					 struct netlink_ext_ack *extack)
{
	common->protocol = proto;
	common->prio = priority;
	common->extack = extack;
}

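/* Run a classifier offload request through every callback registered on the
 * flow block, stopping at the first error.
 */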
static int nft_setup_cb_call(enum tc_setup_type type, void *type_data,
			     struct list_head *cb_list)
{
	struct flow_block_cb *block_cb;
	int err;

	list_for_each_entry(block_cb, cb_list, list) {
		err = block_cb->cb(type, type_data, block_cb->cb_priv);
		if (err < 0)
			return err;
	}
	return 0;
}

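/* Hardware offload requires a chain priority in the 1..USHRT_MAX range. */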
int nft_chain_offload_priority(struct nft_base_chain *basechain)
{
	if (basechain->ops.priority <= 0 ||
	    basechain->ops.priority > USHRT_MAX)
		return -1;

	return 0;
}

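/* Fill in a flow_cls_offload request for the given rule; the rule pointer is
 * used as the cookie that identifies the rule to the driver.
 */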
static void nft_flow_cls_offload_setup(struct flow_cls_offload *cls_flow,
				       const struct nft_base_chain *basechain,
				       const struct nft_rule *rule,
				       const struct nft_flow_rule *flow,
				       struct netlink_ext_ack *extack,
				       enum flow_cls_command command)
{
	__be16 proto = ETH_P_ALL;

	memset(cls_flow, 0, sizeof(*cls_flow));

	if (flow)
		proto = flow->proto;

	nft_flow_offload_common_init(&cls_flow->common, proto,
				     basechain->ops.priority, extack);
	cls_flow->command = command;
	cls_flow->cookie = (unsigned long) rule;
	if (flow)
		cls_flow->rule = flow->rule;
}

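/* Push a single rule to the drivers bound to the chain's flow block. Only base
 * chains can be offloaded.
 */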
static int nft_flow_offload_rule(struct nft_chain *chain,
				 struct nft_rule *rule,
				 struct nft_flow_rule *flow,
				 enum flow_cls_command command)
{
	struct netlink_ext_ack extack = {};
	struct flow_cls_offload cls_flow;
	struct nft_base_chain *basechain;

	if (!nft_is_base_chain(chain))
		return -EOPNOTSUPP;

	basechain = nft_base_chain(chain);
	nft_flow_cls_offload_setup(&cls_flow, basechain, rule, flow, &extack,
				   command);

	return nft_setup_cb_call(TC_SETUP_CLSFLOWER, &cls_flow,
				 &basechain->flow_block.cb_list);
}

static int nft_flow_offload_bind(struct flow_block_offload *bo,
				 struct nft_base_chain *basechain)
{
	list_splice(&bo->cb_list, &basechain->flow_block.cb_list);
	return 0;
}

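/* Tear down the block binding: ask the drivers to destroy every rule in the
 * chain, then release the block callbacks.
 */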
static int nft_flow_offload_unbind(struct flow_block_offload *bo,
				   struct nft_base_chain *basechain)
{
	struct flow_block_cb *block_cb, *next;
	struct flow_cls_offload cls_flow;
	struct netlink_ext_ack extack;
	struct nft_chain *chain;
	struct nft_rule *rule;

	chain = &basechain->chain;
	list_for_each_entry(rule, &chain->rules, list) {
		memset(&extack, 0, sizeof(extack));
		nft_flow_cls_offload_setup(&cls_flow, basechain, rule, NULL,
					   &extack, FLOW_CLS_DESTROY);
		nft_setup_cb_call(TC_SETUP_CLSFLOWER, &cls_flow, &bo->cb_list);
	}

	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
	}

	return 0;
}

static int nft_block_setup(struct nft_base_chain *basechain,
			   struct flow_block_offload *bo,
			   enum flow_block_command cmd)
{
	int err;

	switch (cmd) {
	case FLOW_BLOCK_BIND:
		err = nft_flow_offload_bind(bo, basechain);
		break;
	case FLOW_BLOCK_UNBIND:
		err = nft_flow_offload_unbind(bo, basechain);
		break;
	default:
		WARN_ON_ONCE(1);
		err = -EOPNOTSUPP;
	}

	return err;
}

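/* Initialize a flow_block_offload request for the chain's ingress block. */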
static void nft_flow_block_offload_init(struct flow_block_offload *bo,
					struct net *net,
					enum flow_block_command cmd,
					struct nft_base_chain *basechain,
					struct netlink_ext_ack *extack)
{
	memset(bo, 0, sizeof(*bo));
	bo->net = net;
	bo->block = &basechain->flow_block;
	bo->command = cmd;
	bo->binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
	bo->extack = extack;
	INIT_LIST_HEAD(&bo->cb_list);
}

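/* Bind or unbind the flow block through the device's ndo_setup_tc() hook. */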
static int nft_block_offload_cmd(struct nft_base_chain *chain,
				 struct net_device *dev,
				 enum flow_block_command cmd)
{
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo;
	int err;

	nft_flow_block_offload_init(&bo, dev_net(dev), cmd, chain, &extack);

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
	if (err < 0)
		return err;

	return nft_block_setup(chain, &bo, cmd);
}

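/* Cleanup callback for indirect blocks: unbind the chain when the driver
 * releases the block callback.
 */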
static void nft_indr_block_cleanup(struct flow_block_cb *block_cb)
{
	struct nft_base_chain *basechain = block_cb->indr.data;
	struct net_device *dev = block_cb->indr.dev;
	struct netlink_ext_ack extack = {};
	struct net *net = dev_net(dev);
	struct flow_block_offload bo;

	nft_flow_block_offload_init(&bo, dev_net(dev), FLOW_BLOCK_UNBIND,
				    basechain, &extack);
	mutex_lock(&net->nft.commit_mutex);
	list_del(&block_cb->driver_list);
	list_move(&block_cb->list, &bo.cb_list);
	nft_flow_offload_unbind(&bo, basechain);
	mutex_unlock(&net->nft.commit_mutex);
}

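/* For devices that do not provide ndo_setup_tc(), set up the block through the
 * indirect flow block infrastructure; fail with -EOPNOTSUPP if no driver
 * subscribed to this block.
 */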
static int nft_indr_block_offload_cmd(struct nft_base_chain *basechain,
				      struct net_device *dev,
				      enum flow_block_command cmd)
{
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo;
	int err;

	nft_flow_block_offload_init(&bo, dev_net(dev), cmd, basechain, &extack);

	err = flow_indr_dev_setup_offload(dev, NULL, TC_SETUP_BLOCK, basechain, &bo,
					  nft_indr_block_cleanup);
	if (err < 0)
		return err;

	if (list_empty(&bo.cb_list))
		return -EOPNOTSUPP;

	return nft_block_setup(basechain, &bo, cmd);
}

#define FLOW_SETUP_BLOCK TC_SETUP_BLOCK

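/* Dispatch a block command either directly via ndo_setup_tc() or through the
 * indirect block path.
 */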
static int nft_chain_offload_cmd(struct nft_base_chain *basechain,
				 struct net_device *dev,
				 enum flow_block_command cmd)
{
	int err;

	if (dev->netdev_ops->ndo_setup_tc)
		err = nft_block_offload_cmd(basechain, dev, cmd);
	else
		err = nft_indr_block_offload_cmd(basechain, dev, cmd);

	return err;
}

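/* Apply a block command to every hook device of the base chain, or only to
 * @this_dev if given. If a bind fails, the devices already bound are unbound
 * again.
 */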
static int nft_flow_block_chain(struct nft_base_chain *basechain,
				const struct net_device *this_dev,
				enum flow_block_command cmd)
{
	struct net_device *dev;
	struct nft_hook *hook;
	int err, i = 0;

	list_for_each_entry(hook, &basechain->hook_list, list) {
		dev = hook->ops.dev;
		if (this_dev && this_dev != dev)
			continue;

		err = nft_chain_offload_cmd(basechain, dev, cmd);
		if (err < 0 && cmd == FLOW_BLOCK_BIND) {
			if (!this_dev)
				goto err_flow_block;

			return err;
		}
		i++;
	}

	return 0;

err_flow_block:
	list_for_each_entry(hook, &basechain->hook_list, list) {
		if (i-- <= 0)
			break;

		dev = hook->ops.dev;
		nft_chain_offload_cmd(basechain, dev, FLOW_BLOCK_UNBIND);
	}
	return err;
}

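/* Bind or unbind an offloaded base chain. Binding is refused if the chain
 * policy is drop.
 */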
static int nft_flow_offload_chain(struct nft_chain *chain, u8 *ppolicy,
				  enum flow_block_command cmd)
{
	struct nft_base_chain *basechain;
	u8 policy;

	if (!nft_is_base_chain(chain))
		return -EOPNOTSUPP;

	basechain = nft_base_chain(chain);
	policy = ppolicy ? *ppolicy : basechain->policy;

	/* Only default policy to accept is supported for now. */
	if (cmd == FLOW_BLOCK_BIND && policy == NF_DROP)
		return -EOPNOTSUPP;

	return nft_flow_block_chain(basechain, NULL, cmd);
}

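/* Undo the offload requests already issued when a commit fails part-way
 * through: walk back through the transaction list and apply the inverse
 * command for each chain and rule.
 */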
static void nft_flow_rule_offload_abort(struct net *net,
					struct nft_trans *trans)
{
	int err = 0;

	list_for_each_entry_continue_reverse(trans, &net->nft.commit_list, list) {
		if (trans->ctx.family != NFPROTO_NETDEV)
			continue;

		switch (trans->msg_type) {
		case NFT_MSG_NEWCHAIN:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) ||
			    nft_trans_chain_update(trans))
				continue;

			err = nft_flow_offload_chain(trans->ctx.chain, NULL,
						     FLOW_BLOCK_UNBIND);
			break;
		case NFT_MSG_DELCHAIN:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			err = nft_flow_offload_chain(trans->ctx.chain, NULL,
						     FLOW_BLOCK_BIND);
			break;
		case NFT_MSG_NEWRULE:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			err = nft_flow_offload_rule(trans->ctx.chain,
						    nft_trans_rule(trans),
						    NULL, FLOW_CLS_DESTROY);
			break;
		case NFT_MSG_DELRULE:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			err = nft_flow_offload_rule(trans->ctx.chain,
						    nft_trans_rule(trans),
						    nft_trans_flow_rule(trans),
						    FLOW_CLS_REPLACE);
			break;
		}

		if (WARN_ON_ONCE(err))
			break;
	}
}

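/* Walk the transaction list and mirror chain and rule changes to the hardware.
 * On error, abort the offload requests issued so far. Finally, release the
 * flow rule representations that are no longer needed.
 */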
int nft_flow_rule_offload_commit(struct net *net)
{
	struct nft_trans *trans;
	int err = 0;
	u8 policy;

	list_for_each_entry(trans, &net->nft.commit_list, list) {
		if (trans->ctx.family != NFPROTO_NETDEV)
			continue;

		switch (trans->msg_type) {
		case NFT_MSG_NEWCHAIN:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) ||
			    nft_trans_chain_update(trans))
				continue;

			policy = nft_trans_chain_policy(trans);
			err = nft_flow_offload_chain(trans->ctx.chain, &policy,
						     FLOW_BLOCK_BIND);
			break;
		case NFT_MSG_DELCHAIN:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			policy = nft_trans_chain_policy(trans);
			err = nft_flow_offload_chain(trans->ctx.chain, &policy,
						     FLOW_BLOCK_UNBIND);
			break;
		case NFT_MSG_NEWRULE:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			if (trans->ctx.flags & NLM_F_REPLACE ||
			    !(trans->ctx.flags & NLM_F_APPEND)) {
				err = -EOPNOTSUPP;
				break;
			}
			err = nft_flow_offload_rule(trans->ctx.chain,
						    nft_trans_rule(trans),
						    nft_trans_flow_rule(trans),
						    FLOW_CLS_REPLACE);
			break;
		case NFT_MSG_DELRULE:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			err = nft_flow_offload_rule(trans->ctx.chain,
						    nft_trans_rule(trans),
						    NULL, FLOW_CLS_DESTROY);
			break;
		}

		if (err) {
			nft_flow_rule_offload_abort(net, trans);
			break;
		}
	}

	list_for_each_entry(trans, &net->nft.commit_list, list) {
		if (trans->ctx.family != NFPROTO_NETDEV)
			continue;

		switch (trans->msg_type) {
		case NFT_MSG_NEWRULE:
		case NFT_MSG_DELRULE:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			nft_flow_rule_destroy(nft_trans_flow_rule(trans));
			break;
		default:
			break;
		}
	}

	return err;
}

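/* Find the offloaded base chain that hooks into @dev, if any. Called with the
 * nftables commit mutex held.
 */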
static struct nft_chain *__nft_offload_get_chain(struct net_device *dev)
{
	struct nft_base_chain *basechain;
	struct net *net = dev_net(dev);
	struct nft_hook *hook, *found;
	const struct nft_table *table;
	struct nft_chain *chain;

	list_for_each_entry(table, &net->nft.tables, list) {
		if (table->family != NFPROTO_NETDEV)
			continue;

		list_for_each_entry(chain, &table->chains, list) {
			if (!nft_is_base_chain(chain) ||
			    !(chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			found = NULL;
			basechain = nft_base_chain(chain);
			list_for_each_entry(hook, &basechain->hook_list, list) {
				if (hook->ops.dev != dev)
					continue;

				found = hook;
				break;
			}
			if (!found)
				continue;

			return chain;
		}
	}

	return NULL;
}

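/* Netdevice notifier: unbind the flow block from a device that is being
 * unregistered.
 */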
static int nft_offload_netdev_event(struct notifier_block *this,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct nft_chain *chain;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	mutex_lock(&net->nft.commit_mutex);
	chain = __nft_offload_get_chain(dev);
	if (chain)
		nft_flow_block_chain(nft_base_chain(chain), dev,
				     FLOW_BLOCK_UNBIND);

	mutex_unlock(&net->nft.commit_mutex);

	return NOTIFY_DONE;
}

static struct notifier_block nft_offload_netdev_notifier = {
	.notifier_call	= nft_offload_netdev_event,
};

int nft_offload_init(void)
{
	return register_netdevice_notifier(&nft_offload_netdev_notifier);
}

void nft_offload_exit(void)
{
	unregister_netdevice_notifier(&nft_offload_netdev_notifier);
}