#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/ip6_route.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>

static DEFINE_MUTEX(flowtable_lock);
static LIST_HEAD(flowtables);

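/*
 * Populate one direction of the flow tuple from the conntrack tuple:
 * addresses, L3/L4 protocol numbers and ports. The TCP and UDP port
 * fields overlap in the conntrack tuple union, so reading u.tcp.port
 * covers both protocols.
 */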
static void
flow_offload_fill_dir(struct flow_offload *flow,
		      enum flow_offload_tuple_dir dir)
{
	struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
	struct nf_conntrack_tuple *ctt = &flow->ct->tuplehash[dir].tuple;

	ft->dir = dir;

	switch (ctt->src.l3num) {
	case NFPROTO_IPV4:
		ft->src_v4 = ctt->src.u3.in;
		ft->dst_v4 = ctt->dst.u3.in;
		break;
	case NFPROTO_IPV6:
		ft->src_v6 = ctt->src.u3.in6;
		ft->dst_v6 = ctt->dst.u3.in6;
		break;
	}

	ft->l3proto = ctt->src.l3num;
	ft->l4proto = ctt->dst.protonum;
	ft->src_port = ctt->src.u.tcp.port;
	ft->dst_port = ctt->dst.u.tcp.port;
}

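/*
 * Allocate a flow entry for @ct and take a reference on the conntrack
 * entry; this runs from the packet path, hence GFP_ATOMIC. The
 * SNAT/DNAT flags are mirrored from the conntrack status so that the
 * datapath knows which address/port mangling to apply.
 */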
struct flow_offload *flow_offload_alloc(struct nf_conn *ct)
{
	struct flow_offload *flow;

	if (unlikely(nf_ct_is_dying(ct) ||
	    !atomic_inc_not_zero(&ct->ct_general.use)))
		return NULL;

	flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
	if (!flow)
		goto err_ct_refcnt;

	flow->ct = ct;

	flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
	flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_REPLY);

	if (ct->status & IPS_SRC_NAT)
		__set_bit(NF_FLOW_SNAT, &flow->flags);
	if (ct->status & IPS_DST_NAT)
		__set_bit(NF_FLOW_DNAT, &flow->flags);

	return flow;

err_ct_refcnt:
	nf_ct_put(ct);

	return NULL;
}
EXPORT_SYMBOL_GPL(flow_offload_alloc);

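/*
 * IPv6 routes carry a cookie that is later passed to dst_check() to
 * detect stale routes; IPv4 uses a zero cookie.
 */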
static u32 flow_offload_dst_cookie(struct flow_offload_tuple *flow_tuple)
{
	const struct rt6_info *rt;

	if (flow_tuple->l3proto == NFPROTO_IPV6) {
		rt = (const struct rt6_info *)flow_tuple->dst_cache;
		return rt6_get_cookie(rt);
	}

	return 0;
}

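/*
 * Cache the forwarding state for one direction: path MTU, ingress
 * device and encapsulation stack, and the transmit path. The encap
 * stack is copied in reverse order relative to how the route lookup
 * recorded it. For the NEIGH/XFRM transmit types a reference on the
 * route is held for as long as the flow entry lives.
 */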
static int flow_offload_fill_route(struct flow_offload *flow,
				   const struct nf_flow_route *route,
				   enum flow_offload_tuple_dir dir)
{
	struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple;
	struct dst_entry *dst = route->tuple[dir].dst;
	int i, j = 0;

	switch (flow_tuple->l3proto) {
	case NFPROTO_IPV4:
		flow_tuple->mtu = ip_dst_mtu_maybe_forward(dst, true);
		break;
	case NFPROTO_IPV6:
		flow_tuple->mtu = ip6_dst_mtu_forward(dst);
		break;
	}

	flow_tuple->iifidx = route->tuple[dir].in.ifindex;
	for (i = route->tuple[dir].in.num_encaps - 1; i >= 0; i--) {
		flow_tuple->encap[j].id = route->tuple[dir].in.encap[i].id;
		flow_tuple->encap[j].proto = route->tuple[dir].in.encap[i].proto;
		if (route->tuple[dir].in.ingress_vlans & BIT(i))
			flow_tuple->in_vlan_ingress |= BIT(j);
		j++;
	}
	flow_tuple->encap_num = route->tuple[dir].in.num_encaps;

	switch (route->tuple[dir].xmit_type) {
	case FLOW_OFFLOAD_XMIT_DIRECT:
		memcpy(flow_tuple->out.h_dest, route->tuple[dir].out.h_dest,
		       ETH_ALEN);
		memcpy(flow_tuple->out.h_source, route->tuple[dir].out.h_source,
		       ETH_ALEN);
		flow_tuple->out.ifidx = route->tuple[dir].out.ifindex;
		flow_tuple->out.hw_ifidx = route->tuple[dir].out.hw_ifindex;
		break;
	case FLOW_OFFLOAD_XMIT_XFRM:
	case FLOW_OFFLOAD_XMIT_NEIGH:
		if (!dst_hold_safe(route->tuple[dir].dst))
			return -1;

		flow_tuple->dst_cache = dst;
		flow_tuple->dst_cookie = flow_offload_dst_cookie(flow_tuple);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
	flow_tuple->xmit_type = route->tuple[dir].xmit_type;

	return 0;
}

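/* Only the NEIGH and XFRM transmit types hold a route reference. */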
static void nft_flow_dst_release(struct flow_offload *flow,
				 enum flow_offload_tuple_dir dir)
{
	if (flow->tuplehash[dir].tuple.xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
	    flow->tuplehash[dir].tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)
		dst_release(flow->tuplehash[dir].tuple.dst_cache);
}

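/*
 * Fill both directions of the flow from the route information looked
 * up by the caller. If the reply direction fails, the route reference
 * taken for the original direction is dropped again.
 */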
int flow_offload_route_init(struct flow_offload *flow,
			    const struct nf_flow_route *route)
{
	int err;

	err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_ORIGINAL);
	if (err < 0)
		return err;

	err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_REPLY);
	if (err < 0)
		goto err_route_reply;

	flow->type = NF_FLOW_OFFLOAD_ROUTE;

	return 0;

err_route_reply:
	nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_ORIGINAL);

	return err;
}
EXPORT_SYMBOL_GPL(flow_offload_route_init);

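/*
 * Typical caller sequence for the allocation/setup API above, roughly
 * as in the nft_flow_offload expression (a sketch; error handling
 * abbreviated):
 *
 *	flow = flow_offload_alloc(ct);
 *	if (!flow)
 *		goto err;
 *	if (flow_offload_route_init(flow, &route) < 0)
 *		goto err_flow_free;
 *	if (flow_offload_add(flowtable, flow) < 0)
 *		goto err_flow_free;
 */

/*
 * When a flow returns to the slow path, put the conntrack entry back
 * into a sane state: force the TCP window tracking to be re-learned
 * and clamp the conntrack timeout, which was bumped while the flow
 * was offloaded.
 */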
static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
{
	tcp->state = TCP_CONNTRACK_ESTABLISHED;
	tcp->seen[0].td_maxwin = 0;
	tcp->seen[1].td_maxwin = 0;
}

static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
{
	const struct nf_conntrack_l4proto *l4proto;
	struct net *net = nf_ct_net(ct);
	int l4num = nf_ct_protonum(ct);
	s32 timeout;

	l4proto = nf_ct_l4proto_find(l4num);
	if (!l4proto)
		return;

	if (l4num == IPPROTO_TCP) {
		struct nf_tcp_net *tn = nf_tcp_pernet(net);

		timeout = tn->timeouts[TCP_CONNTRACK_ESTABLISHED];
		timeout -= tn->offload_timeout;
	} else if (l4num == IPPROTO_UDP) {
		struct nf_udp_net *tn = nf_udp_pernet(net);

		timeout = tn->timeouts[UDP_CT_REPLIED];
		timeout -= tn->offload_timeout;
	} else {
		return;
	}

	if (timeout < 0)
		timeout = 0;

	if (nf_flow_timeout_delta(ct->timeout) > (__s32)timeout)
		ct->timeout = nfct_time_stamp + timeout;
}

static void flow_offload_fixup_ct_state(struct nf_conn *ct)
{
	if (nf_ct_protonum(ct) == IPPROTO_TCP)
		flow_offload_fixup_tcp(&ct->proto.tcp);
}

static void flow_offload_fixup_ct(struct nf_conn *ct)
{
	flow_offload_fixup_ct_state(ct);
	flow_offload_fixup_ct_timeout(ct);
}

static void flow_offload_route_release(struct flow_offload *flow)
{
	nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
	nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_REPLY);
}

void flow_offload_free(struct flow_offload *flow)
{
	switch (flow->type) {
	case NF_FLOW_OFFLOAD_ROUTE:
		flow_offload_route_release(flow);
		break;
	default:
		break;
	}
	nf_ct_put(flow->ct);
	kfree_rcu(flow, rcu_head);
}
EXPORT_SYMBOL_GPL(flow_offload_free);

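/*
 * The rhashtable key covers the flow tuple up to the __hash marker
 * member; fields past it (cached routing state) do not take part in
 * hashing or comparison.
 */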
static u32 flow_offload_hash(const void *data, u32 len, u32 seed)
{
	const struct flow_offload_tuple *tuple = data;

	return jhash(tuple, offsetof(struct flow_offload_tuple, __hash), seed);
}

static u32 flow_offload_hash_obj(const void *data, u32 len, u32 seed)
{
	const struct flow_offload_tuple_rhash *tuplehash = data;

	return jhash(&tuplehash->tuple, offsetof(struct flow_offload_tuple, __hash), seed);
}

static int flow_offload_hash_cmp(struct rhashtable_compare_arg *arg,
				 const void *ptr)
{
	const struct flow_offload_tuple *tuple = arg->key;
	const struct flow_offload_tuple_rhash *x = ptr;

	if (memcmp(&x->tuple, tuple, offsetof(struct flow_offload_tuple, __hash)))
		return 1;

	return 0;
}

static const struct rhashtable_params nf_flow_offload_rhash_params = {
	.head_offset		= offsetof(struct flow_offload_tuple_rhash, node),
	.hashfn			= flow_offload_hash,
	.obj_hashfn		= flow_offload_hash_obj,
	.obj_cmpfn		= flow_offload_hash_cmp,
	.automatic_shrinking	= true,
};

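/*
 * A flow entry stays in the table for NF_FLOW_TIMEOUT by default; TCP
 * and UDP may override this with their per-netns offload timeout.
 */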
unsigned long flow_offload_get_timeout(struct flow_offload *flow)
{
	const struct nf_conntrack_l4proto *l4proto;
	unsigned long timeout = NF_FLOW_TIMEOUT;
	struct net *net = nf_ct_net(flow->ct);
	int l4num = nf_ct_protonum(flow->ct);

	l4proto = nf_ct_l4proto_find(l4num);
	if (!l4proto)
		return timeout;

	if (l4num == IPPROTO_TCP) {
		struct nf_tcp_net *tn = nf_tcp_pernet(net);

		timeout = tn->offload_timeout;
	} else if (l4num == IPPROTO_UDP) {
		struct nf_udp_net *tn = nf_udp_pernet(net);

		timeout = tn->offload_timeout;
	}

	return timeout;
}

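/*
 * Insert both directions of the flow into the table; if the reply
 * direction clashes, the original direction is backed out again. The
 * conntrack timeout is extended while the flowtable owns the entry,
 * and hardware offload is requested when the flowtable supports it.
 */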
int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
{
	int err;

	flow->timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);

	err = rhashtable_insert_fast(&flow_table->rhashtable,
				     &flow->tuplehash[0].node,
				     nf_flow_offload_rhash_params);
	if (err < 0)
		return err;

	err = rhashtable_insert_fast(&flow_table->rhashtable,
				     &flow->tuplehash[1].node,
				     nf_flow_offload_rhash_params);
	if (err < 0) {
		rhashtable_remove_fast(&flow_table->rhashtable,
				       &flow->tuplehash[0].node,
				       nf_flow_offload_rhash_params);
		return err;
	}

	nf_ct_offload_timeout(flow->ct);

	if (nf_flowtable_hw_offload(flow_table)) {
		__set_bit(NF_FLOW_HW, &flow->flags);
		nf_flow_offload_add(flow_table, flow);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(flow_offload_add);

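/*
 * Per-packet fast path hook: push the expiry further into the future
 * and, on hardware flowtables, re-request offload.
 */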
void flow_offload_refresh(struct nf_flowtable *flow_table,
			  struct flow_offload *flow)
{
	u32 timeout;

	timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);
	if (READ_ONCE(flow->timeout) != timeout)
		WRITE_ONCE(flow->timeout, timeout);

	if (likely(!nf_flowtable_hw_offload(flow_table)))
		return;

	nf_flow_offload_add(flow_table, flow);
}
EXPORT_SYMBOL_GPL(flow_offload_refresh);

static inline bool nf_flow_has_expired(const struct flow_offload *flow)
{
	return nf_flow_timeout_delta(flow->timeout) <= 0;
}

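/*
 * Unlink both directions and hand the conntrack entry back to the
 * normal state machine: clear IPS_OFFLOAD, fix up the timeout (and the
 * TCP state if the entry fully expired), then free the flow.
 */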
static void flow_offload_del(struct nf_flowtable *flow_table,
			     struct flow_offload *flow)
{
	rhashtable_remove_fast(&flow_table->rhashtable,
			       &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
			       nf_flow_offload_rhash_params);
	rhashtable_remove_fast(&flow_table->rhashtable,
			       &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
			       nf_flow_offload_rhash_params);

	clear_bit(IPS_OFFLOAD_BIT, &flow->ct->status);

	if (nf_flow_has_expired(flow))
		flow_offload_fixup_ct(flow->ct);
	else
		flow_offload_fixup_ct_timeout(flow->ct);

	flow_offload_free(flow);
}

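/*
 * Mark the flow so that the garbage collector removes it and packets
 * take the slow path again; the TCP state is fixed up right away.
 */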
void flow_offload_teardown(struct flow_offload *flow)
{
	set_bit(NF_FLOW_TEARDOWN, &flow->flags);

	flow_offload_fixup_ct_state(flow->ct);
}
EXPORT_SYMBOL_GPL(flow_offload_teardown);

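/*
 * Datapath lookup. Returns the matching tuple hash entry, or NULL if
 * there is no entry, the flow is being torn down, or its conntrack
 * entry is dying.
 */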
struct flow_offload_tuple_rhash *
flow_offload_lookup(struct nf_flowtable *flow_table,
		    struct flow_offload_tuple *tuple)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct flow_offload *flow;
	int dir;

	tuplehash = rhashtable_lookup(&flow_table->rhashtable, tuple,
				      nf_flow_offload_rhash_params);
	if (!tuplehash)
		return NULL;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	if (test_bit(NF_FLOW_TEARDOWN, &flow->flags))
		return NULL;

	if (unlikely(nf_ct_is_dying(flow->ct)))
		return NULL;

	return tuplehash;
}
EXPORT_SYMBOL_GPL(flow_offload_lookup);

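/*
 * Walk all flows in the table, calling @iter once per flow (only the
 * original direction entry is visited). -EAGAIN from the rhashtable
 * walker signals a concurrent resize and simply means "try the next
 * entry".
 */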
static int
nf_flow_table_iterate(struct nf_flowtable *flow_table,
		      void (*iter)(struct flow_offload *flow, void *data),
		      void *data)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct rhashtable_iter hti;
	struct flow_offload *flow;
	int err = 0;

	rhashtable_walk_enter(&flow_table->rhashtable, &hti);
	rhashtable_walk_start(&hti);

	while ((tuplehash = rhashtable_walk_next(&hti))) {
		if (IS_ERR(tuplehash)) {
			if (PTR_ERR(tuplehash) != -EAGAIN) {
				err = PTR_ERR(tuplehash);
				break;
			}
			continue;
		}
		if (tuplehash->tuple.dir)
			continue;

		flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);

		iter(flow, data);
	}
	rhashtable_walk_stop(&hti);
	rhashtable_walk_exit(&hti);

	return err;
}

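/*
 * For transmit types that cache a route, detect whether the route has
 * been invalidated (e.g. by a routing change) via dst_check() and the
 * cookie saved at setup time.
 */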
static bool flow_offload_stale_dst(struct flow_offload_tuple *tuple)
{
	struct dst_entry *dst;

	if (tuple->xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
	    tuple->xmit_type == FLOW_OFFLOAD_XMIT_XFRM) {
		dst = tuple->dst_cache;
		if (!dst_check(dst, tuple->dst_cookie))
			return true;
	}

	return false;
}

static bool nf_flow_has_stale_dst(struct flow_offload *flow)
{
	return flow_offload_stale_dst(&flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple) ||
	       flow_offload_stale_dst(&flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple);
}

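/*
 * Garbage collector step, run periodically for every flow: expired,
 * dying or stale entries are moved to teardown. Hardware-offloaded
 * flows are first unoffloaded (NF_FLOW_HW_DYING) and only freed once
 * the driver confirms removal (NF_FLOW_HW_DEAD); software flows are
 * freed directly. Surviving hardware flows get their stats refreshed.
 */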
static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
{
	struct nf_flowtable *flow_table = data;

	if (nf_flow_has_expired(flow) ||
	    nf_ct_is_dying(flow->ct) ||
	    nf_flow_has_stale_dst(flow))
		set_bit(NF_FLOW_TEARDOWN, &flow->flags);

	if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
		if (test_bit(NF_FLOW_HW, &flow->flags)) {
			if (!test_bit(NF_FLOW_HW_DYING, &flow->flags))
				nf_flow_offload_del(flow_table, flow);
			else if (test_bit(NF_FLOW_HW_DEAD, &flow->flags))
				flow_offload_del(flow_table, flow);
		} else {
			flow_offload_del(flow_table, flow);
		}
	} else if (test_bit(NF_FLOW_HW, &flow->flags)) {
		nf_flow_offload_stats(flow_table, flow);
	}
}

static void nf_flow_offload_work_gc(struct work_struct *work)
{
	struct nf_flowtable *flow_table;

	flow_table = container_of(work, struct nf_flowtable, gc_work.work);
	nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
	queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
}

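/*
 * Port NAT helpers: rewrite the checksum (and the ports themselves in
 * the snat/dnat wrappers below) for TCP and UDP. A UDP checksum of
 * zero means "no checksum", so it is only updated when present, and a
 * recomputed zero is folded to CSUM_MANGLED_0.
 */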
static void nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
				 __be16 port, __be16 new_port)
{
	struct tcphdr *tcph;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace2(&tcph->check, skb, port, new_port, false);
}

static void nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff,
				 __be16 port, __be16 new_port)
{
	struct udphdr *udph;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace2(&udph->check, skb, port,
					 new_port, false);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}
}

static void nf_flow_nat_port(struct sk_buff *skb, unsigned int thoff,
			     u8 protocol, __be16 port, __be16 new_port)
{
	switch (protocol) {
	case IPPROTO_TCP:
		nf_flow_nat_port_tcp(skb, thoff, port, new_port);
		break;
	case IPPROTO_UDP:
		nf_flow_nat_port_udp(skb, thoff, port, new_port);
		break;
	}
}

void nf_flow_snat_port(const struct flow_offload *flow,
		       struct sk_buff *skb, unsigned int thoff,
		       u8 protocol, enum flow_offload_tuple_dir dir)
{
	struct flow_ports *hdr;
	__be16 port, new_port;

	hdr = (void *)(skb_network_header(skb) + thoff);

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		port = hdr->source;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port;
		hdr->source = new_port;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		port = hdr->dest;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port;
		hdr->dest = new_port;
		break;
	}

	nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_snat_port);

void nf_flow_dnat_port(const struct flow_offload *flow, struct sk_buff *skb,
		       unsigned int thoff, u8 protocol,
		       enum flow_offload_tuple_dir dir)
{
	struct flow_ports *hdr;
	__be16 port, new_port;

	hdr = (void *)(skb_network_header(skb) + thoff);

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		port = hdr->dest;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port;
		hdr->dest = new_port;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		port = hdr->source;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port;
		hdr->source = new_port;
		break;
	}

	nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_dnat_port);

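/*
 * Initialise a flowtable: set up the hash table and the periodic
 * garbage collection work (run roughly once per second), and register
 * the table on the global list used for device cleanup.
 */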
int nf_flow_table_init(struct nf_flowtable *flowtable)
{
	int err;

	INIT_DELAYED_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);
	flow_block_init(&flowtable->flow_block);
	init_rwsem(&flowtable->flow_block_lock);

	err = rhashtable_init(&flowtable->rhashtable,
			      &nf_flow_offload_rhash_params);
	if (err < 0)
		return err;

	queue_delayed_work(system_power_efficient_wq,
			   &flowtable->gc_work, HZ);

	mutex_lock(&flowtable_lock);
	list_add(&flowtable->list, &flowtables);
	mutex_unlock(&flowtable_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(nf_flow_table_init);

static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
{
	struct net_device *dev = data;

	if (!dev) {
		flow_offload_teardown(flow);
		return;
	}

	if (net_eq(nf_ct_net(flow->ct), dev_net(dev)) &&
	    (flow->tuplehash[0].tuple.iifidx == dev->ifindex ||
	     flow->tuplehash[1].tuple.iifidx == dev->ifindex))
		flow_offload_teardown(flow);
}

void nf_flow_table_gc_cleanup(struct nf_flowtable *flowtable,
			      struct net_device *dev)
{
	nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, dev);
	flush_delayed_work(&flowtable->gc_work);
	nf_flow_table_offload_flush(flowtable);
}

void nf_flow_table_cleanup(struct net_device *dev)
{
	struct nf_flowtable *flowtable;

	mutex_lock(&flowtable_lock);
	list_for_each_entry(flowtable, &flowtables, list)
		nf_flow_table_gc_cleanup(flowtable, dev);
	mutex_unlock(&flowtable_lock);
}
EXPORT_SYMBOL_GPL(nf_flow_table_cleanup);

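/*
 * Release a flowtable. The two gc passes matter here: the first pass
 * (after tearing every flow down) reaps software flows and kicks off
 * hardware removal; the second pass, run after the offload workqueue
 * has been flushed, reaps the hardware flows whose removal the driver
 * acknowledged in between.
 */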
void nf_flow_table_free(struct nf_flowtable *flow_table)
{
	mutex_lock(&flowtable_lock);
	list_del(&flow_table->list);
	mutex_unlock(&flowtable_lock);

	cancel_delayed_work_sync(&flow_table->gc_work);
	nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
	nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
	nf_flow_table_offload_flush(flow_table);
	if (nf_flowtable_hw_offload(flow_table))
		nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step,
				      flow_table);
	rhashtable_destroy(&flow_table->rhashtable);
}
EXPORT_SYMBOL_GPL(nf_flow_table_free);

static int __init nf_flow_table_module_init(void)
{
	return nf_flow_table_offload_init();
}

static void __exit nf_flow_table_module_exit(void)
{
	nf_flow_table_offload_exit();
}

module_init(nf_flow_table_module_init);
module_exit(nf_flow_table_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_DESCRIPTION("Netfilter flow table module");