// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <net/netevent.h>
#include <linux/idr.h>
#include <net/dst_metadata.h>
#include <net/arp.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_net_repr.h"
#include "../nfp_net.h"

#define NFP_FL_MAX_ROUTES		32

#define NFP_TUN_PRE_TUN_RULE_LIMIT	32
#define NFP_TUN_PRE_TUN_RULE_DEL	BIT(0)
#define NFP_TUN_PRE_TUN_IDX_BIT		BIT(3)
#define NFP_TUN_PRE_TUN_IPV6_BIT	BIT(7)
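/**
 * struct nfp_tun_pre_tun_rule - rule matched before decap
 * @flags:		options for the rule offload
 * @port_idx:		index of destination MAC address for the rule
 * @vlan_tci:		VLAN info associated with MAC
 * @host_ctx_id:	stats context of rule to update
 */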
struct nfp_tun_pre_tun_rule {
	__be32 flags;
	__be16 port_idx;
	__be16 vlan_tci;
	__be32 host_ctx_id;
};
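/**
 * struct nfp_tun_active_tuns - periodic message of active tunnels
 * @seq:		sequence number of the message
 * @count:		number of tunnels reported in the message
 * @flags:		options part of the request
 * @tun_info.ipv4:		dest IPv4 address of active route
 * @tun_info.egress_port:	port the encapsulated packet egressed
 * @tun_info.extra:		reserved for future use
 * @tun_info:		tunnels that have sent traffic in reported period
 */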
struct nfp_tun_active_tuns {
	__be32 seq;
	__be32 count;
	__be32 flags;
	struct route_ip_info {
		__be32 ipv4;
		__be32 egress_port;
		__be32 extra[2];
	} tun_info[];
};
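/**
 * struct nfp_tun_active_tuns_v6 - periodic message of active IPv6 tunnels
 * @seq:		sequence number of the message
 * @count:		number of tunnels reported in the message
 * @flags:		options part of the request
 * @tun_info.ipv6:		dest IPv6 address of active route
 * @tun_info.egress_port:	port the encapsulated packet egressed
 * @tun_info.extra:		reserved for future use
 * @tun_info:		tunnels that have sent traffic in reported period
 */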
struct nfp_tun_active_tuns_v6 {
	__be32 seq;
	__be32 count;
	__be32 flags;
	struct route_ip_info_v6 {
		struct in6_addr ipv6;
		__be32 egress_port;
		__be32 extra[2];
	} tun_info[];
};
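/**
 * struct nfp_tun_neigh - neighbour/route entry on the NFP
 * @dst_ipv4:	destination IPv4 address
 * @src_ipv4:	source IPv4 address
 * @dst_addr:	destination MAC address
 * @src_addr:	source MAC address
 * @port_id:	NFP port to output packet on - associated with source IPv4
 */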
struct nfp_tun_neigh {
	__be32 dst_ipv4;
	__be32 src_ipv4;
	u8 dst_addr[ETH_ALEN];
	u8 src_addr[ETH_ALEN];
	__be32 port_id;
};
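/**
 * struct nfp_tun_neigh_v6 - IPv6 neighbour/route entry on the NFP
 * @dst_ipv6:	destination IPv6 address
 * @src_ipv6:	source IPv6 address
 * @dst_addr:	destination MAC address
 * @src_addr:	source MAC address
 * @port_id:	NFP port to output packet on - associated with source IPv6
 */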
struct nfp_tun_neigh_v6 {
	struct in6_addr dst_ipv6;
	struct in6_addr src_ipv6;
	u8 dst_addr[ETH_ALEN];
	u8 src_addr[ETH_ALEN];
	__be32 port_id;
};
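/**
 * struct nfp_tun_req_route_ipv4 - NFP requests a route/neighbour lookup
 * @ingress_port:	ingress port of packet that signalled request
 * @ipv4_addr:		destination ipv4 address for route
 * @reserved:		reserved for future use
 */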
struct nfp_tun_req_route_ipv4 {
	__be32 ingress_port;
	__be32 ipv4_addr;
	__be32 reserved[2];
};
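/**
 * struct nfp_tun_req_route_ipv6 - NFP requests an IPv6 route/neighbour lookup
 * @ingress_port:	ingress port of packet that signalled request
 * @ipv6_addr:		destination ipv6 address for route
 */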
struct nfp_tun_req_route_ipv6 {
	__be32 ingress_port;
	struct in6_addr ipv6_addr;
};
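/**
 * struct nfp_offloaded_route - routes that are offloaded to the NFP
 * @list:	list pointer
 * @ip_add:	destination of route - can be IPv4 or IPv6
 */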
struct nfp_offloaded_route {
	struct list_head list;
	u8 ip_add[];
};

#define NFP_FL_IPV4_ADDRS_MAX		32
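/**
 * struct nfp_tun_ipv4_addr - set the IP address list on the NFP
 * @count:	number of IPs populated in the array
 * @ipv4_addr:	array of IPV4_ADDRS_MAX 32 bit IPv4 addresses
 */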
struct nfp_tun_ipv4_addr {
	__be32 count;
	__be32 ipv4_addr[NFP_FL_IPV4_ADDRS_MAX];
};
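/**
 * struct nfp_ipv4_addr_entry - cached IPv4 addresses
 * @ipv4_addr:	IP address
 * @ref_count:	number of rules currently using this IP
 * @list:	list pointer
 */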
struct nfp_ipv4_addr_entry {
	__be32 ipv4_addr;
	int ref_count;
	struct list_head list;
};

#define NFP_FL_IPV6_ADDRS_MAX		4
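/**
 * struct nfp_tun_ipv6_addr - set the IPv6 address list on the NFP
 * @count:	number of IPs populated in the array
 * @ipv6_addr:	array of IPV6_ADDRS_MAX 128 bit IPv6 addresses
 */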
struct nfp_tun_ipv6_addr {
	__be32 count;
	struct in6_addr ipv6_addr[NFP_FL_IPV6_ADDRS_MAX];
};

#define NFP_TUN_MAC_OFFLOAD_DEL_FLAG	0x2
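/**
 * struct nfp_tun_mac_addr_offload - configure MAC address of tunnel EP on NFP
 * @flags:	MAC address offload options
 * @count:	number of MAC addresses in the message (should be 1)
 * @index:	index of MAC address in the lookup table
 * @addr:	interface MAC address
 */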
struct nfp_tun_mac_addr_offload {
	__be16 flags;
	__be16 count;
	__be16 index;
	u8 addr[ETH_ALEN];
};

enum nfp_flower_mac_offload_cmd {
	NFP_TUNNEL_MAC_OFFLOAD_ADD =	0,
	NFP_TUNNEL_MAC_OFFLOAD_DEL =	1,
	NFP_TUNNEL_MAC_OFFLOAD_MOD =	2,
};

#define NFP_MAX_MAC_INDEX	0xff
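/**
 * struct nfp_tun_offloaded_mac - hashtable entry for an offloaded MAC
 * @ht_node:		Hashtable entry
 * @addr:		Offloaded MAC address
 * @index:		Offloaded index for given MAC address
 * @ref_count:		Number of devs using this MAC address
 * @repr_list:		List of reprs sharing this MAC address
 * @bridge_count:	Number of bridge/internal devs with MAC
 */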
struct nfp_tun_offloaded_mac {
	struct rhash_head ht_node;
	u8 addr[ETH_ALEN];
	u16 index;
	int ref_count;
	struct list_head repr_list;
	int bridge_count;
};

static const struct rhashtable_params offloaded_macs_params = {
	.key_offset	= offsetof(struct nfp_tun_offloaded_mac, addr),
	.head_offset	= offsetof(struct nfp_tun_offloaded_mac, ht_node),
	.key_len	= ETH_ALEN,
	.automatic_shrinking	= true,
};
void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_tun_active_tuns *payload;
	struct net_device *netdev;
	int count, i, pay_len;
	struct neighbour *n;
	__be32 ipv4_addr;
	u32 port;

	payload = nfp_flower_cmsg_get_data(skb);
	count = be32_to_cpu(payload->count);
	if (count > NFP_FL_MAX_ROUTES) {
		nfp_flower_cmsg_warn(app, "Tunnel keep-alive request exceeds max routes.\n");
		return;
	}

	pay_len = nfp_flower_cmsg_get_data_len(skb);
	if (pay_len != struct_size(payload, tun_info, count)) {
		nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
		return;
	}

	rcu_read_lock();
	for (i = 0; i < count; i++) {
		ipv4_addr = payload->tun_info[i].ipv4;
		port = be32_to_cpu(payload->tun_info[i].egress_port);
		netdev = nfp_app_dev_get(app, port, NULL);
		if (!netdev)
			continue;

		n = neigh_lookup(&arp_tbl, &ipv4_addr, netdev);
		if (!n)
			continue;

		/* Update the used timestamp of neighbour */
		neigh_event_send(n, NULL);
		neigh_release(n);
	}
	rcu_read_unlock();
}

void nfp_tunnel_keep_alive_v6(struct nfp_app *app, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct nfp_tun_active_tuns_v6 *payload;
	struct net_device *netdev;
	int count, i, pay_len;
	struct neighbour *n;
	void *ipv6_add;
	u32 port;

	payload = nfp_flower_cmsg_get_data(skb);
	count = be32_to_cpu(payload->count);
	if (count > NFP_FL_IPV6_ADDRS_MAX) {
		nfp_flower_cmsg_warn(app, "IPv6 tunnel keep-alive request exceeds max routes.\n");
		return;
	}

	pay_len = nfp_flower_cmsg_get_data_len(skb);
	if (pay_len != struct_size(payload, tun_info, count)) {
		nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
		return;
	}

	rcu_read_lock();
	for (i = 0; i < count; i++) {
		ipv6_add = &payload->tun_info[i].ipv6;
		port = be32_to_cpu(payload->tun_info[i].egress_port);
		netdev = nfp_app_dev_get(app, port, NULL);
		if (!netdev)
			continue;

		n = neigh_lookup(&nd_tbl, ipv6_add, netdev);
		if (!n)
			continue;

		/* Update the used timestamp of neighbour */
		neigh_event_send(n, NULL);
		neigh_release(n);
	}
	rcu_read_unlock();
#endif
}

static int
nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
			 gfp_t flag)
{
	struct sk_buff *skb;
	unsigned char *msg;

	skb = nfp_flower_cmsg_alloc(app, plen, mtype, flag);
	if (!skb)
		return -ENOMEM;

	msg = nfp_flower_cmsg_get_data(skb);
	memcpy(msg, pdata, nfp_flower_cmsg_get_data_len(skb));

	nfp_ctrl_tx(app->ctrl, skb);
	return 0;
}

static bool
__nfp_tun_has_route(struct list_head *route_list, spinlock_t *list_lock,
		    void *add, int add_len)
{
	struct nfp_offloaded_route *entry;

	spin_lock_bh(list_lock);
	list_for_each_entry(entry, route_list, list)
		if (!memcmp(entry->ip_add, add, add_len)) {
			spin_unlock_bh(list_lock);
			return true;
		}
	spin_unlock_bh(list_lock);
	return false;
}

static int
__nfp_tun_add_route_to_cache(struct list_head *route_list,
			     spinlock_t *list_lock, void *add, int add_len)
{
	struct nfp_offloaded_route *entry;

	spin_lock_bh(list_lock);
	list_for_each_entry(entry, route_list, list)
		if (!memcmp(entry->ip_add, add, add_len)) {
			spin_unlock_bh(list_lock);
			return 0;
		}

	entry = kmalloc(sizeof(*entry) + add_len, GFP_ATOMIC);
	if (!entry) {
		spin_unlock_bh(list_lock);
		return -ENOMEM;
	}

	memcpy(entry->ip_add, add, add_len);
	list_add_tail(&entry->list, route_list);
	spin_unlock_bh(list_lock);

	return 0;
}

static void
__nfp_tun_del_route_from_cache(struct list_head *route_list,
			       spinlock_t *list_lock, void *add, int add_len)
{
	struct nfp_offloaded_route *entry;

	spin_lock_bh(list_lock);
	list_for_each_entry(entry, route_list, list)
		if (!memcmp(entry->ip_add, add, add_len)) {
			list_del(&entry->list);
			kfree(entry);
			break;
		}
	spin_unlock_bh(list_lock);
}

static bool nfp_tun_has_route_v4(struct nfp_app *app, __be32 *ipv4_addr)
{
	struct nfp_flower_priv *priv = app->priv;

	return __nfp_tun_has_route(&priv->tun.neigh_off_list_v4,
				   &priv->tun.neigh_off_lock_v4, ipv4_addr,
				   sizeof(*ipv4_addr));
}

static bool
nfp_tun_has_route_v6(struct nfp_app *app, struct in6_addr *ipv6_addr)
{
	struct nfp_flower_priv *priv = app->priv;

	return __nfp_tun_has_route(&priv->tun.neigh_off_list_v6,
				   &priv->tun.neigh_off_lock_v6, ipv6_addr,
				   sizeof(*ipv6_addr));
}

static void
nfp_tun_add_route_to_cache_v4(struct nfp_app *app, __be32 *ipv4_addr)
{
	struct nfp_flower_priv *priv = app->priv;

	__nfp_tun_add_route_to_cache(&priv->tun.neigh_off_list_v4,
				     &priv->tun.neigh_off_lock_v4, ipv4_addr,
				     sizeof(*ipv4_addr));
}

static void
nfp_tun_add_route_to_cache_v6(struct nfp_app *app, struct in6_addr *ipv6_addr)
{
	struct nfp_flower_priv *priv = app->priv;

	__nfp_tun_add_route_to_cache(&priv->tun.neigh_off_list_v6,
				     &priv->tun.neigh_off_lock_v6, ipv6_addr,
				     sizeof(*ipv6_addr));
}

static void
nfp_tun_del_route_from_cache_v4(struct nfp_app *app, __be32 *ipv4_addr)
{
	struct nfp_flower_priv *priv = app->priv;

	__nfp_tun_del_route_from_cache(&priv->tun.neigh_off_list_v4,
				       &priv->tun.neigh_off_lock_v4, ipv4_addr,
				       sizeof(*ipv4_addr));
}

static void
nfp_tun_del_route_from_cache_v6(struct nfp_app *app, struct in6_addr *ipv6_addr)
{
	struct nfp_flower_priv *priv = app->priv;

	__nfp_tun_del_route_from_cache(&priv->tun.neigh_off_list_v6,
				       &priv->tun.neigh_off_lock_v6, ipv6_addr,
				       sizeof(*ipv6_addr));
}

static void
nfp_tun_write_neigh_v4(struct net_device *netdev, struct nfp_app *app,
		       struct flowi4 *flow, struct neighbour *neigh, gfp_t flag)
{
	struct nfp_tun_neigh payload;
	u32 port_id;

	port_id = nfp_flower_get_port_id_from_netdev(app, netdev);
	if (!port_id)
		return;

	memset(&payload, 0, sizeof(struct nfp_tun_neigh));
	payload.dst_ipv4 = flow->daddr;

	/* If entry has expired send dst IP with all other fields 0. */
	if (!(neigh->nud_state & NUD_VALID) || neigh->dead) {
		nfp_tun_del_route_from_cache_v4(app, &payload.dst_ipv4);
		/* Trigger ARP to verify invalid neighbour state. */
		neigh_event_send(neigh, NULL);
		goto send_msg;
	}

	/* Have a valid neighbour so populate rest of entry. */
	payload.src_ipv4 = flow->saddr;
	ether_addr_copy(payload.src_addr, netdev->dev_addr);
	neigh_ha_snapshot(payload.dst_addr, neigh, netdev);
	payload.port_id = cpu_to_be32(port_id);
	/* Add destination of new route to NFP cache. */
	nfp_tun_add_route_to_cache_v4(app, &payload.dst_ipv4);

send_msg:
	nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH,
				 sizeof(struct nfp_tun_neigh),
				 (unsigned char *)&payload, flag);
}

static void
nfp_tun_write_neigh_v6(struct net_device *netdev, struct nfp_app *app,
		       struct flowi6 *flow, struct neighbour *neigh, gfp_t flag)
{
	struct nfp_tun_neigh_v6 payload;
	u32 port_id;

	port_id = nfp_flower_get_port_id_from_netdev(app, netdev);
	if (!port_id)
		return;

	memset(&payload, 0, sizeof(struct nfp_tun_neigh_v6));
	payload.dst_ipv6 = flow->daddr;

	/* If entry has expired send dst IP with all other fields 0. */
	if (!(neigh->nud_state & NUD_VALID) || neigh->dead) {
		nfp_tun_del_route_from_cache_v6(app, &payload.dst_ipv6);
		/* Trigger probe to verify invalid neighbour state. */
		neigh_event_send(neigh, NULL);
		goto send_msg;
	}

	/* Have a valid neighbour so populate rest of entry. */
	payload.src_ipv6 = flow->saddr;
	ether_addr_copy(payload.src_addr, netdev->dev_addr);
	neigh_ha_snapshot(payload.dst_addr, neigh, netdev);
	payload.port_id = cpu_to_be32(port_id);
	/* Add destination of new route to NFP cache. */
	nfp_tun_add_route_to_cache_v6(app, &payload.dst_ipv6);

send_msg:
	nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6,
				 sizeof(struct nfp_tun_neigh_v6),
				 (unsigned char *)&payload, flag);
}

static int
nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
			    void *ptr)
{
	struct nfp_flower_priv *app_priv;
	struct netevent_redirect *redir;
	struct flowi4 flow4 = {};
	struct flowi6 flow6 = {};
	struct neighbour *n;
	struct nfp_app *app;
	struct rtable *rt;
	bool ipv6 = false;
	int err;

	switch (event) {
	case NETEVENT_REDIRECT:
		redir = (struct netevent_redirect *)ptr;
		n = redir->neigh;
		break;
	case NETEVENT_NEIGH_UPDATE:
		n = (struct neighbour *)ptr;
		break;
	default:
		return NOTIFY_DONE;
	}

	if (n->tbl->family == AF_INET6)
		ipv6 = true;

	if (ipv6)
		flow6.daddr = *(struct in6_addr *)n->primary_key;
	else
		flow4.daddr = *(__be32 *)n->primary_key;

	app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb);
	app = app_priv->app;

	if (!nfp_netdev_is_nfp_repr(n->dev) &&
	    !nfp_flower_internal_port_can_offload(app, n->dev))
		return NOTIFY_DONE;

	/* Only concerned with changes to routes already added to NFP. */
	if ((ipv6 && !nfp_tun_has_route_v6(app, &flow6.daddr)) ||
	    (!ipv6 && !nfp_tun_has_route_v4(app, &flow4.daddr)))
		return NOTIFY_DONE;

#if IS_ENABLED(CONFIG_INET)
	if (ipv6) {
#if IS_ENABLED(CONFIG_IPV6)
		struct dst_entry *dst;

		dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(n->dev), NULL,
						      &flow6, NULL);
		if (IS_ERR(dst))
			return NOTIFY_DONE;

		dst_release(dst);
		flow6.flowi6_proto = IPPROTO_UDP;
		nfp_tun_write_neigh_v6(n->dev, app, &flow6, n, GFP_ATOMIC);
#else
		return NOTIFY_DONE;
#endif /* CONFIG_IPV6 */
	} else {
		/* Do a route lookup to populate flow data. */
		rt = ip_route_output_key(dev_net(n->dev), &flow4);
		err = PTR_ERR_OR_ZERO(rt);
		if (err)
			return NOTIFY_DONE;

		ip_rt_put(rt);

		flow4.flowi4_proto = IPPROTO_UDP;
		nfp_tun_write_neigh_v4(n->dev, app, &flow4, n, GFP_ATOMIC);
	}
#else
	return NOTIFY_DONE;
#endif /* CONFIG_INET */

	return NOTIFY_OK;
}

void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_tun_req_route_ipv4 *payload;
	struct net_device *netdev;
	struct flowi4 flow = {};
	struct neighbour *n;
	struct rtable *rt;
	int err;

	payload = nfp_flower_cmsg_get_data(skb);

	rcu_read_lock();
	netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
	if (!netdev)
		goto fail_rcu_unlock;

	flow.daddr = payload->ipv4_addr;
	flow.flowi4_proto = IPPROTO_UDP;

#if IS_ENABLED(CONFIG_INET)
	/* Do a route lookup on same namespace as ingress port. */
	rt = ip_route_output_key(dev_net(netdev), &flow);
	err = PTR_ERR_OR_ZERO(rt);
	if (err)
		goto fail_rcu_unlock;
#else
	goto fail_rcu_unlock;
#endif

	/* Get the neighbour entry for the lookup. */
	n = dst_neigh_lookup(&rt->dst, &flow.daddr);
	ip_rt_put(rt);
	if (!n)
		goto fail_rcu_unlock;
	nfp_tun_write_neigh_v4(n->dev, app, &flow, n, GFP_ATOMIC);
	neigh_release(n);
	rcu_read_unlock();
	return;

fail_rcu_unlock:
	rcu_read_unlock();
	nfp_flower_cmsg_warn(app, "Requested route not found.\n");
}

void nfp_tunnel_request_route_v6(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_tun_req_route_ipv6 *payload;
	struct net_device *netdev;
	struct flowi6 flow = {};
	struct dst_entry *dst;
	struct neighbour *n;

	payload = nfp_flower_cmsg_get_data(skb);

	rcu_read_lock();
	netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
	if (!netdev)
		goto fail_rcu_unlock;

	flow.daddr = payload->ipv6_addr;
	flow.flowi6_proto = IPPROTO_UDP;

#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
	dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(netdev), NULL, &flow,
					      NULL);
	if (IS_ERR(dst))
		goto fail_rcu_unlock;
#else
	goto fail_rcu_unlock;
#endif

	n = dst_neigh_lookup(dst, &flow.daddr);
	dst_release(dst);
	if (!n)
		goto fail_rcu_unlock;

	nfp_tun_write_neigh_v6(n->dev, app, &flow, n, GFP_ATOMIC);
	neigh_release(n);
	rcu_read_unlock();
	return;

fail_rcu_unlock:
	rcu_read_unlock();
	nfp_flower_cmsg_warn(app, "Requested IPv6 route not found.\n");
}

static void nfp_tun_write_ipv4_list(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_addr_entry *entry;
	struct nfp_tun_ipv4_addr payload;
	struct list_head *ptr, *storage;
	int count;

	memset(&payload, 0, sizeof(struct nfp_tun_ipv4_addr));
	mutex_lock(&priv->tun.ipv4_off_lock);
	count = 0;
	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
		if (count >= NFP_FL_IPV4_ADDRS_MAX) {
			mutex_unlock(&priv->tun.ipv4_off_lock);
			nfp_flower_cmsg_warn(app, "IPv4 offload exceeds limit.\n");
			return;
		}
		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		payload.ipv4_addr[count++] = entry->ipv4_addr;
	}
	payload.count = cpu_to_be32(count);
	mutex_unlock(&priv->tun.ipv4_off_lock);

	nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS,
				 sizeof(struct nfp_tun_ipv4_addr),
				 &payload, GFP_KERNEL);
}

void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_addr_entry *entry;
	struct list_head *ptr, *storage;

	mutex_lock(&priv->tun.ipv4_off_lock);
	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		if (entry->ipv4_addr == ipv4) {
			entry->ref_count++;
			mutex_unlock(&priv->tun.ipv4_off_lock);
			return;
		}
	}

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		mutex_unlock(&priv->tun.ipv4_off_lock);
		nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
		return;
	}
	entry->ipv4_addr = ipv4;
	entry->ref_count = 1;
	list_add_tail(&entry->list, &priv->tun.ipv4_off_list);
	mutex_unlock(&priv->tun.ipv4_off_lock);

	nfp_tun_write_ipv4_list(app);
}

void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_addr_entry *entry;
	struct list_head *ptr, *storage;

	mutex_lock(&priv->tun.ipv4_off_lock);
	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		if (entry->ipv4_addr == ipv4) {
			entry->ref_count--;
			if (!entry->ref_count) {
				list_del(&entry->list);
				kfree(entry);
			}
			break;
		}
	}
	mutex_unlock(&priv->tun.ipv4_off_lock);

	nfp_tun_write_ipv4_list(app);
}

static void nfp_tun_write_ipv6_list(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv6_addr_entry *entry;
	struct nfp_tun_ipv6_addr payload;
	int count = 0;

	memset(&payload, 0, sizeof(struct nfp_tun_ipv6_addr));
	mutex_lock(&priv->tun.ipv6_off_lock);
	list_for_each_entry(entry, &priv->tun.ipv6_off_list, list) {
		if (count >= NFP_FL_IPV6_ADDRS_MAX) {
			nfp_flower_cmsg_warn(app, "Too many IPv6 tunnel endpoint addresses, some cannot be offloaded.\n");
			break;
		}
		payload.ipv6_addr[count++] = entry->ipv6_addr;
	}
	mutex_unlock(&priv->tun.ipv6_off_lock);
	payload.count = cpu_to_be32(count);

	nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS_V6,
				 sizeof(struct nfp_tun_ipv6_addr),
				 &payload, GFP_KERNEL);
}

struct nfp_ipv6_addr_entry *
nfp_tunnel_add_ipv6_off(struct nfp_app *app, struct in6_addr *ipv6)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv6_addr_entry *entry;

	mutex_lock(&priv->tun.ipv6_off_lock);
	list_for_each_entry(entry, &priv->tun.ipv6_off_list, list)
		if (!memcmp(&entry->ipv6_addr, ipv6, sizeof(*ipv6))) {
			entry->ref_count++;
			mutex_unlock(&priv->tun.ipv6_off_lock);
			return entry;
		}

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		mutex_unlock(&priv->tun.ipv6_off_lock);
		nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
		return NULL;
	}
	entry->ipv6_addr = *ipv6;
	entry->ref_count = 1;
	list_add_tail(&entry->list, &priv->tun.ipv6_off_list);
	mutex_unlock(&priv->tun.ipv6_off_lock);

	nfp_tun_write_ipv6_list(app);

	return entry;
}

void
nfp_tunnel_put_ipv6_off(struct nfp_app *app, struct nfp_ipv6_addr_entry *entry)
{
	struct nfp_flower_priv *priv = app->priv;
	bool freed = false;

	mutex_lock(&priv->tun.ipv6_off_lock);
	if (!--entry->ref_count) {
		list_del(&entry->list);
		kfree(entry);
		freed = true;
	}
	mutex_unlock(&priv->tun.ipv6_off_lock);

	if (freed)
		nfp_tun_write_ipv6_list(app);
}

static int
__nfp_tunnel_offload_mac(struct nfp_app *app, u8 *mac, u16 idx, bool del)
{
	struct nfp_tun_mac_addr_offload payload;

	memset(&payload, 0, sizeof(payload));

	if (del)
		payload.flags = cpu_to_be16(NFP_TUN_MAC_OFFLOAD_DEL_FLAG);

	/* FW supports multiple MACs per cmsg but restrict to single. */
	payload.count = cpu_to_be16(1);
	payload.index = cpu_to_be16(idx);
	ether_addr_copy(payload.addr, mac);

	return nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_MAC,
					sizeof(struct nfp_tun_mac_addr_offload),
					&payload, GFP_KERNEL);
}

static bool nfp_tunnel_port_is_phy_repr(int port)
{
	if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
	    NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT)
		return true;

	return false;
}

static u16 nfp_tunnel_get_mac_idx_from_phy_port_id(int port)
{
	return port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT;
}

static u16 nfp_tunnel_get_global_mac_idx_from_ida(int id)
{
	return id << 8 | NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
}

static int nfp_tunnel_get_ida_from_global_mac_idx(u16 nfp_mac_idx)
{
	return nfp_mac_idx >> 8;
}

static bool nfp_tunnel_is_mac_idx_global(u16 nfp_mac_idx)
{
	return (nfp_mac_idx & 0xff) == NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
}

static struct nfp_tun_offloaded_mac *
nfp_tunnel_lookup_offloaded_macs(struct nfp_app *app, u8 *mac)
{
	struct nfp_flower_priv *priv = app->priv;

	return rhashtable_lookup_fast(&priv->tun.offloaded_macs, mac,
				      offloaded_macs_params);
}

static void
nfp_tunnel_offloaded_macs_inc_ref_and_link(struct nfp_tun_offloaded_mac *entry,
					   struct net_device *netdev, bool mod)
{
	if (nfp_netdev_is_nfp_repr(netdev)) {
		struct nfp_flower_repr_priv *repr_priv;
		struct nfp_repr *repr;

		repr = netdev_priv(netdev);
		repr_priv = repr->app_priv;

		/* If modifying MAC, remove repr from old list first. */
		if (mod)
			list_del(&repr_priv->mac_list);

		list_add_tail(&repr_priv->mac_list, &entry->repr_list);
	} else if (nfp_flower_is_supported_bridge(netdev)) {
		entry->bridge_count++;
	}

	entry->ref_count++;
}

static int
nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
			  int port, bool mod)
{
	struct nfp_flower_priv *priv = app->priv;
	int ida_idx = NFP_MAX_MAC_INDEX, err;
	struct nfp_tun_offloaded_mac *entry;
	u16 nfp_mac_idx = 0;

	entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr);
	if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) {
		if (entry->bridge_count ||
		    !nfp_flower_is_supported_bridge(netdev)) {
			nfp_tunnel_offloaded_macs_inc_ref_and_link(entry,
								   netdev, mod);
			return 0;
		}

		/* MAC is global but matches need to go to pre_tun table. */
		nfp_mac_idx = entry->index | NFP_TUN_PRE_TUN_IDX_BIT;
	}

	if (!nfp_mac_idx) {
		/* Assign a global index if non-repr or MAC is now shared. */
		if (entry || !port) {
			ida_idx = ida_simple_get(&priv->tun.mac_off_ids, 0,
						 NFP_MAX_MAC_INDEX, GFP_KERNEL);
			if (ida_idx < 0)
				return ida_idx;

			nfp_mac_idx =
				nfp_tunnel_get_global_mac_idx_from_ida(ida_idx);

			if (nfp_flower_is_supported_bridge(netdev))
				nfp_mac_idx |= NFP_TUN_PRE_TUN_IDX_BIT;

		} else {
			nfp_mac_idx =
				nfp_tunnel_get_mac_idx_from_phy_port_id(port);
		}
	}

	if (!entry) {
		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			err = -ENOMEM;
			goto err_free_ida;
		}

		ether_addr_copy(entry->addr, netdev->dev_addr);
		INIT_LIST_HEAD(&entry->repr_list);

		if (rhashtable_insert_fast(&priv->tun.offloaded_macs,
					   &entry->ht_node,
					   offloaded_macs_params)) {
			err = -ENOMEM;
			goto err_free_entry;
		}
	}

	err = __nfp_tunnel_offload_mac(app, netdev->dev_addr,
				       nfp_mac_idx, false);
	if (err) {
		/* If not shared then free. */
		if (!entry->ref_count)
			goto err_remove_hash;
		goto err_free_ida;
	}

	entry->index = nfp_mac_idx;
	nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, netdev, mod);

	return 0;

err_remove_hash:
	rhashtable_remove_fast(&priv->tun.offloaded_macs, &entry->ht_node,
			       offloaded_macs_params);
err_free_entry:
	kfree(entry);
err_free_ida:
	if (ida_idx != NFP_MAX_MAC_INDEX)
		ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);

	return err;
}

static int
nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
			  u8 *mac, bool mod)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_tun_offloaded_mac *entry;
	struct nfp_repr *repr;
	int ida_idx;

	entry = nfp_tunnel_lookup_offloaded_macs(app, mac);
	if (!entry)
		return 0;

	entry->ref_count--;

	if (nfp_netdev_is_nfp_repr(netdev) && !mod) {
		repr = netdev_priv(netdev);
		repr_priv = repr->app_priv;
		list_del(&repr_priv->mac_list);
	}

	if (nfp_flower_is_supported_bridge(netdev)) {
		entry->bridge_count--;

		if (!entry->bridge_count && entry->ref_count) {
			u16 nfp_mac_idx;

			nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
			if (__nfp_tunnel_offload_mac(app, mac, nfp_mac_idx,
						     false)) {
				nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n",
						     netdev_name(netdev));
				return 0;
			}

			entry->index = nfp_mac_idx;
			return 0;
		}
	}

	/* If MAC is now used by 1 repr set the offloaded MAC index to port. */
	if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) {
		u16 nfp_mac_idx;
		int port, err;

		repr_priv = list_first_entry(&entry->repr_list,
					     struct nfp_flower_repr_priv,
					     mac_list);
		repr = repr_priv->nfp_repr;
		port = nfp_repr_get_port_id(repr->netdev);
		nfp_mac_idx = nfp_tunnel_get_mac_idx_from_phy_port_id(port);
		err = __nfp_tunnel_offload_mac(app, mac, nfp_mac_idx, false);
		if (err) {
			nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n",
					     netdev_name(netdev));
			return 0;
		}

		ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
		ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
		entry->index = nfp_mac_idx;
		return 0;
	}

	if (entry->ref_count)
		return 0;

	WARN_ON_ONCE(rhashtable_remove_fast(&priv->tun.offloaded_macs,
					    &entry->ht_node,
					    offloaded_macs_params));

	if (nfp_tunnel_is_mac_idx_global(entry->index)) {
		ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
		ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
	}

	kfree(entry);

	return __nfp_tunnel_offload_mac(app, mac, 0, true);
}

static int
nfp_tunnel_offload_mac(struct nfp_app *app, struct net_device *netdev,
		       enum nfp_flower_mac_offload_cmd cmd)
{
	struct nfp_flower_non_repr_priv *nr_priv = NULL;
	bool non_repr = false, *mac_offloaded;
	u8 *off_mac = NULL;
	int err, port = 0;

	if (nfp_netdev_is_nfp_repr(netdev)) {
		struct nfp_flower_repr_priv *repr_priv;
		struct nfp_repr *repr;

		repr = netdev_priv(netdev);
		if (repr->app != app)
			return 0;

		repr_priv = repr->app_priv;
		if (repr_priv->on_bridge)
			return 0;

		mac_offloaded = &repr_priv->mac_offloaded;
		off_mac = &repr_priv->offloaded_mac_addr[0];
		port = nfp_repr_get_port_id(netdev);
		if (!nfp_tunnel_port_is_phy_repr(port))
			return 0;
	} else if (nfp_fl_is_netdev_to_offload(netdev)) {
		nr_priv = nfp_flower_non_repr_priv_get(app, netdev);
		if (!nr_priv)
			return -ENOMEM;

		mac_offloaded = &nr_priv->mac_offloaded;
		off_mac = &nr_priv->offloaded_mac_addr[0];
		non_repr = true;
	} else {
		return 0;
	}

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		err = -EINVAL;
		goto err_put_non_repr_priv;
	}

	if (cmd == NFP_TUNNEL_MAC_OFFLOAD_MOD && !*mac_offloaded)
		cmd = NFP_TUNNEL_MAC_OFFLOAD_ADD;

	switch (cmd) {
	case NFP_TUNNEL_MAC_OFFLOAD_ADD:
		err = nfp_tunnel_add_shared_mac(app, netdev, port, false);
		if (err)
			goto err_put_non_repr_priv;

		if (non_repr)
			__nfp_flower_non_repr_priv_get(nr_priv);

		*mac_offloaded = true;
		ether_addr_copy(off_mac, netdev->dev_addr);
		break;
	case NFP_TUNNEL_MAC_OFFLOAD_DEL:
		/* Only attempt delete if add was successful. */
		if (!*mac_offloaded)
			break;

		if (non_repr)
			__nfp_flower_non_repr_priv_put(nr_priv);

		*mac_offloaded = false;

		err = nfp_tunnel_del_shared_mac(app, netdev, netdev->dev_addr,
						false);
		if (err)
			goto err_put_non_repr_priv;

		break;
	case NFP_TUNNEL_MAC_OFFLOAD_MOD:
		/* Ignore if changing to the same address. */
		if (ether_addr_equal(netdev->dev_addr, off_mac))
			break;

		err = nfp_tunnel_add_shared_mac(app, netdev, port, true);
		if (err)
			goto err_put_non_repr_priv;

		/* Delete the previous MAC address. */
		err = nfp_tunnel_del_shared_mac(app, netdev, off_mac, true);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to remove offload of replaced MAC addr on %s.\n",
					     netdev_name(netdev));

		ether_addr_copy(off_mac, netdev->dev_addr);
		break;
	default:
		err = -EINVAL;
		goto err_put_non_repr_priv;
	}

	if (non_repr)
		__nfp_flower_non_repr_priv_put(nr_priv);

	return 0;

err_put_non_repr_priv:
	if (non_repr)
		__nfp_flower_non_repr_priv_put(nr_priv);

	return err;
}

int nfp_tunnel_mac_event_handler(struct nfp_app *app,
				 struct net_device *netdev,
				 unsigned long event, void *ptr)
{
	int err;

	if (event == NETDEV_DOWN) {
		err = nfp_tunnel_offload_mac(app, netdev,
					     NFP_TUNNEL_MAC_OFFLOAD_DEL);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to delete offload MAC on %s.\n",
					     netdev_name(netdev));
	} else if (event == NETDEV_UP) {
		err = nfp_tunnel_offload_mac(app, netdev,
					     NFP_TUNNEL_MAC_OFFLOAD_ADD);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n",
					     netdev_name(netdev));
	} else if (event == NETDEV_CHANGEADDR) {
		/* Only offload addr change if netdev is already up. */
		if (!(netdev->flags & IFF_UP))
			return NOTIFY_OK;

		err = nfp_tunnel_offload_mac(app, netdev,
					     NFP_TUNNEL_MAC_OFFLOAD_MOD);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to offload MAC change on %s.\n",
					     netdev_name(netdev));
	} else if (event == NETDEV_CHANGEUPPER) {
		/* If a repr is attached to a bridge then tunnel packets
		 * entering the physical port are directed through the bridge
		 * datapath and cannot be directly detunneled. Therefore,
		 * associated offloaded MACs and indexes should not be used
		 * by fw for detunneling.
		 */
		struct netdev_notifier_changeupper_info *info = ptr;
		struct net_device *upper = info->upper_dev;
		struct nfp_flower_repr_priv *repr_priv;
		struct nfp_repr *repr;

		if (!nfp_netdev_is_nfp_repr(netdev) ||
		    !nfp_flower_is_supported_bridge(upper))
			return NOTIFY_OK;

		repr = netdev_priv(netdev);
		if (repr->app != app)
			return NOTIFY_OK;

		repr_priv = repr->app_priv;

		if (info->linking) {
			if (nfp_tunnel_offload_mac(app, netdev,
						   NFP_TUNNEL_MAC_OFFLOAD_DEL))
				nfp_flower_cmsg_warn(app, "Failed to delete offloaded MAC on %s.\n",
						     netdev_name(netdev));
			repr_priv->on_bridge = true;
		} else {
			repr_priv->on_bridge = false;

			if (!(netdev->flags & IFF_UP))
				return NOTIFY_OK;

			if (nfp_tunnel_offload_mac(app, netdev,
						   NFP_TUNNEL_MAC_OFFLOAD_ADD))
				nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n",
						     netdev_name(netdev));
		}
	}
	return NOTIFY_OK;
}

int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app,
				 struct nfp_fl_payload *flow)
{
	struct nfp_flower_priv *app_priv = app->priv;
	struct nfp_tun_offloaded_mac *mac_entry;
	struct nfp_flower_meta_tci *key_meta;
	struct nfp_tun_pre_tun_rule payload;
	struct net_device *internal_dev;
	int err;

	if (app_priv->pre_tun_rule_cnt == NFP_TUN_PRE_TUN_RULE_LIMIT)
		return -ENOSPC;

	memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule));

	internal_dev = flow->pre_tun_rule.dev;
	payload.vlan_tci = flow->pre_tun_rule.vlan_tci;
	payload.host_ctx_id = flow->meta.host_ctx_id;

	/* Lookup MAC index for the pre-tunnel rule egress device.
	 * Note that because the device is always an internal port, it will
	 * have a constant global index so does not need to be tracked.
	 */
	mac_entry = nfp_tunnel_lookup_offloaded_macs(app,
						     internal_dev->dev_addr);
	if (!mac_entry)
		return -ENOENT;

	/* Set/clear IPV6 bit. cpu_to_be16() swap will lead to MSB being
	 * set/clear for port_idx.
	 */
	key_meta = (struct nfp_flower_meta_tci *)flow->unmasked_data;
	if (key_meta->nfp_flow_key_layer & NFP_FLOWER_LAYER_IPV6)
		mac_entry->index |= NFP_TUN_PRE_TUN_IPV6_BIT;
	else
		mac_entry->index &= ~NFP_TUN_PRE_TUN_IPV6_BIT;

	payload.port_idx = cpu_to_be16(mac_entry->index);

	/* Copy mac id and vlan to flow - dev may not exist at delete time. */
	flow->pre_tun_rule.vlan_tci = payload.vlan_tci;
	flow->pre_tun_rule.port_idx = payload.port_idx;

	err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE,
				       sizeof(struct nfp_tun_pre_tun_rule),
				       (unsigned char *)&payload, GFP_KERNEL);
	if (err)
		return err;

	app_priv->pre_tun_rule_cnt++;

	return 0;
}

int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app,
				     struct nfp_fl_payload *flow)
{
	struct nfp_flower_priv *app_priv = app->priv;
	struct nfp_tun_pre_tun_rule payload;
	u32 tmp_flags = 0;
	int err;

	memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule));

	tmp_flags |= NFP_TUN_PRE_TUN_RULE_DEL;
	payload.flags = cpu_to_be32(tmp_flags);
	payload.vlan_tci = flow->pre_tun_rule.vlan_tci;
	payload.port_idx = flow->pre_tun_rule.port_idx;

	err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE,
				       sizeof(struct nfp_tun_pre_tun_rule),
				       (unsigned char *)&payload, GFP_KERNEL);
	if (err)
		return err;

	app_priv->pre_tun_rule_cnt--;

	return 0;
}

int nfp_tunnel_config_start(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	int err;

	/* Initialise rhash for MAC offload tracking. */
	err = rhashtable_init(&priv->tun.offloaded_macs,
			      &offloaded_macs_params);
	if (err)
		return err;

	ida_init(&priv->tun.mac_off_ids);

	/* Initialise priv data for IPv4/v6 offloading. */
	mutex_init(&priv->tun.ipv4_off_lock);
	INIT_LIST_HEAD(&priv->tun.ipv4_off_list);
	mutex_init(&priv->tun.ipv6_off_lock);
	INIT_LIST_HEAD(&priv->tun.ipv6_off_list);

	/* Initialise priv data for neighbour offloading. */
	spin_lock_init(&priv->tun.neigh_off_lock_v4);
	INIT_LIST_HEAD(&priv->tun.neigh_off_list_v4);
	spin_lock_init(&priv->tun.neigh_off_lock_v6);
	INIT_LIST_HEAD(&priv->tun.neigh_off_list_v6);
	priv->tun.neigh_nb.notifier_call = nfp_tun_neigh_event_handler;

	err = register_netevent_notifier(&priv->tun.neigh_nb);
	if (err) {
		rhashtable_free_and_destroy(&priv->tun.offloaded_macs,
					    nfp_check_rhashtable_empty, NULL);
		return err;
	}

	return 0;
}

void nfp_tunnel_config_stop(struct nfp_app *app)
{
	struct nfp_offloaded_route *route_entry, *temp;
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_addr_entry *ip_entry;
	struct nfp_tun_neigh_v6 ipv6_route;
	struct nfp_tun_neigh ipv4_route;
	struct list_head *ptr, *storage;

	unregister_netevent_notifier(&priv->tun.neigh_nb);

	ida_destroy(&priv->tun.mac_off_ids);

	/* Free any memory that may be occupied by ipv4 list. */
	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
		ip_entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		list_del(&ip_entry->list);
		kfree(ip_entry);
	}

	mutex_destroy(&priv->tun.ipv6_off_lock);

	/* Free memory in the route list and remove entries from fw cache. */
	list_for_each_entry_safe(route_entry, temp,
				 &priv->tun.neigh_off_list_v4, list) {
		memset(&ipv4_route, 0, sizeof(ipv4_route));
		memcpy(&ipv4_route.dst_ipv4, &route_entry->ip_add,
		       sizeof(ipv4_route.dst_ipv4));
		list_del(&route_entry->list);
		kfree(route_entry);

		nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH,
					 sizeof(struct nfp_tun_neigh),
					 (unsigned char *)&ipv4_route,
					 GFP_KERNEL);
	}

	list_for_each_entry_safe(route_entry, temp,
				 &priv->tun.neigh_off_list_v6, list) {
		memset(&ipv6_route, 0, sizeof(ipv6_route));
		memcpy(&ipv6_route.dst_ipv6, &route_entry->ip_add,
		       sizeof(ipv6_route.dst_ipv6));
		list_del(&route_entry->list);
		kfree(route_entry);

		nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6,
					 sizeof(struct nfp_tun_neigh_v6),
					 (unsigned char *)&ipv6_route,
					 GFP_KERNEL);
	}

	/* Destroy rhash. Entries should be cleaned on flower removal. */
	rhashtable_free_and_destroy(&priv->tun.offloaded_macs,
				    nfp_check_rhashtable_empty, NULL);
}