#include "core_priv.h"

#include <linux/in.h>
#include <linux/in6.h>

/* For in6_dev_get()/in6_dev_put() */
#include <net/addrconf.h>
#include <net/bonding.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>

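/*
 * All GID-cache updates are funneled through a single ordered
 * workqueue, so add/del work items cannot race with each other.
 */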
static struct workqueue_struct *gid_cache_wq;

enum gid_op_type {
	GID_DEL = 0,
	GID_ADD
};

struct update_gid_event_work {
	struct work_struct work;
	union ib_gid gid;
	struct ib_gid_attr gid_attr;
	enum gid_op_type gid_op;
};

#define ROCE_NETDEV_CALLBACK_SZ 3
struct netdev_event_work_cmd {
	roce_netdev_callback cb;
	roce_netdev_filter filter;
	struct net_device *ndev;
	struct net_device *filter_ndev;
};

struct netdev_event_work {
	struct work_struct work;
	struct netdev_event_work_cmd cmds[ROCE_NETDEV_CALLBACK_SZ];
};

static const struct {
	bool (*is_supported)(const struct ib_device *device, u8 port_num);
	enum ib_gid_type gid_type;
} PORT_CAP_TO_GID_TYPE[] = {
	{rdma_protocol_roce_eth_encap, IB_GID_TYPE_ROCE},
	{rdma_protocol_roce_udp_encap, IB_GID_TYPE_ROCE_UDP_ENCAP},
};

#define CAP_TO_GID_TABLE_SIZE	ARRAY_SIZE(PORT_CAP_TO_GID_TYPE)

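/*
 * Return the mask of GID types (IB_GID_TYPE_*) that @port of @ib_dev
 * supports, based on the PORT_CAP_TO_GID_TYPE table above; a non-RoCE
 * port reports only IB_GID_TYPE_IB. A hypothetical caller sketch:
 *
 *	unsigned long mask = roce_gid_type_mask_support(ib_dev, port);
 *
 *	if (mask & (1UL << IB_GID_TYPE_ROCE_UDP_ENCAP))
 *		setup_rocev2_gids();	(hypothetical helper)
 */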
unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port)
{
	int i;
	unsigned int ret_flags = 0;

	if (!rdma_protocol_roce(ib_dev, port))
		return 1UL << IB_GID_TYPE_IB;

	for (i = 0; i < CAP_TO_GID_TABLE_SIZE; i++)
		if (PORT_CAP_TO_GID_TYPE[i].is_supported(ib_dev, port))
			ret_flags |= 1UL << PORT_CAP_TO_GID_TYPE[i].gid_type;

	return ret_flags;
}
EXPORT_SYMBOL(roce_gid_type_mask_support);
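
/*
 * Fan a single GID add/del out to every GID type the port supports;
 * the GID cache keeps one entry per (GID, GID type) pair.
 */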
static void update_gid(enum gid_op_type gid_op, struct ib_device *ib_dev,
		       u8 port, union ib_gid *gid,
		       struct ib_gid_attr *gid_attr)
{
	int i;
	unsigned long gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

	for (i = 0; i < IB_GID_TYPE_SIZE; i++) {
		if ((1UL << i) & gid_type_mask) {
			gid_attr->gid_type = i;
			switch (gid_op) {
			case GID_ADD:
				ib_cache_gid_add(ib_dev, port,
						 gid, gid_attr);
				break;
			case GID_DEL:
				ib_cache_gid_del(ib_dev, port,
						 gid, gid_attr);
				break;
			}
		}
	}
}

enum bonding_slave_state {
	BONDING_SLAVE_STATE_ACTIVE = 1UL << 0,
	BONDING_SLAVE_STATE_INACTIVE = 1UL << 1,
	/* No primary slave, or the netdev is not a bonding slave at all */
	BONDING_SLAVE_STATE_NA = 1UL << 2,
};

static enum bonding_slave_state is_eth_active_slave_of_bonding_rcu(struct net_device *dev,
								   struct net_device *upper)
{
	if (upper && netif_is_bond_master(upper)) {
		struct net_device *pdev =
			bond_option_active_slave_get_rcu(netdev_priv(upper));

		if (pdev)
			return dev == pdev ? BONDING_SLAVE_STATE_ACTIVE :
				BONDING_SLAVE_STATE_INACTIVE;
	}

	return BONDING_SLAVE_STATE_NA;
}
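
/*
 * Match when traffic for @cookie (the event netdev, possibly a VLAN)
 * would egress through @rdma_ndev: either the underlying real device
 * is @rdma_ndev itself, or @rdma_ndev sits below @cookie and is the
 * active (or sole) bonding slave.
 */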
#define REQUIRED_BOND_STATES	(BONDING_SLAVE_STATE_ACTIVE |	\
				 BONDING_SLAVE_STATE_NA)
static int is_eth_port_of_netdev(struct ib_device *ib_dev, u8 port,
				 struct net_device *rdma_ndev, void *cookie)
{
	struct net_device *real_dev;
	int res;

	if (!rdma_ndev)
		return 0;

	rcu_read_lock();
	real_dev = rdma_vlan_dev_real_dev(cookie);
	if (!real_dev)
		real_dev = cookie;

	res = ((rdma_is_upper_dev_rcu(rdma_ndev, cookie) &&
	       (is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) &
		REQUIRED_BOND_STATES)) ||
	       real_dev == rdma_ndev);

	rcu_read_unlock();
	return res;
}

static int is_eth_port_inactive_slave(struct ib_device *ib_dev, u8 port,
				      struct net_device *rdma_ndev, void *cookie)
{
	struct net_device *master_dev;
	int res;

	if (!rdma_ndev)
		return 0;

	rcu_read_lock();
	master_dev = netdev_master_upper_dev_get_rcu(rdma_ndev);
	res = is_eth_active_slave_of_bonding_rcu(rdma_ndev, master_dev) ==
		BONDING_SLAVE_STATE_INACTIVE;
	rcu_read_unlock();

	return res;
}

static int pass_all_filter(struct ib_device *ib_dev, u8 port,
			   struct net_device *rdma_ndev, void *cookie)
{
	return 1;
}

static int upper_device_filter(struct ib_device *ib_dev, u8 port,
			       struct net_device *rdma_ndev, void *cookie)
{
	int res;

	if (!rdma_ndev)
		return 0;

	if (rdma_ndev == cookie)
		return 1;

	rcu_read_lock();
	res = rdma_is_upper_dev_rcu(rdma_ndev, cookie);
	rcu_read_unlock();

	return res;
}

static void update_gid_ip(enum gid_op_type gid_op,
			  struct ib_device *ib_dev,
			  u8 port, struct net_device *ndev,
			  struct sockaddr *addr)
{
	union ib_gid gid;
	struct ib_gid_attr gid_attr;

	rdma_ip2gid(addr, &gid);
	memset(&gid_attr, 0, sizeof(gid_attr));
	gid_attr.ndev = ndev;

	update_gid(gid_op, ib_dev, port, &gid, &gid_attr);
}
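
/*
 * Install the port's default GIDs for @rdma_ndev, unless @rdma_ndev is
 * unrelated to @event_ndev or is currently an inactive bonding slave
 * (an inactive slave must not own the default GIDs).
 */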
static void enum_netdev_default_gids(struct ib_device *ib_dev,
				     u8 port, struct net_device *event_ndev,
				     struct net_device *rdma_ndev)
{
	unsigned long gid_type_mask;

	rcu_read_lock();
	if (!rdma_ndev ||
	    ((rdma_ndev != event_ndev &&
	      !rdma_is_upper_dev_rcu(rdma_ndev, event_ndev)) ||
	     is_eth_active_slave_of_bonding_rcu(rdma_ndev,
						netdev_master_upper_dev_get_rcu(rdma_ndev)) ==
	     BONDING_SLAVE_STATE_INACTIVE)) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

	ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev, gid_type_mask,
				     IB_CACHE_GID_DEFAULT_MODE_SET);
}

static void bond_delete_netdev_default_gids(struct ib_device *ib_dev,
					    u8 port,
					    struct net_device *event_ndev,
					    struct net_device *rdma_ndev)
{
	struct net_device *real_dev = rdma_vlan_dev_real_dev(event_ndev);
	unsigned long gid_type_mask;

	if (!rdma_ndev)
		return;

	if (!real_dev)
		real_dev = event_ndev;

	rcu_read_lock();

	if (((rdma_ndev != event_ndev &&
	      !rdma_is_upper_dev_rcu(rdma_ndev, event_ndev)) ||
	     is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) ==
	     BONDING_SLAVE_STATE_INACTIVE)) {
		rcu_read_unlock();
		return;
	}

	rcu_read_unlock();

	gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

	ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev,
				     gid_type_mask,
				     IB_CACHE_GID_DEFAULT_MODE_DELETE);
}
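
/*
 * Walk @ndev's IPv4 addresses and add a GID for each. The addresses
 * are copied to a private list under RCU (hence GFP_ATOMIC), and the
 * GID cache is only touched after the lock is dropped, since the
 * cache update may sleep.
 */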
static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
				 u8 port, struct net_device *ndev)
{
	struct in_device *in_dev;
	struct sin_list {
		struct list_head list;
		struct sockaddr_in ip;
	};
	struct sin_list *sin_iter;
	struct sin_list *sin_temp;

	LIST_HEAD(sin_list);
	if (ndev->reg_state >= NETREG_UNREGISTERING)
		return;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(ndev);
	if (!in_dev) {
		rcu_read_unlock();
		return;
	}

	for_ifa(in_dev) {
		struct sin_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);

		if (!entry)
			continue;

		entry->ip.sin_family = AF_INET;
		entry->ip.sin_addr.s_addr = ifa->ifa_address;
		list_add_tail(&entry->list, &sin_list);
	}
	endfor_ifa(in_dev);
	rcu_read_unlock();

	list_for_each_entry_safe(sin_iter, sin_temp, &sin_list, list) {
		update_gid_ip(GID_ADD, ib_dev, port, ndev,
			      (struct sockaddr *)&sin_iter->ip);
		list_del(&sin_iter->list);
		kfree(sin_iter);
	}
}

static void enum_netdev_ipv6_ips(struct ib_device *ib_dev,
				 u8 port, struct net_device *ndev)
{
	struct inet6_ifaddr *ifp;
	struct inet6_dev *in6_dev;
	struct sin6_list {
		struct list_head list;
		struct sockaddr_in6 sin6;
	};
	struct sin6_list *sin6_iter;
	struct sin6_list *sin6_temp;
	struct ib_gid_attr gid_attr = {.ndev = ndev};
	LIST_HEAD(sin6_list);

	if (ndev->reg_state >= NETREG_UNREGISTERING)
		return;

	in6_dev = in6_dev_get(ndev);
	if (!in6_dev)
		return;

	read_lock_bh(&in6_dev->lock);
	list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
		struct sin6_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);

		if (!entry)
			continue;

		entry->sin6.sin6_family = AF_INET6;
		entry->sin6.sin6_addr = ifp->addr;
		list_add_tail(&entry->list, &sin6_list);
	}
	read_unlock_bh(&in6_dev->lock);

	in6_dev_put(in6_dev);

	list_for_each_entry_safe(sin6_iter, sin6_temp, &sin6_list, list) {
		union ib_gid gid;

		rdma_ip2gid((struct sockaddr *)&sin6_iter->sin6, &gid);
		update_gid(GID_ADD, ib_dev, port, &gid, &gid_attr);
		list_del(&sin6_iter->list);
		kfree(sin6_iter);
	}
}

static void _add_netdev_ips(struct ib_device *ib_dev, u8 port,
			    struct net_device *ndev)
{
	enum_netdev_ipv4_ips(ib_dev, port, ndev);
	if (IS_ENABLED(CONFIG_IPV6))
		enum_netdev_ipv6_ips(ib_dev, port, ndev);
}

static void add_netdev_ips(struct ib_device *ib_dev, u8 port,
			   struct net_device *rdma_ndev, void *cookie)
{
	enum_netdev_default_gids(ib_dev, port, cookie, rdma_ndev);
	_add_netdev_ips(ib_dev, port, cookie);
}

static void del_netdev_ips(struct ib_device *ib_dev, u8 port,
			   struct net_device *rdma_ndev, void *cookie)
{
	ib_cache_gid_del_all_netdev_gids(ib_dev, port, cookie);
}

static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev,
				    u8 port,
				    struct net_device *rdma_ndev,
				    void *cookie)
{
	struct net *net;
	struct net_device *ndev;

	/* Hold the rtnl lock so that the netdevs cannot move under our
	 * feet while we walk every network namespace.
	 */
	rtnl_lock();
	down_read(&net_rwsem);
	for_each_net(net)
		for_each_netdev(net, ndev)
			if (is_eth_port_of_netdev(ib_dev, port, rdma_ndev, ndev))
				add_netdev_ips(ib_dev, port, rdma_ndev, ndev);
	up_read(&net_rwsem);
	rtnl_unlock();
}
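
/**
 * rdma_roce_rescan_device - Rescan all of the network devices in the
 *   system and add their GIDs, as needed, to the relevant RoCE devices.
 * @ib_dev: the rdma device
 */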
void rdma_roce_rescan_device(struct ib_device *ib_dev)
{
	ib_enum_roce_netdev(ib_dev, pass_all_filter, NULL,
			    enum_all_gids_of_dev_cb, NULL);
}
EXPORT_SYMBOL(rdma_roce_rescan_device);

static void callback_for_addr_gid_device_scan(struct ib_device *device,
					      u8 port,
					      struct net_device *rdma_ndev,
					      void *cookie)
{
	struct update_gid_event_work *parsed = cookie;

	return update_gid(parsed->gid_op, device,
			  port, &parsed->gid,
			  &parsed->gid_attr);
}

struct upper_list {
	struct list_head list;
	struct net_device *upper;
};

static int netdev_upper_walk(struct net_device *upper, void *data)
{
	struct upper_list *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	struct list_head *upper_list = data;

	if (!entry)
		return 0;

	list_add_tail(&entry->list, upper_list);
	dev_hold(upper);
	entry->upper = upper;

	return 0;
}
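
/*
 * Invoke @handle_netdev on @cookie and on every upper device stacked
 * above it. The uppers are first collected, with references held,
 * under RCU, so the handler itself is free to sleep.
 */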
static void handle_netdev_upper(struct ib_device *ib_dev, u8 port,
				void *cookie,
				void (*handle_netdev)(struct ib_device *ib_dev,
						      u8 port,
						      struct net_device *ndev))
{
	struct net_device *ndev = cookie;
	struct upper_list *upper_iter;
	struct upper_list *upper_temp;
	LIST_HEAD(upper_list);

	rcu_read_lock();
	netdev_walk_all_upper_dev_rcu(ndev, netdev_upper_walk, &upper_list);
	rcu_read_unlock();

	handle_netdev(ib_dev, port, ndev);
	list_for_each_entry_safe(upper_iter, upper_temp, &upper_list,
				 list) {
		handle_netdev(ib_dev, port, upper_iter->upper);
		dev_put(upper_iter->upper);
		list_del(&upper_iter->list);
		kfree(upper_iter);
	}
}

static void _roce_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
				      struct net_device *event_ndev)
{
	ib_cache_gid_del_all_netdev_gids(ib_dev, port, event_ndev);
}

static void del_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
				 struct net_device *rdma_ndev, void *cookie)
{
	handle_netdev_upper(ib_dev, port, cookie, _roce_del_all_netdev_gids);
}

static void add_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
				 struct net_device *rdma_ndev, void *cookie)
{
	handle_netdev_upper(ib_dev, port, cookie, _add_netdev_ips);
}

static void del_netdev_default_ips_join(struct ib_device *ib_dev, u8 port,
					struct net_device *rdma_ndev,
					void *cookie)
{
	struct net_device *master_ndev;

	rcu_read_lock();
	master_ndev = netdev_master_upper_dev_get_rcu(rdma_ndev);
	if (master_ndev)
		dev_hold(master_ndev);
	rcu_read_unlock();

	if (master_ndev) {
		bond_delete_netdev_default_gids(ib_dev, port, master_ndev,
						rdma_ndev);
		dev_put(master_ndev);
	}
}

static void del_netdev_default_ips(struct ib_device *ib_dev, u8 port,
				   struct net_device *rdma_ndev, void *cookie)
{
	bond_delete_netdev_default_gids(ib_dev, port, cookie, rdma_ndev);
}
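
/* The following functions operate on all IB devices. netdevice_event
 * and addr_event execute ib_enum_all_roce_netdevs through a work.
 * ib_enum_all_roce_netdevs iterates through all IB devices.
 */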
static void netdevice_event_work_handler(struct work_struct *_work)
{
	struct netdev_event_work *work =
		container_of(_work, struct netdev_event_work, work);
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(work->cmds) && work->cmds[i].cb; i++) {
		ib_enum_all_roce_netdevs(work->cmds[i].filter,
					 work->cmds[i].filter_ndev,
					 work->cmds[i].cb,
					 work->cmds[i].ndev);
		dev_put(work->cmds[i].ndev);
		dev_put(work->cmds[i].filter_ndev);
	}

	kfree(work);
}

static int netdevice_queue_work(struct netdev_event_work_cmd *cmds,
				struct net_device *ndev)
{
	unsigned int i;
	struct netdev_event_work *ndev_work =
		kmalloc(sizeof(*ndev_work), GFP_KERNEL);

	if (!ndev_work)
		return NOTIFY_DONE;

	memcpy(ndev_work->cmds, cmds, sizeof(ndev_work->cmds));
	for (i = 0; i < ARRAY_SIZE(ndev_work->cmds) && ndev_work->cmds[i].cb; i++) {
		if (!ndev_work->cmds[i].ndev)
			ndev_work->cmds[i].ndev = ndev;
		if (!ndev_work->cmds[i].filter_ndev)
			ndev_work->cmds[i].filter_ndev = ndev;
		dev_hold(ndev_work->cmds[i].ndev);
		dev_hold(ndev_work->cmds[i].filter_ndev);
	}
	INIT_WORK(&ndev_work->work, netdevice_event_work_handler);

	queue_work(gid_cache_wq, &ndev_work->work);

	return NOTIFY_DONE;
}

static const struct netdev_event_work_cmd add_cmd = {
	.cb = add_netdev_ips, .filter = is_eth_port_of_netdev};
static const struct netdev_event_work_cmd add_cmd_upper_ips = {
	.cb = add_netdev_upper_ips, .filter = is_eth_port_of_netdev};

static void netdevice_event_changeupper(struct netdev_notifier_changeupper_info *changeupper_info,
					struct netdev_event_work_cmd *cmds)
{
	static const struct netdev_event_work_cmd upper_ips_del_cmd = {
		.cb = del_netdev_upper_ips, .filter = upper_device_filter};
	static const struct netdev_event_work_cmd bonding_default_del_cmd = {
		.cb = del_netdev_default_ips, .filter = is_eth_port_inactive_slave};

	if (changeupper_info->linking == false) {
		cmds[0] = upper_ips_del_cmd;
		cmds[0].ndev = changeupper_info->upper_dev;
		cmds[1] = add_cmd;
	} else {
		cmds[0] = bonding_default_del_cmd;
		cmds[0].ndev = changeupper_info->upper_dev;
		cmds[1] = add_cmd_upper_ips;
		cmds[1].ndev = changeupper_info->upper_dev;
		cmds[1].filter_ndev = changeupper_info->upper_dev;
	}
}
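
/*
 * Translate a netdevice notifier event into at most
 * ROCE_NETDEV_CALLBACK_SZ commands and queue them on the GID cache
 * workqueue, keeping the heavy GID-table work out of the notifier
 * chain (which runs under the RTNL lock).
 */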
static int netdevice_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	static const struct netdev_event_work_cmd del_cmd = {
		.cb = del_netdev_ips, .filter = pass_all_filter};
	static const struct netdev_event_work_cmd bonding_default_del_cmd_join = {
		.cb = del_netdev_default_ips_join, .filter = is_eth_port_inactive_slave};
	static const struct netdev_event_work_cmd default_del_cmd = {
		.cb = del_netdev_default_ips, .filter = pass_all_filter};
	static const struct netdev_event_work_cmd bonding_event_ips_del_cmd = {
		.cb = del_netdev_upper_ips, .filter = upper_device_filter};
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct netdev_event_work_cmd cmds[ROCE_NETDEV_CALLBACK_SZ] = { {NULL} };

	if (ndev->type != ARPHRD_ETHER)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_UP:
		cmds[0] = bonding_default_del_cmd_join;
		cmds[1] = add_cmd;
		break;

	case NETDEV_UNREGISTER:
		if (ndev->reg_state < NETREG_UNREGISTERED)
			cmds[0] = del_cmd;
		else
			return NOTIFY_DONE;
		break;

	case NETDEV_CHANGEADDR:
		cmds[0] = default_del_cmd;
		cmds[1] = add_cmd;
		break;

	case NETDEV_CHANGEUPPER:
		netdevice_event_changeupper(
			container_of(ptr, struct netdev_notifier_changeupper_info, info),
			cmds);
		break;

	case NETDEV_BONDING_FAILOVER:
		cmds[0] = bonding_event_ips_del_cmd;
		cmds[1] = bonding_default_del_cmd_join;
		cmds[2] = add_cmd_upper_ips;
		break;

	default:
		return NOTIFY_DONE;
	}

	return netdevice_queue_work(cmds, ndev);
}

static void update_gid_event_work_handler(struct work_struct *_work)
{
	struct update_gid_event_work *work =
		container_of(_work, struct update_gid_event_work, work);

	ib_enum_all_roce_netdevs(is_eth_port_of_netdev, work->gid_attr.ndev,
				 callback_for_addr_gid_device_scan, work);

	dev_put(work->gid_attr.ndev);
	kfree(work);
}
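
/*
 * The inetaddr/inet6addr notifiers may be invoked in atomic context
 * (hence GFP_ATOMIC), so only the address and a reference to the
 * netdev are captured here; the actual GID update is deferred to the
 * workqueue.
 */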
static int addr_event(struct notifier_block *this, unsigned long event,
		      struct sockaddr *sa, struct net_device *ndev)
{
	struct update_gid_event_work *work;
	enum gid_op_type gid_op;

	if (ndev->type != ARPHRD_ETHER)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		gid_op = GID_ADD;
		break;

	case NETDEV_DOWN:
		gid_op = GID_DEL;
		break;

	default:
		return NOTIFY_DONE;
	}

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return NOTIFY_DONE;

	INIT_WORK(&work->work, update_gid_event_work_handler);

	rdma_ip2gid(sa, &work->gid);
	work->gid_op = gid_op;

	memset(&work->gid_attr, 0, sizeof(work->gid_attr));
	dev_hold(ndev);
	work->gid_attr.ndev = ndev;

	queue_work(gid_cache_wq, &work->work);

	return NOTIFY_DONE;
}

static int inetaddr_event(struct notifier_block *this, unsigned long event,
			  void *ptr)
{
	struct sockaddr_in in;
	struct net_device *ndev;
	struct in_ifaddr *ifa = ptr;

	in.sin_family = AF_INET;
	in.sin_addr.s_addr = ifa->ifa_address;
	ndev = ifa->ifa_dev->dev;

	return addr_event(this, event, (struct sockaddr *)&in, ndev);
}

static int inet6addr_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct sockaddr_in6 in6;
	struct net_device *ndev;
	struct inet6_ifaddr *ifa6 = ptr;

	in6.sin6_family = AF_INET6;
	in6.sin6_addr = ifa6->addr;
	ndev = ifa6->idev->dev;

	return addr_event(this, event, (struct sockaddr *)&in6, ndev);
}

static struct notifier_block nb_netdevice = {
	.notifier_call = netdevice_event
};

static struct notifier_block nb_inetaddr = {
	.notifier_call = inetaddr_event
};

static struct notifier_block nb_inet6addr = {
	.notifier_call = inet6addr_event
};

int __init roce_gid_mgmt_init(void)
{
	gid_cache_wq = alloc_ordered_workqueue("gid-cache-wq", 0);
	if (!gid_cache_wq)
		return -ENOMEM;

	register_inetaddr_notifier(&nb_inetaddr);
	if (IS_ENABLED(CONFIG_IPV6))
		register_inet6addr_notifier(&nb_inet6addr);

	/* We rely on the netdevice notifier to enumerate all existing
	 * devices in the system. Register to this notifier last to
	 * make sure we will not miss any IP add/del callbacks.
	 */
	register_netdevice_notifier(&nb_netdevice);

	return 0;
}

void __exit roce_gid_mgmt_cleanup(void)
{
	if (IS_ENABLED(CONFIG_IPV6))
		unregister_inet6addr_notifier(&nb_inet6addr);
	unregister_inetaddr_notifier(&nb_inetaddr);
	unregister_netdevice_notifier(&nb_netdevice);

	/* Ensure all GID deletion work completes before we go down, to
	 * avoid any reference to freed memory. By the time ib_core is
	 * removed, all physical devices have been removed, so there is
	 * no issue with remaining hardware contexts.
	 */
	destroy_workqueue(gid_cache_wq);
}