1
2
3
4
5
6
7
8
9
10
11
12
13
14#include <linux/netdevice.h>
15#include <linux/rtnetlink.h>
16#include <linux/export.h>
17#include <linux/list.h>
18
19
20
21
22
23static int __hw_addr_create_ex(struct netdev_hw_addr_list *list,
24 const unsigned char *addr, int addr_len,
25 unsigned char addr_type, bool global,
26 bool sync)
27{
28 struct netdev_hw_addr *ha;
29 int alloc_size;
30
31 alloc_size = sizeof(*ha);
32 if (alloc_size < L1_CACHE_BYTES)
33 alloc_size = L1_CACHE_BYTES;
34 ha = kmalloc(alloc_size, GFP_ATOMIC);
35 if (!ha)
36 return -ENOMEM;
37 memcpy(ha->addr, addr, addr_len);
38 ha->type = addr_type;
39 ha->refcount = 1;
40 ha->global_use = global;
41 ha->synced = sync ? 1 : 0;
42 ha->sync_cnt = 0;
43 list_add_tail_rcu(&ha->list, &list->list);
44 list->count++;
45
46 return 0;
47}
48
/* Add a reference to @addr in @list, creating the entry if needed.
 *
 * When an entry with the same address and type already exists its
 * refcount is bumped instead of allocating a duplicate:
 *  - @global marks the reference as a "global" use; an entry can hold
 *    only one global reference, and a second global add returns 0
 *    without touching any counter.
 *  - @sync marks the reference as coming from an address sync; when
 *    the entry is already synced and the caller passes a non-zero
 *    @sync_count, -EEXIST tells the caller the sync already happened.
 *
 * Returns 0 on success, -EINVAL for an oversized address, -EEXIST as
 * described above, or -ENOMEM from entry creation.
 */
static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
			    const unsigned char *addr, int addr_len,
			    unsigned char addr_type, bool global, bool sync,
			    int sync_count)
{
	struct netdev_hw_addr *ha;

	if (addr_len > MAX_ADDR_LEN)
		return -EINVAL;

	list_for_each_entry(ha, &list->list, list) {
		if (ha->type == addr_type &&
		    !memcmp(ha->addr, addr, addr_len)) {
			if (global) {
				/* check if addr is already used as global */
				if (ha->global_use)
					return 0;
				else
					ha->global_use = true;
			}
			if (sync) {
				if (ha->synced && sync_count)
					return -EEXIST;
				else
					ha->synced++;
			}
			ha->refcount++;
			return 0;
		}
	}

	return __hw_addr_create_ex(list, addr, addr_len, addr_type, global,
				   sync);
}
83
84static int __hw_addr_add(struct netdev_hw_addr_list *list,
85 const unsigned char *addr, int addr_len,
86 unsigned char addr_type)
87{
88 return __hw_addr_add_ex(list, addr, addr_len, addr_type, false, false,
89 0);
90}
91
/* Drop one reference to @ha in @list, honouring the @global/@sync
 * reference kinds.
 *
 * Fails with -ENOENT when asked to drop a global or synced reference
 * the entry does not actually hold; in that case nothing is changed.
 * The entry is unlinked and RCU-freed only when the last reference
 * goes away.
 */
static int __hw_addr_del_entry(struct netdev_hw_addr_list *list,
			       struct netdev_hw_addr *ha, bool global,
			       bool sync)
{
	if (global && !ha->global_use)
		return -ENOENT;

	if (sync && !ha->synced)
		return -ENOENT;

	if (global)
		ha->global_use = false;

	if (sync)
		ha->synced--;

	/* keep the entry while other references remain */
	if (--ha->refcount)
		return 0;
	list_del_rcu(&ha->list);
	kfree_rcu(ha, rcu_head);
	list->count--;
	return 0;
}
115
116static int __hw_addr_del_ex(struct netdev_hw_addr_list *list,
117 const unsigned char *addr, int addr_len,
118 unsigned char addr_type, bool global, bool sync)
119{
120 struct netdev_hw_addr *ha;
121
122 list_for_each_entry(ha, &list->list, list) {
123 if (!memcmp(ha->addr, addr, addr_len) &&
124 (ha->type == addr_type || !addr_type))
125 return __hw_addr_del_entry(list, ha, global, sync);
126 }
127 return -ENOENT;
128}
129
130static int __hw_addr_del(struct netdev_hw_addr_list *list,
131 const unsigned char *addr, int addr_len,
132 unsigned char addr_type)
133{
134 return __hw_addr_del_ex(list, addr, addr_len, addr_type, false, false);
135}
136
/* Propagate one source-list address @ha into @to_list.
 *
 * -EEXIST from __hw_addr_add_ex() means @ha was already synced to
 * @to_list (possible when one source is synced to several
 * destinations); that is not an error, but the source's sync_cnt and
 * refcount are only bumped for a genuinely new sync.
 */
static int __hw_addr_sync_one(struct netdev_hw_addr_list *to_list,
			      struct netdev_hw_addr *ha,
			      int addr_len)
{
	int err;

	err = __hw_addr_add_ex(to_list, ha->addr, addr_len, ha->type,
			       false, true, ha->sync_cnt);
	if (err && err != -EEXIST)
		return err;

	if (!err) {
		ha->sync_cnt++;
		ha->refcount++;
	}

	return 0;
}
155
/* Undo a previous __hw_addr_sync_one(): remove the synced copy of
 * @ha from @to_list and drop the sync reference held on the
 * @from_list entry.  If @ha was never synced to @to_list, the
 * deletion fails with -ENOENT and nothing is changed.
 */
static void __hw_addr_unsync_one(struct netdev_hw_addr_list *to_list,
				 struct netdev_hw_addr_list *from_list,
				 struct netdev_hw_addr *ha,
				 int addr_len)
{
	int err;

	err = __hw_addr_del_ex(to_list, ha->addr, addr_len, ha->type,
			       false, true);
	if (err)
		return;
	ha->sync_cnt--;

	/* drop the reference the sync held on the source entry;
	 * this may free @ha, so it must come last
	 */
	__hw_addr_del_entry(from_list, ha, false, false);
}
171
/* Sync @from_list into @to_list for the "one source, many
 * destinations" case.
 *
 * An entry whose references are all sync references
 * (sync_cnt == refcount) has no local user left, so it is unsynced
 * and released; every other entry is (re)synced into @to_list.
 * Stops at, and returns, the first error from __hw_addr_sync_one().
 */
static int __hw_addr_sync_multiple(struct netdev_hw_addr_list *to_list,
				   struct netdev_hw_addr_list *from_list,
				   int addr_len)
{
	int err = 0;
	struct netdev_hw_addr *ha, *tmp;

	list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
		if (ha->sync_cnt == ha->refcount) {
			__hw_addr_unsync_one(to_list, from_list, ha, addr_len);
		} else {
			err = __hw_addr_sync_one(to_list, ha, addr_len);
			if (err)
				break;
		}
	}
	return err;
}
190
191
192
193
194
195
/**
 * __hw_addr_sync - Synchronize one address list to another (1:1 sync)
 * @to_list: destination address list
 * @from_list: source address list
 * @addr_len: number of bytes per address
 *
 * Never-synced entries (sync_cnt == 0) are pushed to @to_list; synced
 * entries whose only remaining reference is the sync itself
 * (refcount == 1) have lost their local user and are unsynced and
 * released.  Returns the first error from __hw_addr_sync_one(), or 0.
 */
int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
		   struct netdev_hw_addr_list *from_list,
		   int addr_len)
{
	int err = 0;
	struct netdev_hw_addr *ha, *tmp;

	list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
		if (!ha->sync_cnt) {
			err = __hw_addr_sync_one(to_list, ha, addr_len);
			if (err)
				break;
		} else if (ha->refcount == 1)
			__hw_addr_unsync_one(to_list, from_list, ha, addr_len);
	}
	return err;
}
EXPORT_SYMBOL(__hw_addr_sync);
214
215void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
216 struct netdev_hw_addr_list *from_list,
217 int addr_len)
218{
219 struct netdev_hw_addr *ha, *tmp;
220
221 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
222 if (ha->sync_cnt)
223 __hw_addr_unsync_one(to_list, from_list, ha, addr_len);
224 }
225}
226EXPORT_SYMBOL(__hw_addr_unsync);
227
228
229
230
231
232
233
234
235
236
237
238
239
240
/**
 * __hw_addr_sync_dev - Synchronize an address list directly to a device
 * @list: address list to sync
 * @dev: device the addresses are synced to
 * @sync: callback that pushes one new address to the device
 * @unsync: optional callback invoked before an address is removed;
 *	    a non-zero return vetoes removal of that address
 *
 * First pass: release addresses whose only remaining reference is the
 * sync itself (sync_cnt set, refcount == 1) — their local user is
 * gone.  Second pass: push not-yet-synced addresses to the device via
 * @sync, bumping sync_cnt and refcount on success.  Returns the first
 * error from @sync (earlier addresses stay synced), or 0.
 */
int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
		       struct net_device *dev,
		       int (*sync)(struct net_device *, const unsigned char *),
		       int (*unsync)(struct net_device *,
				     const unsigned char *))
{
	struct netdev_hw_addr *ha, *tmp;
	int err;

	/* first go through and flush out any stale entries */
	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		if (!ha->sync_cnt || ha->refcount != 1)
			continue;

		/* if unsync is defined and fails, defer unsyncing address */
		if (unsync && unsync(dev, ha->addr))
			continue;

		ha->sync_cnt--;
		__hw_addr_del_entry(list, ha, false, false);
	}

	/* go through and sync new entries to the list */
	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		if (ha->sync_cnt)
			continue;

		err = sync(dev, ha->addr);
		if (err)
			return err;

		ha->sync_cnt++;
		ha->refcount++;
	}

	return 0;
}
EXPORT_SYMBOL(__hw_addr_sync_dev);
279
280
281
282
283
284
285
286
287
288
289
290
291
/**
 * __hw_addr_unsync_dev - Remove synchronized addresses from a device
 * @list: address list to remove synced addresses from
 * @dev: device to unsync
 * @unsync: optional callback invoked before an address is removed;
 *	    a non-zero return vetoes removal of that address
 *
 * Remove from the device all addresses that were added by
 * __hw_addr_sync_dev().
 */
void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
			  struct net_device *dev,
			  int (*unsync)(struct net_device *,
					const unsigned char *))
{
	struct netdev_hw_addr *ha, *tmp;

	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		/* only entries actually synced to the device qualify */
		if (!ha->sync_cnt)
			continue;

		/* if unsync is defined and fails, defer unsyncing address */
		if (unsync && unsync(dev, ha->addr))
			continue;

		ha->sync_cnt--;
		__hw_addr_del_entry(list, ha, false, false);
	}
}
EXPORT_SYMBOL(__hw_addr_unsync_dev);
312
313static void __hw_addr_flush(struct netdev_hw_addr_list *list)
314{
315 struct netdev_hw_addr *ha, *tmp;
316
317 list_for_each_entry_safe(ha, tmp, &list->list, list) {
318 list_del_rcu(&ha->list);
319 kfree_rcu(ha, rcu_head);
320 }
321 list->count = 0;
322}
323
/**
 * __hw_addr_init - Initialize an empty hardware address list
 * @list: list to initialize
 */
void __hw_addr_init(struct netdev_hw_addr_list *list)
{
	INIT_LIST_HEAD(&list->list);
	list->count = 0;
}
EXPORT_SYMBOL(__hw_addr_init);
330
331
332
333
334
335
336
337
338
339
340
341
342
/**
 * dev_addr_flush - Flush a device's address list
 * @dev: device
 *
 * Flush the device address list and clear ->dev_addr, which pointed
 * into the first list entry (see dev_addr_init()).
 *
 * NOTE(review): presumably must run under rtnl like the other
 * dev_addr_* operations here — confirm against callers.
 */
void dev_addr_flush(struct net_device *dev)
{
	__hw_addr_flush(&dev->dev_addrs);
	dev->dev_addr = NULL;
}
EXPORT_SYMBOL(dev_addr_flush);
351
352
353
354
355
356
357
358
359
360
/**
 * dev_addr_init - Initialize a device's address list
 * @dev: device
 *
 * Initialize the device address list and create its first,
 * all-zeroes entry; ->dev_addr is then pointed at that entry's
 * address storage.  Returns 0 or a negative errno from the add.
 */
int dev_addr_init(struct net_device *dev)
{
	unsigned char addr[MAX_ADDR_LEN];
	struct netdev_hw_addr *ha;
	int err;

	__hw_addr_init(&dev->dev_addrs);
	memset(addr, 0, sizeof(addr));
	err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
			    NETDEV_HW_ADDR_T_LAN);
	if (!err) {
		/*
		 * Get the first (previously created) address from the list
		 * and set dev_addr pointer to this location.
		 */
		ha = list_first_entry(&dev->dev_addrs.list,
				      struct netdev_hw_addr, list);
		dev->dev_addr = ha->addr;
	}
	return err;
}
EXPORT_SYMBOL(dev_addr_init);
385
386
387
388
389
390
391
392
393
394
395
396
397int dev_addr_add(struct net_device *dev, const unsigned char *addr,
398 unsigned char addr_type)
399{
400 int err;
401
402 ASSERT_RTNL();
403
404 err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
405 if (!err)
406 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
407 return err;
408}
409EXPORT_SYMBOL(dev_addr_add);
410
411
412
413
414
415
416
417
418
419
420
421
/**
 * dev_addr_del - Release a device address
 * @dev: device
 * @addr: address to delete
 * @addr_type: address type
 *
 * Release one reference to @addr and remove it from the device list
 * if the reference count drops to zero.  Notifies listeners on
 * success.  The caller must hold the rtnl_mutex (ASSERT_RTNL).
 */
int dev_addr_del(struct net_device *dev, const unsigned char *addr,
		 unsigned char addr_type)
{
	int err;
	struct netdev_hw_addr *ha;

	ASSERT_RTNL();

	/*
	 * The first entry cannot be removed outright: dev->dev_addr
	 * points into it (see dev_addr_init()), so refuse when this
	 * delete would take its last reference.
	 */
	ha = list_first_entry(&dev->dev_addrs.list,
			      struct netdev_hw_addr, list);
	if (!memcmp(ha->addr, addr, dev->addr_len) &&
	    ha->type == addr_type && ha->refcount == 1)
		return -ENOENT;

	err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
			    addr_type);
	if (!err)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	return err;
}
EXPORT_SYMBOL(dev_addr_del);
447
448
449
450
451
452
453
454
455
456
457int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr)
458{
459 struct netdev_hw_addr *ha;
460 int err;
461
462 netif_addr_lock_bh(dev);
463 list_for_each_entry(ha, &dev->uc.list, list) {
464 if (!memcmp(ha->addr, addr, dev->addr_len) &&
465 ha->type == NETDEV_HW_ADDR_T_UNICAST) {
466 err = -EEXIST;
467 goto out;
468 }
469 }
470 err = __hw_addr_create_ex(&dev->uc, addr, dev->addr_len,
471 NETDEV_HW_ADDR_T_UNICAST, true, false);
472 if (!err)
473 __dev_set_rx_mode(dev);
474out:
475 netif_addr_unlock_bh(dev);
476 return err;
477}
478EXPORT_SYMBOL(dev_uc_add_excl);
479
480
481
482
483
484
485
486
487
488int dev_uc_add(struct net_device *dev, const unsigned char *addr)
489{
490 int err;
491
492 netif_addr_lock_bh(dev);
493 err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
494 NETDEV_HW_ADDR_T_UNICAST);
495 if (!err)
496 __dev_set_rx_mode(dev);
497 netif_addr_unlock_bh(dev);
498 return err;
499}
500EXPORT_SYMBOL(dev_uc_add);
501
502
503
504
505
506
507
508
509
510int dev_uc_del(struct net_device *dev, const unsigned char *addr)
511{
512 int err;
513
514 netif_addr_lock_bh(dev);
515 err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
516 NETDEV_HW_ADDR_T_UNICAST);
517 if (!err)
518 __dev_set_rx_mode(dev);
519 netif_addr_unlock_bh(dev);
520 return err;
521}
522EXPORT_SYMBOL(dev_uc_del);
523
524
525
526
527
528
529
530
531
532
533
534
535
536
/**
 * dev_uc_sync - Synchronize a device's unicast list to another device
 * @to: destination device
 * @from: source device
 *
 * Add newly added addresses to the destination device and release
 * addresses that have no users left.  @to is taken with the nested
 * lock class, which implies the source device's address lock must
 * already be held (netif_addr_lock_bh(from)) by the caller.
 * Fails with -EINVAL when the address lengths differ.
 */
int dev_uc_sync(struct net_device *to, struct net_device *from)
{
	int err = 0;

	if (to->addr_len != from->addr_len)
		return -EINVAL;

	netif_addr_lock_nested(to);
	err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
	if (!err)
		__dev_set_rx_mode(to);
	netif_addr_unlock(to);
	return err;
}
EXPORT_SYMBOL(dev_uc_sync);
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
/**
 * dev_uc_sync_multiple - Sync a unicast list to multiple destinations
 * @to: destination device
 * @from: source device
 *
 * Like dev_uc_sync() but safe when @from is synced to several
 * destination devices (uses __hw_addr_sync_multiple(), which treats
 * an already-synced entry as -EEXIST rather than double-counting).
 * Caller must hold @from's address lock; @to is taken nested here.
 * Fails with -EINVAL when the address lengths differ.
 */
int dev_uc_sync_multiple(struct net_device *to, struct net_device *from)
{
	int err = 0;

	if (to->addr_len != from->addr_len)
		return -EINVAL;

	netif_addr_lock_nested(to);
	err = __hw_addr_sync_multiple(&to->uc, &from->uc, to->addr_len);
	if (!err)
		__dev_set_rx_mode(to);
	netif_addr_unlock(to);
	return err;
}
EXPORT_SYMBOL(dev_uc_sync_multiple);
582
583
584
585
586
587
588
589
590
591
/**
 * dev_uc_unsync - Remove synced unicast addresses from a destination
 * @to: destination device
 * @from: source device
 *
 * Remove all addresses that were added to @to by dev_uc_sync().
 * Both address locks are taken here: @from with BH disabled first,
 * then @to with the nested lock class — this ordering matches the
 * sync path and must be preserved.
 */
void dev_uc_unsync(struct net_device *to, struct net_device *from)
{
	if (to->addr_len != from->addr_len)
		return;

	netif_addr_lock_bh(from);
	netif_addr_lock_nested(to);
	__hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
	__dev_set_rx_mode(to);
	netif_addr_unlock(to);
	netif_addr_unlock_bh(from);
}
EXPORT_SYMBOL(dev_uc_unsync);
605
606
607
608
609
610
611
/**
 * dev_uc_flush - Flush a device's unicast address list
 * @dev: device
 *
 * Removes every entry regardless of reference counts.
 */
void dev_uc_flush(struct net_device *dev)
{
	netif_addr_lock_bh(dev);
	__hw_addr_flush(&dev->uc);
	netif_addr_unlock_bh(dev);
}
EXPORT_SYMBOL(dev_uc_flush);
619
620
621
622
623
624
625
/**
 * dev_uc_init - Initialize a device's unicast address list
 * @dev: device
 */
void dev_uc_init(struct net_device *dev)
{
	__hw_addr_init(&dev->uc);
}
EXPORT_SYMBOL(dev_uc_init);
631
632
633
634
635
636
637
638
639
640
641int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr)
642{
643 struct netdev_hw_addr *ha;
644 int err;
645
646 netif_addr_lock_bh(dev);
647 list_for_each_entry(ha, &dev->mc.list, list) {
648 if (!memcmp(ha->addr, addr, dev->addr_len) &&
649 ha->type == NETDEV_HW_ADDR_T_MULTICAST) {
650 err = -EEXIST;
651 goto out;
652 }
653 }
654 err = __hw_addr_create_ex(&dev->mc, addr, dev->addr_len,
655 NETDEV_HW_ADDR_T_MULTICAST, true, false);
656 if (!err)
657 __dev_set_rx_mode(dev);
658out:
659 netif_addr_unlock_bh(dev);
660 return err;
661}
662EXPORT_SYMBOL(dev_mc_add_excl);
663
664static int __dev_mc_add(struct net_device *dev, const unsigned char *addr,
665 bool global)
666{
667 int err;
668
669 netif_addr_lock_bh(dev);
670 err = __hw_addr_add_ex(&dev->mc, addr, dev->addr_len,
671 NETDEV_HW_ADDR_T_MULTICAST, global, false, 0);
672 if (!err)
673 __dev_set_rx_mode(dev);
674 netif_addr_unlock_bh(dev);
675 return err;
676}
677
678
679
680
681
682
683
684
685int dev_mc_add(struct net_device *dev, const unsigned char *addr)
686{
687 return __dev_mc_add(dev, addr, false);
688}
689EXPORT_SYMBOL(dev_mc_add);
690
691
692
693
694
695
696
697
698int dev_mc_add_global(struct net_device *dev, const unsigned char *addr)
699{
700 return __dev_mc_add(dev, addr, true);
701}
702EXPORT_SYMBOL(dev_mc_add_global);
703
704static int __dev_mc_del(struct net_device *dev, const unsigned char *addr,
705 bool global)
706{
707 int err;
708
709 netif_addr_lock_bh(dev);
710 err = __hw_addr_del_ex(&dev->mc, addr, dev->addr_len,
711 NETDEV_HW_ADDR_T_MULTICAST, global, false);
712 if (!err)
713 __dev_set_rx_mode(dev);
714 netif_addr_unlock_bh(dev);
715 return err;
716}
717
718
719
720
721
722
723
724
725
726int dev_mc_del(struct net_device *dev, const unsigned char *addr)
727{
728 return __dev_mc_del(dev, addr, false);
729}
730EXPORT_SYMBOL(dev_mc_del);
731
732
733
734
735
736
737
738
739
740int dev_mc_del_global(struct net_device *dev, const unsigned char *addr)
741{
742 return __dev_mc_del(dev, addr, true);
743}
744EXPORT_SYMBOL(dev_mc_del_global);
745
746
747
748
749
750
751
752
753
754
755
756
757
/**
 * dev_mc_sync - Synchronize a device's multicast list to another device
 * @to: destination device
 * @from: source device
 *
 * Add newly added addresses to the destination device and release
 * addresses that have no users left.  @to is taken with the nested
 * lock class, which implies the source device's address lock must
 * already be held (netif_addr_lock_bh(from)) by the caller.
 * Fails with -EINVAL when the address lengths differ.
 */
int dev_mc_sync(struct net_device *to, struct net_device *from)
{
	int err = 0;

	if (to->addr_len != from->addr_len)
		return -EINVAL;

	netif_addr_lock_nested(to);
	err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
	if (!err)
		__dev_set_rx_mode(to);
	netif_addr_unlock(to);
	return err;
}
EXPORT_SYMBOL(dev_mc_sync);
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
/**
 * dev_mc_sync_multiple - Sync a multicast list to multiple destinations
 * @to: destination device
 * @from: source device
 *
 * Like dev_mc_sync() but safe when @from is synced to several
 * destination devices (uses __hw_addr_sync_multiple(), which treats
 * an already-synced entry as -EEXIST rather than double-counting).
 * Caller must hold @from's address lock; @to is taken nested here.
 * Fails with -EINVAL when the address lengths differ.
 */
int dev_mc_sync_multiple(struct net_device *to, struct net_device *from)
{
	int err = 0;

	if (to->addr_len != from->addr_len)
		return -EINVAL;

	netif_addr_lock_nested(to);
	err = __hw_addr_sync_multiple(&to->mc, &from->mc, to->addr_len);
	if (!err)
		__dev_set_rx_mode(to);
	netif_addr_unlock(to);
	return err;
}
EXPORT_SYMBOL(dev_mc_sync_multiple);
803
804
805
806
807
808
809
810
811
812
/**
 * dev_mc_unsync - Remove synced multicast addresses from a destination
 * @to: destination device
 * @from: source device
 *
 * Remove all addresses that were added to @to by dev_mc_sync().
 * Both address locks are taken here: @from with BH disabled first,
 * then @to with the nested lock class — this ordering matches the
 * sync path and must be preserved.
 */
void dev_mc_unsync(struct net_device *to, struct net_device *from)
{
	if (to->addr_len != from->addr_len)
		return;

	netif_addr_lock_bh(from);
	netif_addr_lock_nested(to);
	__hw_addr_unsync(&to->mc, &from->mc, to->addr_len);
	__dev_set_rx_mode(to);
	netif_addr_unlock(to);
	netif_addr_unlock_bh(from);
}
EXPORT_SYMBOL(dev_mc_unsync);
826
827
828
829
830
831
832
/**
 * dev_mc_flush - Flush a device's multicast address list
 * @dev: device
 *
 * Removes every entry regardless of reference counts.
 */
void dev_mc_flush(struct net_device *dev)
{
	netif_addr_lock_bh(dev);
	__hw_addr_flush(&dev->mc);
	netif_addr_unlock_bh(dev);
}
EXPORT_SYMBOL(dev_mc_flush);
840
841
842
843
844
845
846
/**
 * dev_mc_init - Initialize a device's multicast address list
 * @dev: device
 */
void dev_mc_init(struct net_device *dev)
{
	__hw_addr_init(&dev->mc);
}
EXPORT_SYMBOL(dev_mc_init);
852