1
2
3
4
5
6
7
8
9
10
11
12
13
14#include <linux/netdevice.h>
15#include <linux/rtnetlink.h>
16#include <linux/export.h>
17#include <linux/list.h>
18
19
20
21
22
23static int __hw_addr_create_ex(struct netdev_hw_addr_list *list,
24 const unsigned char *addr, int addr_len,
25 unsigned char addr_type, bool global,
26 bool sync)
27{
28 struct netdev_hw_addr *ha;
29 int alloc_size;
30
31 alloc_size = sizeof(*ha);
32 if (alloc_size < L1_CACHE_BYTES)
33 alloc_size = L1_CACHE_BYTES;
34 ha = kmalloc(alloc_size, GFP_ATOMIC);
35 if (!ha)
36 return -ENOMEM;
37 memcpy(ha->addr, addr, addr_len);
38 ha->type = addr_type;
39 ha->refcount = 1;
40 ha->global_use = global;
41 ha->synced = sync;
42 ha->sync_cnt = 0;
43 list_add_tail_rcu(&ha->list, &list->list);
44 list->count++;
45
46 return 0;
47}
48
/* Add @addr to @list, or bump usage on an existing matching entry.
 *
 * An entry matches when both the raw address bytes and the address
 * type are equal.  On a match:
 *   - global add: a second global use of the entry is a no-op that
 *     returns 0 without touching the refcount; the first global use
 *     marks the entry and falls through to take a reference;
 *   - sync add: a second sync of the same entry is refused with
 *     -EEXIST so callers can distinguish "already synced";
 *   - otherwise the refcount is incremented.
 * With no match, a fresh entry is created.
 *
 * Returns 0 on success, -EINVAL if @addr_len exceeds MAX_ADDR_LEN,
 * -EEXIST for a duplicate sync, or -ENOMEM from entry creation.
 */
static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
			    const unsigned char *addr, int addr_len,
			    unsigned char addr_type, bool global, bool sync)
{
	struct netdev_hw_addr *ha;

	if (addr_len > MAX_ADDR_LEN)
		return -EINVAL;

	list_for_each_entry(ha, &list->list, list) {
		if (!memcmp(ha->addr, addr, addr_len) &&
		    ha->type == addr_type) {
			if (global) {
				/* check if addr is already used as global */
				if (ha->global_use)
					return 0;
				else
					ha->global_use = true;
			}
			if (sync) {
				if (ha->synced)
					return -EEXIST;
				else
					ha->synced = true;
			}
			ha->refcount++;
			return 0;
		}
	}

	return __hw_addr_create_ex(list, addr, addr_len, addr_type, global,
				   sync);
}
82
/* Plain add: neither global nor synced. */
static int __hw_addr_add(struct netdev_hw_addr_list *list,
			 const unsigned char *addr, int addr_len,
			 unsigned char addr_type)
{
	return __hw_addr_add_ex(list, addr, addr_len, addr_type, false, false);
}
89
90static int __hw_addr_del_entry(struct netdev_hw_addr_list *list,
91 struct netdev_hw_addr *ha, bool global,
92 bool sync)
93{
94 if (global && !ha->global_use)
95 return -ENOENT;
96
97 if (sync && !ha->synced)
98 return -ENOENT;
99
100 if (global)
101 ha->global_use = false;
102
103 if (sync)
104 ha->synced = false;
105
106 if (--ha->refcount)
107 return 0;
108 list_del_rcu(&ha->list);
109 kfree_rcu(ha, rcu_head);
110 list->count--;
111 return 0;
112}
113
114static int __hw_addr_del_ex(struct netdev_hw_addr_list *list,
115 const unsigned char *addr, int addr_len,
116 unsigned char addr_type, bool global, bool sync)
117{
118 struct netdev_hw_addr *ha;
119
120 list_for_each_entry(ha, &list->list, list) {
121 if (!memcmp(ha->addr, addr, addr_len) &&
122 (ha->type == addr_type || !addr_type))
123 return __hw_addr_del_entry(list, ha, global, sync);
124 }
125 return -ENOENT;
126}
127
/* Plain delete: neither global nor synced. */
static int __hw_addr_del(struct netdev_hw_addr_list *list,
			 const unsigned char *addr, int addr_len,
			 unsigned char addr_type)
{
	return __hw_addr_del_ex(list, addr, addr_len, addr_type, false, false);
}
134
/* Push one source entry @ha into @to_list as a "synced" address.
 *
 * On a fresh sync the source entry's sync_cnt and refcount are both
 * bumped: the refcount records that the destination list now keeps
 * this address alive.  -EEXIST from __hw_addr_add_ex() means the
 * address was already synced into @to_list; that is treated as
 * success without taking another reference.
 */
static int __hw_addr_sync_one(struct netdev_hw_addr_list *to_list,
			      struct netdev_hw_addr *ha,
			      int addr_len)
{
	int err;

	err = __hw_addr_add_ex(to_list, ha->addr, addr_len, ha->type,
			       false, true);
	if (err && err != -EEXIST)
		return err;

	if (!err) {
		ha->sync_cnt++;
		ha->refcount++;
	}

	return 0;
}
153
/* Undo one __hw_addr_sync_one(): remove the synced copy from
 * @to_list and release the reference it held on the source entry
 * @ha.  If @to_list has no matching synced entry, nothing changes.
 */
static void __hw_addr_unsync_one(struct netdev_hw_addr_list *to_list,
				 struct netdev_hw_addr_list *from_list,
				 struct netdev_hw_addr *ha,
				 int addr_len)
{
	int err;

	err = __hw_addr_del_ex(to_list, ha->addr, addr_len, ha->type,
			       false, true);
	if (err)
		return;
	ha->sync_cnt--;

	/* Drop the reference __hw_addr_sync_one() took on @ha. */
	__hw_addr_del_entry(from_list, ha, false, false);
}
169
/* Sync @from_list into @to_list for callers that may sync one source
 * list into several destinations (so sync_cnt can exceed one).
 *
 * An entry whose refcount consists entirely of sync references
 * (sync_cnt == refcount) has no local user left and is unsynced;
 * every other entry is (re-)synced — __hw_addr_sync_one() treats an
 * already-synced destination as success.  Stops at the first sync
 * error and returns it.
 */
static int __hw_addr_sync_multiple(struct netdev_hw_addr_list *to_list,
				   struct netdev_hw_addr_list *from_list,
				   int addr_len)
{
	int err = 0;
	struct netdev_hw_addr *ha, *tmp;

	list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
		if (ha->sync_cnt == ha->refcount) {
			__hw_addr_unsync_one(to_list, from_list, ha, addr_len);
		} else {
			err = __hw_addr_sync_one(to_list, ha, addr_len);
			if (err)
				break;
		}
	}
	return err;
}
188
/* Copy every address in @from_list into @to_list.
 *
 * An @addr_type of zero preserves each source entry's own type;
 * otherwise every copy is forced to @addr_type.  The operation is
 * all-or-nothing: on failure, all entries added so far are removed
 * again, leaving @to_list unchanged, and the error is returned.
 */
int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
			   struct netdev_hw_addr_list *from_list,
			   int addr_len, unsigned char addr_type)
{
	int err;
	struct netdev_hw_addr *ha, *ha2;
	unsigned char type;

	list_for_each_entry(ha, &from_list->list, list) {
		type = addr_type ? addr_type : ha->type;
		err = __hw_addr_add(to_list, ha->addr, addr_len, type);
		if (err)
			goto unroll;
	}
	return 0;

unroll:
	/* Walk the source list again, deleting the copies made before
	 * the failing entry (@ha) to roll back.
	 */
	list_for_each_entry(ha2, &from_list->list, list) {
		if (ha2 == ha)
			break;
		type = addr_type ? addr_type : ha2->type;
		__hw_addr_del(to_list, ha2->addr, addr_len, type);
	}
	return err;
}
EXPORT_SYMBOL(__hw_addr_add_multiple);
215
216void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
217 struct netdev_hw_addr_list *from_list,
218 int addr_len, unsigned char addr_type)
219{
220 struct netdev_hw_addr *ha;
221 unsigned char type;
222
223 list_for_each_entry(ha, &from_list->list, list) {
224 type = addr_type ? addr_type : ha->type;
225 __hw_addr_del(to_list, ha->addr, addr_len, type);
226 }
227}
228EXPORT_SYMBOL(__hw_addr_del_multiple);
229
230
231
232
233
234
/* Single-destination sync: each source entry is synced at most once
 * (sync_cnt is 0 or 1).
 *
 * Entries never synced are pushed to @to_list; entries whose only
 * remaining reference is the sync itself (refcount == 1) have lost
 * their local user and are unsynced.  Stops at the first sync error
 * and returns it.
 */
int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
		   struct netdev_hw_addr_list *from_list,
		   int addr_len)
{
	int err = 0;
	struct netdev_hw_addr *ha, *tmp;

	list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
		if (!ha->sync_cnt) {
			err = __hw_addr_sync_one(to_list, ha, addr_len);
			if (err)
				break;
		} else if (ha->refcount == 1)
			__hw_addr_unsync_one(to_list, from_list, ha, addr_len);
	}
	return err;
}
EXPORT_SYMBOL(__hw_addr_sync);
253
254void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
255 struct netdev_hw_addr_list *from_list,
256 int addr_len)
257{
258 struct netdev_hw_addr *ha, *tmp;
259
260 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
261 if (ha->sync_cnt)
262 __hw_addr_unsync_one(to_list, from_list, ha, addr_len);
263 }
264}
265EXPORT_SYMBOL(__hw_addr_unsync);
266
267void __hw_addr_flush(struct netdev_hw_addr_list *list)
268{
269 struct netdev_hw_addr *ha, *tmp;
270
271 list_for_each_entry_safe(ha, tmp, &list->list, list) {
272 list_del_rcu(&ha->list);
273 kfree_rcu(ha, rcu_head);
274 }
275 list->count = 0;
276}
277EXPORT_SYMBOL(__hw_addr_flush);
278
279void __hw_addr_init(struct netdev_hw_addr_list *list)
280{
281 INIT_LIST_HEAD(&list->list);
282 list->count = 0;
283}
284EXPORT_SYMBOL(__hw_addr_init);
285
286
287
288
289
290
291
292
293
294
295
296
297
/**
 * dev_addr_flush - Flush device address list
 * @dev: device
 *
 * Flush device address list and reset dev->dev_addr, which pointed
 * into the first entry's storage (see dev_addr_init()).
 *
 * NOTE(review): sibling dev_addr_* helpers ASSERT_RTNL(), so the
 * caller is presumably expected to hold the rtnl_mutex here too.
 */
void dev_addr_flush(struct net_device *dev)
{
	__hw_addr_flush(&dev->dev_addrs);
	dev->dev_addr = NULL;
}
EXPORT_SYMBOL(dev_addr_flush);
306
307
308
309
310
311
312
313
314
315
/**
 * dev_addr_init - Init device address list
 * @dev: device
 *
 * Init device address list and create the first, all-zero element
 * whose storage backs dev->dev_addr.
 *
 * Returns 0 on success, or the error from adding the first entry.
 */
int dev_addr_init(struct net_device *dev)
{
	unsigned char addr[MAX_ADDR_LEN];
	struct netdev_hw_addr *ha;
	int err;

	__hw_addr_init(&dev->dev_addrs);
	memset(addr, 0, sizeof(addr));
	err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
			    NETDEV_HW_ADDR_T_LAN);
	if (!err) {
		/* dev->dev_addr aliases the first list entry's addr
		 * storage, so writes through dev->dev_addr update the
		 * list entry in place.
		 */
		ha = list_first_entry(&dev->dev_addrs.list,
				      struct netdev_hw_addr, list);
		dev->dev_addr = ha->addr;
	}
	return err;
}
EXPORT_SYMBOL(dev_addr_init);
340
341
342
343
344
345
346
347
348
349
350
351
/**
 * dev_addr_add - Add a device address
 * @dev: device
 * @addr: address to add
 * @addr_type: address type
 *
 * Add a device address to the device, or increase the reference
 * count on an existing matching entry.  Emits NETDEV_CHANGEADDR on
 * success.
 *
 * The caller must hold the RTNL semaphore.
 */
int dev_addr_add(struct net_device *dev, const unsigned char *addr,
		 unsigned char addr_type)
{
	int err;

	ASSERT_RTNL();

	err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
	if (!err)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	return err;
}
EXPORT_SYMBOL(dev_addr_add);
365
366
367
368
369
370
371
372
373
374
375
376
/**
 * dev_addr_del - Release a device address
 * @dev: device
 * @addr: address to delete
 * @addr_type: address type
 *
 * Release a reference to a device address and remove it from the
 * device when the reference count drops to zero.  Emits
 * NETDEV_CHANGEADDR on success.
 *
 * The caller must hold the RTNL semaphore.
 */
int dev_addr_del(struct net_device *dev, const unsigned char *addr,
		 unsigned char addr_type)
{
	int err;
	struct netdev_hw_addr *ha;

	ASSERT_RTNL();

	/* The first entry's storage backs dev->dev_addr (see
	 * dev_addr_init()); it cannot be deleted while this is its
	 * only reference, or dev->dev_addr would dangle.
	 */
	ha = list_first_entry(&dev->dev_addrs.list,
			      struct netdev_hw_addr, list);
	if (!memcmp(ha->addr, addr, dev->addr_len) &&
	    ha->type == addr_type && ha->refcount == 1)
		return -ENOENT;

	err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
			    addr_type);
	if (!err)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	return err;
}
EXPORT_SYMBOL(dev_addr_del);
402
403
404
405
406
407
408
409
410
411
412
/**
 * dev_addr_add_multiple - Add device addresses from another device
 * @to_dev: device to which addresses will be added
 * @from_dev: device from which addresses will be added
 * @addr_type: address type - 0 means each source entry keeps its type
 *
 * Copy the device addresses of one device to another; the copy is
 * all-or-nothing (see __hw_addr_add_multiple()).  Emits
 * NETDEV_CHANGEADDR on success.  Fails with -EINVAL when the two
 * devices use different address lengths.
 *
 * The caller must hold the RTNL semaphore.
 */
int dev_addr_add_multiple(struct net_device *to_dev,
			  struct net_device *from_dev,
			  unsigned char addr_type)
{
	int err;

	ASSERT_RTNL();

	if (from_dev->addr_len != to_dev->addr_len)
		return -EINVAL;
	err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
				     to_dev->addr_len, addr_type);
	if (!err)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
	return err;
}
EXPORT_SYMBOL(dev_addr_add_multiple);
430
431
432
433
434
435
436
437
438
439
440
/**
 * dev_addr_del_multiple - Delete device addresses by another device
 * @to_dev: device where the addresses will be deleted
 * @from_dev: device supplying the addresses to be deleted
 * @addr_type: address type - 0 means each source entry keeps its type
 *
 * Release from @to_dev one use of every address held by @from_dev,
 * then emit NETDEV_CHANGEADDR unconditionally.  Fails with -EINVAL
 * when the two devices use different address lengths.
 *
 * The caller must hold the RTNL semaphore.
 */
int dev_addr_del_multiple(struct net_device *to_dev,
			  struct net_device *from_dev,
			  unsigned char addr_type)
{
	ASSERT_RTNL();

	if (from_dev->addr_len != to_dev->addr_len)
		return -EINVAL;
	__hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
			       to_dev->addr_len, addr_type);
	call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
	return 0;
}
EXPORT_SYMBOL(dev_addr_del_multiple);
455
456
457
458
459
460
461
462
463
464
/**
 * dev_uc_add_excl - Add a global secondary unicast address
 * @dev: device
 * @addr: address to add
 *
 * Add @addr only if it is not already on the device's unicast list;
 * unlike dev_uc_add(), a duplicate is refused with -EEXIST instead
 * of taking another reference.  The new entry is created with
 * global_use set.  Refreshes the RX filter on success.
 */
int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr)
{
	struct netdev_hw_addr *ha;
	int err;

	netif_addr_lock_bh(dev);
	/* Scan for an existing entry ourselves so a duplicate can be
	 * refused rather than refcounted.
	 */
	list_for_each_entry(ha, &dev->uc.list, list) {
		if (!memcmp(ha->addr, addr, dev->addr_len) &&
		    ha->type == NETDEV_HW_ADDR_T_UNICAST) {
			err = -EEXIST;
			goto out;
		}
	}
	err = __hw_addr_create_ex(&dev->uc, addr, dev->addr_len,
				  NETDEV_HW_ADDR_T_UNICAST, true, false);
	if (!err)
		__dev_set_rx_mode(dev);
out:
	netif_addr_unlock_bh(dev);
	return err;
}
EXPORT_SYMBOL(dev_uc_add_excl);
487
488
489
490
491
492
493
494
495
/**
 * dev_uc_add - Add a secondary unicast address
 * @dev: device
 * @addr: address to add
 *
 * Add a secondary unicast address to the device, or increase the
 * reference count if it already exists.  On success the RX filter is
 * refreshed while the addr lock is still held.
 */
int dev_uc_add(struct net_device *dev, const unsigned char *addr)
{
	int err;

	netif_addr_lock_bh(dev);
	err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
			    NETDEV_HW_ADDR_T_UNICAST);
	if (!err)
		__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
	return err;
}
EXPORT_SYMBOL(dev_uc_add);
509
510
511
512
513
514
515
516
517
/**
 * dev_uc_del - Release a secondary unicast address
 * @dev: device
 * @addr: address to delete
 *
 * Release a reference to a secondary unicast address and remove it
 * from the device when the reference count drops to zero.  On
 * success the RX filter is refreshed while the addr lock is still
 * held.
 */
int dev_uc_del(struct net_device *dev, const unsigned char *addr)
{
	int err;

	netif_addr_lock_bh(dev);
	err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
			    NETDEV_HW_ADDR_T_UNICAST);
	if (!err)
		__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
	return err;
}
EXPORT_SYMBOL(dev_uc_del);
531
532
533
534
535
536
537
538
539
540
541
542
543
544
/**
 * dev_uc_sync - Synchronize device's unicast list to another device
 * @to: destination device
 * @from: source device
 *
 * Add newly added addresses to the destination device and release
 * addresses that have no users left.
 *
 * NOTE(review): only @to is locked here, with the nested annotation
 * (both locks share a lockdep class) — presumably the caller already
 * holds @from's addr lock, as in dev_uc_unsync(); confirm at call
 * sites (typically a layered device's ndo_set_rx_mode).
 */
int dev_uc_sync(struct net_device *to, struct net_device *from)
{
	int err = 0;

	if (to->addr_len != from->addr_len)
		return -EINVAL;

	netif_addr_lock_nested(to);
	err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
	if (!err)
		__dev_set_rx_mode(to);
	netif_addr_unlock(to);
	return err;
}
EXPORT_SYMBOL(dev_uc_sync);
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
/**
 * dev_uc_sync_multiple - Synchronize device's unicast list to another
 *	device, but allow the source list to be synced to multiple
 *	destination devices
 * @to: destination device
 * @from: source device
 *
 * Like dev_uc_sync(), but uses __hw_addr_sync_multiple() so each
 * source entry can carry more than one sync reference.
 */
int dev_uc_sync_multiple(struct net_device *to, struct net_device *from)
{
	int err = 0;

	if (to->addr_len != from->addr_len)
		return -EINVAL;

	netif_addr_lock_nested(to);
	err = __hw_addr_sync_multiple(&to->uc, &from->uc, to->addr_len);
	if (!err)
		__dev_set_rx_mode(to);
	netif_addr_unlock(to);
	return err;
}
EXPORT_SYMBOL(dev_uc_sync_multiple);
590
591
592
593
594
595
596
597
598
599
/**
 * dev_uc_unsync - Remove synced addresses from the destination device
 * @to: destination device
 * @from: source device
 *
 * Remove all addresses that were added to the destination device by
 * dev_uc_sync(), releasing the references the syncs held.
 */
void dev_uc_unsync(struct net_device *to, struct net_device *from)
{
	if (to->addr_len != from->addr_len)
		return;

	/* Lock @from first, then @to with the nested annotation: both
	 * are netif addr locks of the same class, so lockdep needs the
	 * nesting hint.
	 */
	netif_addr_lock_bh(from);
	netif_addr_lock_nested(to);
	__hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
	__dev_set_rx_mode(to);
	netif_addr_unlock(to);
	netif_addr_unlock_bh(from);
}
EXPORT_SYMBOL(dev_uc_unsync);
613
614
615
616
617
618
619
/**
 * dev_uc_flush - Flush unicast addresses
 * @dev: device
 *
 * Flush all secondary unicast addresses of the device.
 */
void dev_uc_flush(struct net_device *dev)
{
	netif_addr_lock_bh(dev);
	__hw_addr_flush(&dev->uc);
	netif_addr_unlock_bh(dev);
}
EXPORT_SYMBOL(dev_uc_flush);
627
628
629
630
631
632
633
/**
 * dev_uc_init - Init unicast address list
 * @dev: device
 *
 * Initialize the device's (empty) unicast address list.
 */
void dev_uc_init(struct net_device *dev)
{
	__hw_addr_init(&dev->uc);
}
EXPORT_SYMBOL(dev_uc_init);
639
640
641
642
643
644
645
646
647
648
/**
 * dev_mc_add_excl - Add a global multicast address
 * @dev: device
 * @addr: address to add
 *
 * Add @addr only if it is not already on the device's multicast
 * list; unlike dev_mc_add(), a duplicate is refused with -EEXIST
 * instead of taking another reference.  The new entry is created
 * with global_use set.  Refreshes the RX filter on success.
 */
int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr)
{
	struct netdev_hw_addr *ha;
	int err;

	netif_addr_lock_bh(dev);
	/* Scan for an existing entry ourselves so a duplicate can be
	 * refused rather than refcounted.
	 */
	list_for_each_entry(ha, &dev->mc.list, list) {
		if (!memcmp(ha->addr, addr, dev->addr_len) &&
		    ha->type == NETDEV_HW_ADDR_T_MULTICAST) {
			err = -EEXIST;
			goto out;
		}
	}
	err = __hw_addr_create_ex(&dev->mc, addr, dev->addr_len,
				  NETDEV_HW_ADDR_T_MULTICAST, true, false);
	if (!err)
		__dev_set_rx_mode(dev);
out:
	netif_addr_unlock_bh(dev);
	return err;
}
EXPORT_SYMBOL(dev_mc_add_excl);
671
/* Common worker for dev_mc_add()/dev_mc_add_global(): add @addr to
 * the device's multicast list (or take another reference) under the
 * addr lock, refreshing the RX filter on success.
 */
static int __dev_mc_add(struct net_device *dev, const unsigned char *addr,
			bool global)
{
	int err;

	netif_addr_lock_bh(dev);
	err = __hw_addr_add_ex(&dev->mc, addr, dev->addr_len,
			       NETDEV_HW_ADDR_T_MULTICAST, global, false);
	if (!err)
		__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
	return err;
}
685
686
687
688
689
690
691
692
/**
 * dev_mc_add - Add a multicast address
 * @dev: device
 * @addr: address to add
 *
 * Add a multicast address to the device, or increase the reference
 * count if it already exists.
 */
int dev_mc_add(struct net_device *dev, const unsigned char *addr)
{
	return __dev_mc_add(dev, addr, false);
}
EXPORT_SYMBOL(dev_mc_add);
698
699
700
701
702
703
704
705
/**
 * dev_mc_add_global - Add a global multicast address
 * @dev: device
 * @addr: address to add
 *
 * Add a global multicast address to the device: the entry is marked
 * global_use, and repeated global adds of the same address are
 * no-ops (see __hw_addr_add_ex()).
 */
int dev_mc_add_global(struct net_device *dev, const unsigned char *addr)
{
	return __dev_mc_add(dev, addr, true);
}
EXPORT_SYMBOL(dev_mc_add_global);
711
/* Common worker for dev_mc_del()/dev_mc_del_global(): release one
 * (optionally global) use of @addr on the device's multicast list
 * under the addr lock, refreshing the RX filter on success.
 */
static int __dev_mc_del(struct net_device *dev, const unsigned char *addr,
			bool global)
{
	int err;

	netif_addr_lock_bh(dev);
	err = __hw_addr_del_ex(&dev->mc, addr, dev->addr_len,
			       NETDEV_HW_ADDR_T_MULTICAST, global, false);
	if (!err)
		__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
	return err;
}
725
726
727
728
729
730
731
732
733
/**
 * dev_mc_del - Delete a multicast address
 * @dev: device
 * @addr: address to delete
 *
 * Release a reference to a multicast address and remove it from the
 * device when the reference count drops to zero.
 */
int dev_mc_del(struct net_device *dev, const unsigned char *addr)
{
	return __dev_mc_del(dev, addr, false);
}
EXPORT_SYMBOL(dev_mc_del);
739
740
741
742
743
744
745
746
747
/**
 * dev_mc_del_global - Delete a global multicast address
 * @dev: device
 * @addr: address to delete
 *
 * Release a previously-added global multicast address; fails with
 * -ENOENT when the entry is not marked for global use (see
 * __hw_addr_del_entry()).
 */
int dev_mc_del_global(struct net_device *dev, const unsigned char *addr)
{
	return __dev_mc_del(dev, addr, true);
}
EXPORT_SYMBOL(dev_mc_del_global);
753
754
755
756
757
758
759
760
761
762
763
764
765
/**
 * dev_mc_sync - Synchronize device's multicast list to another device
 * @to: destination device
 * @from: source device
 *
 * Add newly added addresses to the destination device and release
 * addresses that have no users left.
 *
 * NOTE(review): only @to is locked here, with the nested annotation
 * (both locks share a lockdep class) — presumably the caller already
 * holds @from's addr lock, as in dev_mc_unsync(); confirm at call
 * sites (typically a layered device's ndo_set_rx_mode).
 */
int dev_mc_sync(struct net_device *to, struct net_device *from)
{
	int err = 0;

	if (to->addr_len != from->addr_len)
		return -EINVAL;

	netif_addr_lock_nested(to);
	err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
	if (!err)
		__dev_set_rx_mode(to);
	netif_addr_unlock(to);
	return err;
}
EXPORT_SYMBOL(dev_mc_sync);
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
/**
 * dev_mc_sync_multiple - Synchronize device's multicast list to
 *	another device, but allow the source list to be synced to
 *	multiple destination devices
 * @to: destination device
 * @from: source device
 *
 * Like dev_mc_sync(), but uses __hw_addr_sync_multiple() so each
 * source entry can carry more than one sync reference.
 */
int dev_mc_sync_multiple(struct net_device *to, struct net_device *from)
{
	int err = 0;

	if (to->addr_len != from->addr_len)
		return -EINVAL;

	netif_addr_lock_nested(to);
	err = __hw_addr_sync_multiple(&to->mc, &from->mc, to->addr_len);
	if (!err)
		__dev_set_rx_mode(to);
	netif_addr_unlock(to);
	return err;
}
EXPORT_SYMBOL(dev_mc_sync_multiple);
811
812
813
814
815
816
817
818
819
820
/**
 * dev_mc_unsync - Remove synced addresses from the destination device
 * @to: destination device
 * @from: source device
 *
 * Remove all addresses that were added to the destination device by
 * dev_mc_sync(), releasing the references the syncs held.
 */
void dev_mc_unsync(struct net_device *to, struct net_device *from)
{
	if (to->addr_len != from->addr_len)
		return;

	/* Lock @from first, then @to with the nested annotation: both
	 * are netif addr locks of the same class, so lockdep needs the
	 * nesting hint.
	 */
	netif_addr_lock_bh(from);
	netif_addr_lock_nested(to);
	__hw_addr_unsync(&to->mc, &from->mc, to->addr_len);
	__dev_set_rx_mode(to);
	netif_addr_unlock(to);
	netif_addr_unlock_bh(from);
}
EXPORT_SYMBOL(dev_mc_unsync);
834
835
836
837
838
839
840
/**
 * dev_mc_flush - Flush multicast addresses
 * @dev: device
 *
 * Flush all multicast addresses of the device.
 */
void dev_mc_flush(struct net_device *dev)
{
	netif_addr_lock_bh(dev);
	__hw_addr_flush(&dev->mc);
	netif_addr_unlock_bh(dev);
}
EXPORT_SYMBOL(dev_mc_flush);
848
849
850
851
852
853
854
/**
 * dev_mc_init - Init multicast address list
 * @dev: device
 *
 * Initialize the device's (empty) multicast address list.
 */
void dev_mc_init(struct net_device *dev)
{
	__hw_addr_init(&dev->mc);
}
EXPORT_SYMBOL(dev_mc_init);
860