1
2
3
4
5
6
7
8
9
10#include <linux/netdevice.h>
11#include <linux/rtnetlink.h>
12#include <linux/export.h>
13#include <linux/list.h>
14
15
16
17
18
19static int __hw_addr_create_ex(struct netdev_hw_addr_list *list,
20 const unsigned char *addr, int addr_len,
21 unsigned char addr_type, bool global,
22 bool sync)
23{
24 struct netdev_hw_addr *ha;
25 int alloc_size;
26
27 alloc_size = sizeof(*ha);
28 if (alloc_size < L1_CACHE_BYTES)
29 alloc_size = L1_CACHE_BYTES;
30 ha = kmalloc(alloc_size, GFP_ATOMIC);
31 if (!ha)
32 return -ENOMEM;
33 memcpy(ha->addr, addr, addr_len);
34 ha->type = addr_type;
35 ha->refcount = 1;
36 ha->global_use = global;
37 ha->synced = sync ? 1 : 0;
38 ha->sync_cnt = 0;
39 list_add_tail_rcu(&ha->list, &list->list);
40 list->count++;
41
42 return 0;
43}
44
/* Add @addr to @list, or bump the reference count of a matching entry.
 *
 * An entry matches when both the type and the address bytes agree.
 * @global marks the entry as globally used; @sync bumps its synced
 * counter.  Returns -EINVAL for an oversized address, -EEXIST when a
 * sync is requested on an already-synced entry while @sync_count is
 * non-zero, 0 on success, or the error from __hw_addr_create_ex().
 */
static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
			    const unsigned char *addr, int addr_len,
			    unsigned char addr_type, bool global, bool sync,
			    int sync_count)
{
	struct netdev_hw_addr *ha;

	if (addr_len > MAX_ADDR_LEN)
		return -EINVAL;

	list_for_each_entry(ha, &list->list, list) {
		if (ha->type == addr_type &&
		    !memcmp(ha->addr, addr, addr_len)) {
			if (global) {
				/* check if addr is already used as global */
				if (ha->global_use)
					return 0;
				else
					ha->global_use = true;
			}
			if (sync) {
				if (ha->synced && sync_count)
					return -EEXIST;
				else
					ha->synced++;
			}
			ha->refcount++;
			return 0;
		}
	}

	return __hw_addr_create_ex(list, addr, addr_len, addr_type, global,
				   sync);
}
79
/* Plain add: neither global nor synced. */
static int __hw_addr_add(struct netdev_hw_addr_list *list,
			 const unsigned char *addr, int addr_len,
			 unsigned char addr_type)
{
	return __hw_addr_add_ex(list, addr, addr_len, addr_type, false, false,
				0);
}
87
/* Drop one reference on @ha, clearing its global/synced marks as
 * requested.  The entry is unlinked and freed (RCU-deferred) only when
 * the last reference goes away.  Returns -ENOENT when the requested
 * global or sync mark was not actually set, 0 otherwise.
 */
static int __hw_addr_del_entry(struct netdev_hw_addr_list *list,
			       struct netdev_hw_addr *ha, bool global,
			       bool sync)
{
	if (global && !ha->global_use)
		return -ENOENT;

	if (sync && !ha->synced)
		return -ENOENT;

	if (global)
		ha->global_use = false;

	if (sync)
		ha->synced--;

	if (--ha->refcount)
		return 0;
	list_del_rcu(&ha->list);
	kfree_rcu(ha, rcu_head);
	list->count--;
	return 0;
}
111
112static int __hw_addr_del_ex(struct netdev_hw_addr_list *list,
113 const unsigned char *addr, int addr_len,
114 unsigned char addr_type, bool global, bool sync)
115{
116 struct netdev_hw_addr *ha;
117
118 list_for_each_entry(ha, &list->list, list) {
119 if (!memcmp(ha->addr, addr, addr_len) &&
120 (ha->type == addr_type || !addr_type))
121 return __hw_addr_del_entry(list, ha, global, sync);
122 }
123 return -ENOENT;
124}
125
/* Plain delete: neither global nor synced. */
static int __hw_addr_del(struct netdev_hw_addr_list *list,
			 const unsigned char *addr, int addr_len,
			 unsigned char addr_type)
{
	return __hw_addr_del_ex(list, addr, addr_len, addr_type, false, false);
}
132
/* Sync one source entry @ha into @to_list.
 *
 * On a successful (first) sync the source entry's sync_cnt and
 * refcount are bumped so it stays pinned while synced.  -EEXIST from
 * the add path means the address was already synced and is not treated
 * as an error here.
 */
static int __hw_addr_sync_one(struct netdev_hw_addr_list *to_list,
			      struct netdev_hw_addr *ha,
			      int addr_len)
{
	int err;

	err = __hw_addr_add_ex(to_list, ha->addr, addr_len, ha->type,
			       false, true, ha->sync_cnt);
	if (err && err != -EEXIST)
		return err;

	if (!err) {
		ha->sync_cnt++;
		ha->refcount++;
	}

	return 0;
}
151
/* Undo one sync of @ha: remove the synced reference from @to_list,
 * then drop the reference the sync held on the source entry in
 * @from_list.  Silently does nothing if @to_list has no synced entry
 * for this address.
 */
static void __hw_addr_unsync_one(struct netdev_hw_addr_list *to_list,
				 struct netdev_hw_addr_list *from_list,
				 struct netdev_hw_addr *ha,
				 int addr_len)
{
	int err;

	err = __hw_addr_del_ex(to_list, ha->addr, addr_len, ha->type,
			       false, true);
	if (err)
		return;
	ha->sync_cnt--;

	/* address on from list is not marked synced */
	__hw_addr_del_entry(from_list, ha, false, false);
}
167
/* Sync @from_list to @to_list, for use when a source list may sync to
 * several destinations (each sync holds its own reference).
 *
 * An entry whose every reference comes from syncing
 * (sync_cnt == refcount) is no longer used locally and gets unsynced;
 * anything else is (re)synced.  Stops at the first sync error.
 */
static int __hw_addr_sync_multiple(struct netdev_hw_addr_list *to_list,
				   struct netdev_hw_addr_list *from_list,
				   int addr_len)
{
	int err = 0;
	struct netdev_hw_addr *ha, *tmp;

	list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
		if (ha->sync_cnt == ha->refcount) {
			__hw_addr_unsync_one(to_list, from_list, ha, addr_len);
		} else {
			err = __hw_addr_sync_one(to_list, ha, addr_len);
			if (err)
				break;
		}
	}
	return err;
}
186
187
188
189
190
191
/**
 * __hw_addr_sync - Synchronize address lists
 * @to_list: destination address list
 * @from_list: source address list
 * @addr_len: address length in bytes
 *
 * Sync never-synced entries from @from_list into @to_list, and unsync
 * entries whose only remaining reference is the sync itself
 * (refcount == 1).  Intended for a 1:1 source/destination pairing;
 * callers are expected to hold the appropriate addr-list lock.
 *
 * Returns 0 or the first error from __hw_addr_sync_one().
 */
int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
		   struct netdev_hw_addr_list *from_list,
		   int addr_len)
{
	int err = 0;
	struct netdev_hw_addr *ha, *tmp;

	list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
		if (!ha->sync_cnt) {
			err = __hw_addr_sync_one(to_list, ha, addr_len);
			if (err)
				break;
		} else if (ha->refcount == 1)
			__hw_addr_unsync_one(to_list, from_list, ha, addr_len);
	}
	return err;
}
EXPORT_SYMBOL(__hw_addr_sync);
210
211void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
212 struct netdev_hw_addr_list *from_list,
213 int addr_len)
214{
215 struct netdev_hw_addr *ha, *tmp;
216
217 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
218 if (ha->sync_cnt)
219 __hw_addr_unsync_one(to_list, from_list, ha, addr_len);
220 }
221}
222EXPORT_SYMBOL(__hw_addr_unsync);
223
224
225
226
227
228
229
230
231
232
233
234
235
236
/**
 * __hw_addr_sync_dev - Synchronize an address list to a device
 * @list: address list to synchronize
 * @dev: device to sync
 * @sync: function to call when an address should be added
 * @unsync: function to call when an address should be removed
 *
 * Two passes: first flush entries that were synced but whose only
 * remaining reference is the sync itself (refcount == 1); then push
 * not-yet-synced entries to the device via @sync.  A non-zero return
 * from @unsync defers the removal to a later call.
 *
 * Returns 0 or the first error returned by @sync.
 */
int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
		       struct net_device *dev,
		       int (*sync)(struct net_device *, const unsigned char *),
		       int (*unsync)(struct net_device *,
				     const unsigned char *))
{
	struct netdev_hw_addr *ha, *tmp;
	int err;

	/* first go through and flush out any stale entries */
	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		if (!ha->sync_cnt || ha->refcount != 1)
			continue;

		/* if unsync is defined and fails, defer unsyncing address */
		if (unsync && unsync(dev, ha->addr))
			continue;

		ha->sync_cnt--;
		__hw_addr_del_entry(list, ha, false, false);
	}

	/* go through and sync new entries to the list */
	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		if (ha->sync_cnt)
			continue;

		err = sync(dev, ha->addr);
		if (err)
			return err;

		ha->sync_cnt++;
		ha->refcount++;
	}

	return 0;
}
EXPORT_SYMBOL(__hw_addr_sync_dev);
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
/**
 * __hw_addr_ref_sync_dev - Synchronize an address list to a device,
 * tracking how many references each address carries
 * @list: address list to synchronize
 * @dev: device to sync
 * @sync: function to call when an address/refcount should be pushed
 * @unsync: function to call when references should be dropped
 *
 * Reference-counted variant of __hw_addr_sync_dev().  The invariant
 * maintained between calls is refcount == 2 * sync_cnt for a fully
 * synced entry, so (sync_cnt << 1) compared against refcount detects
 * whether un-pushed references exist (first pass: entries with fewer
 * live users than last pushed; second pass: entries with more).
 * A non-zero return from @unsync defers the removal.
 *
 * Returns 0 or the first error returned by @sync.
 */
int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list,
			   struct net_device *dev,
			   int (*sync)(struct net_device *,
				       const unsigned char *, int),
			   int (*unsync)(struct net_device *,
					 const unsigned char *, int))
{
	struct netdev_hw_addr *ha, *tmp;
	int err, ref_cnt;

	/* first go through and flush out any unsynced/stale entries */
	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		/* sync if address is not used */
		if ((ha->sync_cnt << 1) <= ha->refcount)
			continue;

		/* if fails defer unsyncing address */
		ref_cnt = ha->refcount - ha->sync_cnt;
		if (unsync && unsync(dev, ha->addr, ref_cnt))
			continue;

		ha->refcount = (ref_cnt << 1) + 1;
		ha->sync_cnt = ref_cnt;
		__hw_addr_del_entry(list, ha, false, false);
	}

	/* go through and sync updated/new entries to the list */
	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		/* sync if address added or reused */
		if ((ha->sync_cnt << 1) >= ha->refcount)
			continue;

		ref_cnt = ha->refcount - ha->sync_cnt;
		err = sync(dev, ha->addr, ref_cnt);
		if (err)
			return err;

		ha->refcount = ref_cnt << 1;
		ha->sync_cnt = ref_cnt;
	}

	return 0;
}
EXPORT_SYMBOL(__hw_addr_ref_sync_dev);
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
/**
 * __hw_addr_ref_unsync_dev - Remove synchronized addresses and
 * references from a device
 * @list: address list to remove synchronized addresses from
 * @dev: device to sync
 * @unsync: function to call when addresses/references should be removed
 *
 * Companion to __hw_addr_ref_sync_dev(); drops all synced references
 * for each entry in one shot.  A non-zero return from @unsync defers
 * the removal to a later call.
 */
void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list,
			      struct net_device *dev,
			      int (*unsync)(struct net_device *,
					    const unsigned char *, int))
{
	struct netdev_hw_addr *ha, *tmp;

	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		if (!ha->sync_cnt)
			continue;

		/* if fails defer unsyncing address */
		if (unsync && unsync(dev, ha->addr, ha->sync_cnt))
			continue;

		/* drop all synced references, keeping the one live user */
		ha->refcount -= ha->sync_cnt - 1;
		ha->sync_cnt = 0;
		__hw_addr_del_entry(list, ha, false, false);
	}
}
EXPORT_SYMBOL(__hw_addr_ref_unsync_dev);
372
373
374
375
376
377
378
379
380
381
382
383
384
385void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
386 struct net_device *dev,
387 int (*unsync)(struct net_device *,
388 const unsigned char *))
389{
390 struct netdev_hw_addr *ha, *tmp;
391
392 list_for_each_entry_safe(ha, tmp, &list->list, list) {
393 if (!ha->sync_cnt)
394 continue;
395
396
397 if (unsync && unsync(dev, ha->addr))
398 continue;
399
400 ha->sync_cnt--;
401 __hw_addr_del_entry(list, ha, false, false);
402 }
403}
404EXPORT_SYMBOL(__hw_addr_unsync_dev);
405
406static void __hw_addr_flush(struct netdev_hw_addr_list *list)
407{
408 struct netdev_hw_addr *ha, *tmp;
409
410 list_for_each_entry_safe(ha, tmp, &list->list, list) {
411 list_del_rcu(&ha->list);
412 kfree_rcu(ha, rcu_head);
413 }
414 list->count = 0;
415}
416
417void __hw_addr_init(struct netdev_hw_addr_list *list)
418{
419 INIT_LIST_HEAD(&list->list);
420 list->count = 0;
421}
422EXPORT_SYMBOL(__hw_addr_init);
423
424
425
426
427
428
429
430
431
432
433
434
435
/**
 * dev_addr_flush - Flush device address list
 * @dev: device
 *
 * Flush the device address list and set dev->dev_addr (which aliased
 * the first entry) to NULL.  The caller must hold the rtnl_mutex.
 */
void dev_addr_flush(struct net_device *dev)
{
	/* rtnl_mutex must be held here */

	__hw_addr_flush(&dev->dev_addrs);
	dev->dev_addr = NULL;
}
EXPORT_SYMBOL(dev_addr_flush);
444
445
446
447
448
449
450
451
452
453
/**
 * dev_addr_init - Init device address list
 * @dev: device
 *
 * Init the device address list and create the first, all-zero entry;
 * dev->dev_addr is then pointed at that entry's storage.  The caller
 * must hold the rtnl_mutex.
 *
 * Returns 0 on success or a negative errno from the add.
 */
int dev_addr_init(struct net_device *dev)
{
	unsigned char addr[MAX_ADDR_LEN];
	struct netdev_hw_addr *ha;
	int err;

	/* rtnl_mutex must be held here */

	__hw_addr_init(&dev->dev_addrs);
	memset(addr, 0, sizeof(addr));
	err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
			    NETDEV_HW_ADDR_T_LAN);
	if (!err) {
		/*
		 * Get the first (previously created) address from the list
		 * and set dev_addr pointer to this location.
		 */
		ha = list_first_entry(&dev->dev_addrs.list,
				      struct netdev_hw_addr, list);
		dev->dev_addr = ha->addr;
	}
	return err;
}
EXPORT_SYMBOL(dev_addr_init);
478
479
480
481
482
483
484
485
486
487
488
489
/**
 * dev_addr_add - Add a device address
 * @dev: device
 * @addr: address to add
 * @addr_type: address type
 *
 * Add a device address to the device or increase the reference count
 * of an existing one.  Notifiers are consulted before (and may veto)
 * the change, and NETDEV_CHANGEADDR is raised afterwards.
 * The caller must hold the RTNL semaphore.
 *
 * Returns 0 on success or a negative errno.
 */
int dev_addr_add(struct net_device *dev, const unsigned char *addr,
		 unsigned char addr_type)
{
	int err;

	ASSERT_RTNL();

	err = dev_pre_changeaddr_notify(dev, addr, NULL);
	if (err)
		return err;
	err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
	if (!err)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	return err;
}
EXPORT_SYMBOL(dev_addr_add);
506
507
508
509
510
511
512
513
514
515
516
517
/**
 * dev_addr_del - Release a device address
 * @dev: device
 * @addr: address to delete
 * @addr_type: address type
 *
 * Release a reference to a device address and remove it from the
 * device if the reference count drops to zero.
 * The caller must hold the RTNL semaphore.
 *
 * Returns 0 on success, -ENOENT if the address is missing or is the
 * sole reference to the primary entry (see below).
 */
int dev_addr_del(struct net_device *dev, const unsigned char *addr,
		 unsigned char addr_type)
{
	int err;
	struct netdev_hw_addr *ha;

	ASSERT_RTNL();

	/*
	 * We can not remove the first address from the list because
	 * dev->dev_addr points to that.
	 */
	ha = list_first_entry(&dev->dev_addrs.list,
			      struct netdev_hw_addr, list);
	if (!memcmp(ha->addr, addr, dev->addr_len) &&
	    ha->type == addr_type && ha->refcount == 1)
		return -ENOENT;

	err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
			    addr_type);
	if (!err)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	return err;
}
EXPORT_SYMBOL(dev_addr_del);
543
544
545
546
547
548
549
550
551
552
553int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr)
554{
555 struct netdev_hw_addr *ha;
556 int err;
557
558 netif_addr_lock_bh(dev);
559 list_for_each_entry(ha, &dev->uc.list, list) {
560 if (!memcmp(ha->addr, addr, dev->addr_len) &&
561 ha->type == NETDEV_HW_ADDR_T_UNICAST) {
562 err = -EEXIST;
563 goto out;
564 }
565 }
566 err = __hw_addr_create_ex(&dev->uc, addr, dev->addr_len,
567 NETDEV_HW_ADDR_T_UNICAST, true, false);
568 if (!err)
569 __dev_set_rx_mode(dev);
570out:
571 netif_addr_unlock_bh(dev);
572 return err;
573}
574EXPORT_SYMBOL(dev_uc_add_excl);
575
576
577
578
579
580
581
582
583
584int dev_uc_add(struct net_device *dev, const unsigned char *addr)
585{
586 int err;
587
588 netif_addr_lock_bh(dev);
589 err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
590 NETDEV_HW_ADDR_T_UNICAST);
591 if (!err)
592 __dev_set_rx_mode(dev);
593 netif_addr_unlock_bh(dev);
594 return err;
595}
596EXPORT_SYMBOL(dev_uc_add);
597
598
599
600
601
602
603
604
605
606int dev_uc_del(struct net_device *dev, const unsigned char *addr)
607{
608 int err;
609
610 netif_addr_lock_bh(dev);
611 err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
612 NETDEV_HW_ADDR_T_UNICAST);
613 if (!err)
614 __dev_set_rx_mode(dev);
615 netif_addr_unlock_bh(dev);
616 return err;
617}
618EXPORT_SYMBOL(dev_uc_del);
619
620
621
622
623
624
625
626
627
628
629
630
631
632
/**
 * dev_uc_sync - Synchronize device's unicast list to another device
 * @to: destination device
 * @from: source device
 *
 * Add newly added addresses to the destination device and release
 * addresses that have no users left.  This function is intended to be
 * called from the dev->set_rx_mode function of layered software
 * devices.  This function assumes that addresses will only ever be
 * synced to the @to device and no other; @from's addr-list lock must
 * already be held by the caller.
 */
int dev_uc_sync(struct net_device *to, struct net_device *from)
{
	int err = 0;

	if (to->addr_len != from->addr_len)
		return -EINVAL;

	netif_addr_lock(to);
	err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
	if (!err)
		__dev_set_rx_mode(to);
	netif_addr_unlock(to);
	return err;
}
EXPORT_SYMBOL(dev_uc_sync);
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
/**
 * dev_uc_sync_multiple - Synchronize device's unicast list to another
 * device, but allow for multiple calls to sync to multiple devices
 * @to: destination device
 * @from: source device
 *
 * Add newly added addresses to the destination device and release
 * addresses that have been deleted from the source.  The source device
 * is allowed to be synced to multiple destination devices; @from's
 * addr-list lock must already be held by the caller.
 */
int dev_uc_sync_multiple(struct net_device *to, struct net_device *from)
{
	int err = 0;

	if (to->addr_len != from->addr_len)
		return -EINVAL;

	netif_addr_lock(to);
	err = __hw_addr_sync_multiple(&to->uc, &from->uc, to->addr_len);
	if (!err)
		__dev_set_rx_mode(to);
	netif_addr_unlock(to);
	return err;
}
EXPORT_SYMBOL(dev_uc_sync_multiple);
678
679
680
681
682
683
684
685
686
687
/**
 * dev_uc_unsync - Remove synchronized addresses from the destination
 * device
 * @to: destination device
 * @from: source device
 *
 * Remove all addresses that were added to the destination device by
 * dev_uc_sync().  Note: @from is locked first (with BH disabled), then
 * @to — the same order dev_uc_sync() callers use.
 */
void dev_uc_unsync(struct net_device *to, struct net_device *from)
{
	if (to->addr_len != from->addr_len)
		return;

	netif_addr_lock_bh(from);
	netif_addr_lock(to);
	__hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
	__dev_set_rx_mode(to);
	netif_addr_unlock(to);
	netif_addr_unlock_bh(from);
}
EXPORT_SYMBOL(dev_uc_unsync);
701
702
703
704
705
706
707
/**
 * dev_uc_flush - Flush unicast addresses
 * @dev: device
 *
 * Flush the unicast addresses from the device.
 */
void dev_uc_flush(struct net_device *dev)
{
	netif_addr_lock_bh(dev);
	__hw_addr_flush(&dev->uc);
	netif_addr_unlock_bh(dev);
}
EXPORT_SYMBOL(dev_uc_flush);
715
716
717
718
719
720
721
/**
 * dev_uc_init - Init unicast address list
 * @dev: device
 *
 * Init unicast address list.
 */
void dev_uc_init(struct net_device *dev)
{
	__hw_addr_init(&dev->uc);
}
EXPORT_SYMBOL(dev_uc_init);
727
728
729
730
731
732
733
734
735
736
737int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr)
738{
739 struct netdev_hw_addr *ha;
740 int err;
741
742 netif_addr_lock_bh(dev);
743 list_for_each_entry(ha, &dev->mc.list, list) {
744 if (!memcmp(ha->addr, addr, dev->addr_len) &&
745 ha->type == NETDEV_HW_ADDR_T_MULTICAST) {
746 err = -EEXIST;
747 goto out;
748 }
749 }
750 err = __hw_addr_create_ex(&dev->mc, addr, dev->addr_len,
751 NETDEV_HW_ADDR_T_MULTICAST, true, false);
752 if (!err)
753 __dev_set_rx_mode(dev);
754out:
755 netif_addr_unlock_bh(dev);
756 return err;
757}
758EXPORT_SYMBOL(dev_mc_add_excl);
759
760static int __dev_mc_add(struct net_device *dev, const unsigned char *addr,
761 bool global)
762{
763 int err;
764
765 netif_addr_lock_bh(dev);
766 err = __hw_addr_add_ex(&dev->mc, addr, dev->addr_len,
767 NETDEV_HW_ADDR_T_MULTICAST, global, false, 0);
768 if (!err)
769 __dev_set_rx_mode(dev);
770 netif_addr_unlock_bh(dev);
771 return err;
772}
773
774
775
776
777
778
779
780
/**
 * dev_mc_add - Add a multicast address
 * @dev: device
 * @addr: address to add
 *
 * Add a multicast address to the device or increase
 * the reference count if it already exists.
 */
int dev_mc_add(struct net_device *dev, const unsigned char *addr)
{
	return __dev_mc_add(dev, addr, false);
}
EXPORT_SYMBOL(dev_mc_add);
786
787
788
789
790
791
792
793
/**
 * dev_mc_add_global - Add a global multicast address
 * @dev: device
 * @addr: address to add
 *
 * Add a global multicast address to the device.
 */
int dev_mc_add_global(struct net_device *dev, const unsigned char *addr)
{
	return __dev_mc_add(dev, addr, true);
}
EXPORT_SYMBOL(dev_mc_add_global);
799
800static int __dev_mc_del(struct net_device *dev, const unsigned char *addr,
801 bool global)
802{
803 int err;
804
805 netif_addr_lock_bh(dev);
806 err = __hw_addr_del_ex(&dev->mc, addr, dev->addr_len,
807 NETDEV_HW_ADDR_T_MULTICAST, global, false);
808 if (!err)
809 __dev_set_rx_mode(dev);
810 netif_addr_unlock_bh(dev);
811 return err;
812}
813
814
815
816
817
818
819
820
821
/**
 * dev_mc_del - Delete a multicast address
 * @dev: device
 * @addr: address to delete
 *
 * Release reference to a multicast address and remove it
 * from the device if the reference count drops to zero.
 */
int dev_mc_del(struct net_device *dev, const unsigned char *addr)
{
	return __dev_mc_del(dev, addr, false);
}
EXPORT_SYMBOL(dev_mc_del);
827
828
829
830
831
832
833
834
835
/**
 * dev_mc_del_global - Delete a global multicast address
 * @dev: device
 * @addr: address to delete
 *
 * Release reference to a multicast address and remove it
 * from the device if the reference count drops to zero.
 */
int dev_mc_del_global(struct net_device *dev, const unsigned char *addr)
{
	return __dev_mc_del(dev, addr, true);
}
EXPORT_SYMBOL(dev_mc_del_global);
841
842
843
844
845
846
847
848
849
850
851
852
853
/**
 * dev_mc_sync - Synchronize device's multicast list to another device
 * @to: destination device
 * @from: source device
 *
 * Add newly added addresses to the destination device and release
 * addresses that have no users left.  This function is intended to be
 * called from the ndo_set_rx_mode function of layered software
 * devices.  This function assumes that addresses will only ever be
 * synced to the @to device and no other; @from's addr-list lock must
 * already be held by the caller.
 */
int dev_mc_sync(struct net_device *to, struct net_device *from)
{
	int err = 0;

	if (to->addr_len != from->addr_len)
		return -EINVAL;

	netif_addr_lock(to);
	err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
	if (!err)
		__dev_set_rx_mode(to);
	netif_addr_unlock(to);
	return err;
}
EXPORT_SYMBOL(dev_mc_sync);
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
/**
 * dev_mc_sync_multiple - Synchronize device's multicast list to another
 * device, but allow for multiple calls to sync to multiple devices
 * @to: destination device
 * @from: source device
 *
 * Add newly added addresses to the destination device and release
 * addresses that have been deleted from the source.  The source device
 * is allowed to be synced to multiple destination devices; @from's
 * addr-list lock must already be held by the caller.
 */
int dev_mc_sync_multiple(struct net_device *to, struct net_device *from)
{
	int err = 0;

	if (to->addr_len != from->addr_len)
		return -EINVAL;

	netif_addr_lock(to);
	err = __hw_addr_sync_multiple(&to->mc, &from->mc, to->addr_len);
	if (!err)
		__dev_set_rx_mode(to);
	netif_addr_unlock(to);
	return err;
}
EXPORT_SYMBOL(dev_mc_sync_multiple);
899
900
901
902
903
904
905
906
907
908
/**
 * dev_mc_unsync - Remove synchronized addresses from the destination
 * device
 * @to: destination device
 * @from: source device
 *
 * Remove all addresses that were added to the destination device by
 * dev_mc_sync().  Note: @from is locked first (with BH disabled), then
 * @to — the same order dev_mc_sync() callers use.
 */
void dev_mc_unsync(struct net_device *to, struct net_device *from)
{
	if (to->addr_len != from->addr_len)
		return;

	netif_addr_lock_bh(from);
	netif_addr_lock(to);
	__hw_addr_unsync(&to->mc, &from->mc, to->addr_len);
	__dev_set_rx_mode(to);
	netif_addr_unlock(to);
	netif_addr_unlock_bh(from);
}
EXPORT_SYMBOL(dev_mc_unsync);
922
923
924
925
926
927
928
/**
 * dev_mc_flush - Flush multicast addresses
 * @dev: device
 *
 * Flush the multicast addresses from the device.
 */
void dev_mc_flush(struct net_device *dev)
{
	netif_addr_lock_bh(dev);
	__hw_addr_flush(&dev->mc);
	netif_addr_unlock_bh(dev);
}
EXPORT_SYMBOL(dev_mc_flush);
936
937
938
939
940
941
942
/**
 * dev_mc_init - Init multicast address list
 * @dev: device
 *
 * Init multicast address list.
 */
void dev_mc_init(struct net_device *dev)
{
	__hw_addr_init(&dev->mc);
}
EXPORT_SYMBOL(dev_mc_init);
948