1
2
3
4
5
6
7
8
9
10#include <linux/netdevice.h>
11#include <linux/rtnetlink.h>
12#include <linux/export.h>
13#include <linux/list.h>
14
15#include "dev.h"
16
17
18
19
20
/* Insert @new into @list's rbtree, which is keyed on (addr, type).
 * Returns -EEXIST when an entry with the same address and type is already
 * present.  The node is linked with rb_link_node_rcu() so concurrent RCU
 * readers walking the tree see a consistent view.
 */
static int __hw_addr_insert(struct netdev_hw_addr_list *list,
			    struct netdev_hw_addr *new, int addr_len)
{
	struct rb_node **ins_point = &list->tree.rb_node, *parent = NULL;
	struct netdev_hw_addr *ha;

	while (*ins_point) {
		int diff;

		ha = rb_entry(*ins_point, struct netdev_hw_addr, node);
		diff = memcmp(new->addr, ha->addr, addr_len);
		/* same address: break the tie on the address type */
		if (diff == 0)
			diff = memcmp(&new->type, &ha->type, sizeof(new->type));

		parent = *ins_point;
		if (diff < 0)
			ins_point = &parent->rb_left;
		else if (diff > 0)
			ins_point = &parent->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node_rcu(&new->node, parent, ins_point);
	rb_insert_color(&new->node, &list->tree);

	return 0;
}
49
50static struct netdev_hw_addr*
51__hw_addr_create(const unsigned char *addr, int addr_len,
52 unsigned char addr_type, bool global, bool sync)
53{
54 struct netdev_hw_addr *ha;
55 int alloc_size;
56
57 alloc_size = sizeof(*ha);
58 if (alloc_size < L1_CACHE_BYTES)
59 alloc_size = L1_CACHE_BYTES;
60 ha = kmalloc(alloc_size, GFP_ATOMIC);
61 if (!ha)
62 return NULL;
63 memcpy(ha->addr, addr, addr_len);
64 ha->type = addr_type;
65 ha->refcount = 1;
66 ha->global_use = global;
67 ha->synced = sync ? 1 : 0;
68 ha->sync_cnt = 0;
69
70 return ha;
71}
72
/* Add @addr to @list or take another reference on an existing entry.
 *
 * Walks the rbtree keyed on (addr, type).  On a match:
 *  - @exclusive: fail with -EEXIST;
 *  - @global: a second global use of the entry is a no-op returning 0
 *    (the refcount is NOT bumped), otherwise the entry is marked for
 *    global use;
 *  - @sync: re-syncing an already-synced entry fails with -EEXIST when
 *    @sync_count is non-zero, otherwise the synced count is bumped;
 *  finally the refcount is incremented and 0 is returned.
 * With no match, a new entry is created and linked into both the rbtree
 * and the RCU list, and list->count is bumped.
 */
static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
			    const unsigned char *addr, int addr_len,
			    unsigned char addr_type, bool global, bool sync,
			    int sync_count, bool exclusive)
{
	struct rb_node **ins_point = &list->tree.rb_node, *parent = NULL;
	struct netdev_hw_addr *ha;

	if (addr_len > MAX_ADDR_LEN)
		return -EINVAL;

	while (*ins_point) {
		int diff;

		ha = rb_entry(*ins_point, struct netdev_hw_addr, node);
		diff = memcmp(addr, ha->addr, addr_len);
		if (diff == 0)
			diff = memcmp(&addr_type, &ha->type, sizeof(addr_type));

		parent = *ins_point;
		if (diff < 0) {
			ins_point = &parent->rb_left;
		} else if (diff > 0) {
			ins_point = &parent->rb_right;
		} else {
			if (exclusive)
				return -EEXIST;
			if (global) {
				/* check if addr is already used as global */
				if (ha->global_use)
					return 0;
				else
					ha->global_use = true;
			}
			if (sync) {
				if (ha->synced && sync_count)
					return -EEXIST;
				else
					ha->synced++;
			}
			ha->refcount++;
			return 0;
		}
	}

	ha = __hw_addr_create(addr, addr_len, addr_type, global, sync);
	if (!ha)
		return -ENOMEM;

	/* The first address in dev->dev_addrs is pointed to by dev->dev_addr
	 * and mutated freely by device drivers and netdev ops, so if this
	 * entry is about to become the first one use the insertion point
	 * found above rather than re-walking (same tree walk as
	 * __hw_addr_insert(), non-RCU link is fine under the list lock).
	 */
	rb_link_node(&ha->node, parent, ins_point);
	rb_insert_color(&ha->node, &list->tree);

	list_add_tail_rcu(&ha->list, &list->list);
	list->count++;

	return 0;
}
130
131static int __hw_addr_add(struct netdev_hw_addr_list *list,
132 const unsigned char *addr, int addr_len,
133 unsigned char addr_type)
134{
135 return __hw_addr_add_ex(list, addr, addr_len, addr_type, false, false,
136 0, false);
137}
138
/* Drop one reference from @ha, and the global/synced use when requested.
 * Returns -ENOENT when asked to drop a global or synced use the entry
 * does not hold.  The entry is unlinked from the rbtree and the list and
 * freed (RCU-deferred via kfree_rcu) only when the refcount reaches zero.
 */
static int __hw_addr_del_entry(struct netdev_hw_addr_list *list,
			       struct netdev_hw_addr *ha, bool global,
			       bool sync)
{
	if (global && !ha->global_use)
		return -ENOENT;

	if (sync && !ha->synced)
		return -ENOENT;

	if (global)
		ha->global_use = false;

	if (sync)
		ha->synced--;

	if (--ha->refcount)
		return 0;

	rb_erase(&ha->node, &list->tree);

	list_del_rcu(&ha->list);
	kfree_rcu(ha, rcu_head);
	list->count--;
	return 0;
}
165
166static struct netdev_hw_addr *__hw_addr_lookup(struct netdev_hw_addr_list *list,
167 const unsigned char *addr, int addr_len,
168 unsigned char addr_type)
169{
170 struct rb_node *node;
171
172 node = list->tree.rb_node;
173
174 while (node) {
175 struct netdev_hw_addr *ha = rb_entry(node, struct netdev_hw_addr, node);
176 int diff = memcmp(addr, ha->addr, addr_len);
177
178 if (diff == 0 && addr_type)
179 diff = memcmp(&addr_type, &ha->type, sizeof(addr_type));
180
181 if (diff < 0)
182 node = node->rb_left;
183 else if (diff > 0)
184 node = node->rb_right;
185 else
186 return ha;
187 }
188
189 return NULL;
190}
191
192static int __hw_addr_del_ex(struct netdev_hw_addr_list *list,
193 const unsigned char *addr, int addr_len,
194 unsigned char addr_type, bool global, bool sync)
195{
196 struct netdev_hw_addr *ha = __hw_addr_lookup(list, addr, addr_len, addr_type);
197
198 if (!ha)
199 return -ENOENT;
200 return __hw_addr_del_entry(list, ha, global, sync);
201}
202
203static int __hw_addr_del(struct netdev_hw_addr_list *list,
204 const unsigned char *addr, int addr_len,
205 unsigned char addr_type)
206{
207 return __hw_addr_del_ex(list, addr, addr_len, addr_type, false, false);
208}
209
/* Sync @ha onto @to_list.  An -EEXIST from the add (already synced
 * there) is treated as success without taking new references; any other
 * error is propagated.  On a fresh sync, the source entry's sync count
 * and refcount are both bumped to record the synced use.
 */
static int __hw_addr_sync_one(struct netdev_hw_addr_list *to_list,
			      struct netdev_hw_addr *ha,
			      int addr_len)
{
	int err;

	err = __hw_addr_add_ex(to_list, ha->addr, addr_len, ha->type,
			       false, true, ha->sync_cnt, false);
	if (err && err != -EEXIST)
		return err;

	if (!err) {
		ha->sync_cnt++;
		ha->refcount++;
	}

	return 0;
}
228
/* Undo one sync of @ha: remove the synced use from @to_list and, only if
 * that succeeds, drop the matching sync count and reference on the
 * @from_list entry (which may free it).
 */
static void __hw_addr_unsync_one(struct netdev_hw_addr_list *to_list,
				 struct netdev_hw_addr_list *from_list,
				 struct netdev_hw_addr *ha,
				 int addr_len)
{
	int err;

	err = __hw_addr_del_ex(to_list, ha->addr, addr_len, ha->type,
			       false, true);
	if (err)
		return;
	ha->sync_cnt--;

	/* address on from list is not marked synced */
	__hw_addr_del_entry(from_list, ha, false, false);
}
244
/* Sync @from_list onto @to_list when several source devices may sync to
 * one destination.  An entry whose every remaining reference is a sync
 * reference (sync_cnt == refcount) has no local user left and is
 * unsynced; every other entry is (re-)synced over.  Stops at and returns
 * the first sync error.
 */
static int __hw_addr_sync_multiple(struct netdev_hw_addr_list *to_list,
				   struct netdev_hw_addr_list *from_list,
				   int addr_len)
{
	int err = 0;
	struct netdev_hw_addr *ha, *tmp;

	list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
		if (ha->sync_cnt == ha->refcount) {
			__hw_addr_unsync_one(to_list, from_list, ha, addr_len);
		} else {
			err = __hw_addr_sync_one(to_list, ha, addr_len);
			if (err)
				break;
		}
	}
	return err;
}
263
264
265
266
267
268
/* One-to-one sync of @from_list onto @to_list: entries never synced
 * before (sync_cnt == 0) are synced over; entries whose only remaining
 * reference is the sync reference (refcount == 1) are unsynced.
 * Returns the first error from syncing a new address, 0 otherwise.
 * NOTE(review): callers are expected to hold the relevant address-list
 * lock — see the locked wrappers such as dev_uc_sync().
 */
int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
		   struct netdev_hw_addr_list *from_list,
		   int addr_len)
{
	int err = 0;
	struct netdev_hw_addr *ha, *tmp;

	list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
		if (!ha->sync_cnt) {
			err = __hw_addr_sync_one(to_list, ha, addr_len);
			if (err)
				break;
		} else if (ha->refcount == 1)
			__hw_addr_unsync_one(to_list, from_list, ha, addr_len);
	}
	return err;
}
EXPORT_SYMBOL(__hw_addr_sync);
287
288void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
289 struct netdev_hw_addr_list *from_list,
290 int addr_len)
291{
292 struct netdev_hw_addr *ha, *tmp;
293
294 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
295 if (ha->sync_cnt)
296 __hw_addr_unsync_one(to_list, from_list, ha, addr_len);
297 }
298}
299EXPORT_SYMBOL(__hw_addr_unsync);
300
301
302
303
304
305
306
307
308
309
310
311
312
313
/**
 * __hw_addr_sync_dev - Synchronize an address list to a device
 * @list: address list to synchronize
 * @dev: device to sync to
 * @sync: function to call when an address should be added
 * @unsync: function to call when an address should be removed (optional)
 *
 * First pass: entries whose only remaining reference is their sync
 * reference (sync_cnt set, refcount == 1) are offered to @unsync; if
 * @unsync is absent or accepts (returns 0), the entry is unsynced and
 * its last reference dropped.  Second pass: entries not yet synced are
 * passed to @sync; on success their sync count and refcount are bumped.
 * Returns the first @sync error, 0 otherwise.
 */
int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
		       struct net_device *dev,
		       int (*sync)(struct net_device *, const unsigned char *),
		       int (*unsync)(struct net_device *,
				     const unsigned char *))
{
	struct netdev_hw_addr *ha, *tmp;
	int err;

	/* first go through and flush out any stale entries */
	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		if (!ha->sync_cnt || ha->refcount != 1)
			continue;

		/* if unsync is defined and fails defer unsyncing address */
		if (unsync && unsync(dev, ha->addr))
			continue;

		ha->sync_cnt--;
		__hw_addr_del_entry(list, ha, false, false);
	}

	/* go through and sync new entries to the list */
	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		if (ha->sync_cnt)
			continue;

		err = sync(dev, ha->addr);
		if (err)
			return err;

		ha->sync_cnt++;
		ha->refcount++;
	}

	return 0;
}
EXPORT_SYMBOL(__hw_addr_sync_dev);
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
/**
 * __hw_addr_ref_sync_dev - Synchronize an address list to a device,
 *	passing a reference count to the sync/unsync callbacks
 * @list: address list to synchronize
 * @dev: device to sync to
 * @sync: function to call when an address should be added
 * @unsync: function to call when an address should be removed (optional)
 *
 * Like __hw_addr_sync_dev(), but @sync/@unsync also receive how many
 * users the address has.  The invariant maintained for a fully-synced
 * entry is refcount == 2 * sync_cnt (one reference per user plus one per
 * synced use).
 */
int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list,
			   struct net_device *dev,
			   int (*sync)(struct net_device *,
				       const unsigned char *, int),
			   int (*unsync)(struct net_device *,
					 const unsigned char *, int))
{
	struct netdev_hw_addr *ha, *tmp;
	int err, ref_cnt;

	/* first pass: entries that lost users (2 * sync_cnt > refcount) */
	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		/* sync if address is not used */
		if ((ha->sync_cnt << 1) <= ha->refcount)
			continue;

		/* sync if address added or reused */
		ref_cnt = ha->refcount - ha->sync_cnt;
		if (unsync && unsync(dev, ha->addr, ref_cnt))
			continue;

		/* rewrite counters so that after __hw_addr_del_entry()
		 * drops one reference the invariant holds again for the
		 * new user count (the entry is freed if none remain)
		 */
		ha->refcount = (ref_cnt << 1) + 1;
		ha->sync_cnt = ref_cnt;
		__hw_addr_del_entry(list, ha, false, false);
	}

	/* second pass: entries that gained users (2 * sync_cnt < refcount) */
	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		/* sync if address added or reused */
		if ((ha->sync_cnt << 1) >= ha->refcount)
			continue;

		ref_cnt = ha->refcount - ha->sync_cnt;
		err = sync(dev, ha->addr, ref_cnt);
		if (err)
			return err;

		ha->refcount = ref_cnt << 1;
		ha->sync_cnt = ref_cnt;
	}

	return 0;
}
EXPORT_SYMBOL(__hw_addr_ref_sync_dev);
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
/**
 * __hw_addr_ref_unsync_dev - Remove synced addresses and references on
 *	a device
 * @list: address list to remove synced addresses from
 * @dev: device to sync
 * @unsync: function to call when an address should be removed (optional)
 *
 * Each synced entry is offered to @unsync together with its sync count;
 * if @unsync is absent or accepts (returns 0), the entry's references
 * are collapsed (refcount -= sync_cnt - 1 leaves exactly one reference
 * for __hw_addr_del_entry() to drop) and the entry is removed.
 */
void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list,
			      struct net_device *dev,
			      int (*unsync)(struct net_device *,
					    const unsigned char *, int))
{
	struct netdev_hw_addr *ha, *tmp;

	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		if (!ha->sync_cnt)
			continue;

		/* if unsync is defined and fails defer unsyncing address */
		if (unsync && unsync(dev, ha->addr, ha->sync_cnt))
			continue;

		ha->refcount -= ha->sync_cnt - 1;
		ha->sync_cnt = 0;
		__hw_addr_del_entry(list, ha, false, false);
	}
}
EXPORT_SYMBOL(__hw_addr_ref_unsync_dev);
449
450
451
452
453
454
455
456
457
458
459
460
461
/**
 * __hw_addr_unsync_dev - Remove synced addresses from a device
 * @list: address list to remove synced addresses from
 * @dev: device to sync
 * @unsync: function to call when an address should be removed (optional)
 *
 * Each synced entry is offered to @unsync; if @unsync is absent or
 * accepts (returns 0), the entry's sync count is dropped and one
 * reference released, removing the entry when that was the last one.
 */
void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
			  struct net_device *dev,
			  int (*unsync)(struct net_device *,
					const unsigned char *))
{
	struct netdev_hw_addr *ha, *tmp;

	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		if (!ha->sync_cnt)
			continue;

		/* if unsync is defined and fails defer unsyncing address */
		if (unsync && unsync(dev, ha->addr))
			continue;

		ha->sync_cnt--;
		__hw_addr_del_entry(list, ha, false, false);
	}
}
EXPORT_SYMBOL(__hw_addr_unsync_dev);
482
/* Free every entry on @list regardless of refcount.  The rbtree root is
 * reset up front (no per-node rb_erase needed since everything goes);
 * entries are freed via kfree_rcu so concurrent RCU list readers stay
 * safe.
 */
static void __hw_addr_flush(struct netdev_hw_addr_list *list)
{
	struct netdev_hw_addr *ha, *tmp;

	list->tree = RB_ROOT;
	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		list_del_rcu(&ha->list);
		kfree_rcu(ha, rcu_head);
	}
	list->count = 0;
}
494
495void __hw_addr_init(struct netdev_hw_addr_list *list)
496{
497 INIT_LIST_HEAD(&list->list);
498 list->count = 0;
499 list->tree = RB_ROOT;
500}
501EXPORT_SYMBOL(__hw_addr_init);
502
503
504
505
506
507
508
509
510
/* Verify nobody wrote to netdev->dev_addr directly: it must match the
 * shadow copy maintained by dev_addr_mod().  On a mismatch, print both
 * values and WARN.
 */
void dev_addr_check(struct net_device *dev)
{
	if (!memcmp(dev->dev_addr, dev->dev_addr_shadow, MAX_ADDR_LEN))
		return;

	netdev_warn(dev, "Current addr:  %*ph\n", MAX_ADDR_LEN, dev->dev_addr);
	netdev_warn(dev, "Expected addr: %*ph\n",
		    MAX_ADDR_LEN, dev->dev_addr_shadow);
	netdev_WARN(dev, "Incorrect netdev->dev_addr\n");
}
521
522
523
524
525
526
527
528
529
/**
 * dev_addr_flush - Flush device address list
 * @dev: device
 *
 * Flush the whole device address list and clear dev->dev_addr, which
 * pointed into a flushed entry's storage.
 */
void dev_addr_flush(struct net_device *dev)
{
	/* rtnl_mutex must be held here */
	dev_addr_check(dev);

	__hw_addr_flush(&dev->dev_addrs);
	dev->dev_addr = NULL;
}
538
539
540
541
542
543
544
545
546
547
/**
 * dev_addr_init - Init device address list
 * @dev: device
 *
 * Initialize the device address list with a single all-zero entry of
 * type NETDEV_HW_ADDR_T_LAN and point dev->dev_addr at its storage.
 * MAX_ADDR_LEN bytes are used so the address can later be rewritten in
 * place by dev_addr_mod() for any addr_len.  Returns the add error, if
 * any.
 */
int dev_addr_init(struct net_device *dev)
{
	unsigned char addr[MAX_ADDR_LEN];
	struct netdev_hw_addr *ha;
	int err;

	/* rtnl_mutex must be held here */

	__hw_addr_init(&dev->dev_addrs);
	memset(addr, 0, sizeof(addr));
	err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
			    NETDEV_HW_ADDR_T_LAN);
	if (!err) {
		/* dev->dev_addr aliases the first (and only) entry's
		 * address bytes; it stays valid because dev_addr_del()
		 * refuses to remove the first entry.
		 */
		ha = list_first_entry(&dev->dev_addrs.list,
				      struct netdev_hw_addr, list);
		dev->dev_addr = ha->addr;
	}
	return err;
}
571
/* Modify @len bytes at @offset of the device's primary hardware address.
 * Since the address bytes are the rbtree sort key, the entry must be
 * erased, mutated and re-inserted.  The shadow copy is updated so
 * dev_addr_check() can later detect direct writes to dev->dev_addr.
 * Re-insertion failing (-EEXIST, a duplicate key) would indicate a bug,
 * hence the WARN_ON.
 */
void dev_addr_mod(struct net_device *dev, unsigned int offset,
		  const void *addr, size_t len)
{
	struct netdev_hw_addr *ha;

	dev_addr_check(dev);

	/* recover the entry that dev->dev_addr points into */
	ha = container_of(dev->dev_addr, struct netdev_hw_addr, addr[0]);
	rb_erase(&ha->node, &dev->dev_addrs.tree);
	memcpy(&ha->addr[offset], addr, len);
	memcpy(&dev->dev_addr_shadow[offset], addr, len);
	WARN_ON(__hw_addr_insert(&dev->dev_addrs, ha, dev->addr_len));
}
EXPORT_SYMBOL(dev_addr_mod);
586
587
588
589
590
591
592
593
594
595
596
597
/**
 * dev_addr_add - Add a device address
 * @dev: device
 * @addr: address to add
 * @addr_type: address type
 *
 * Add a device address to the device or increase the reference count if
 * it already exists.  Notifies pre-change listeners first (their veto
 * aborts the add) and fires NETDEV_CHANGEADDR on success.
 * The caller must hold the rtnl_mutex (asserted below).
 */
int dev_addr_add(struct net_device *dev, const unsigned char *addr,
		 unsigned char addr_type)
{
	int err;

	ASSERT_RTNL();

	err = dev_pre_changeaddr_notify(dev, addr, NULL);
	if (err)
		return err;
	err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
	if (!err)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	return err;
}
EXPORT_SYMBOL(dev_addr_add);
614
615
616
617
618
619
620
621
622
623
624
625
/**
 * dev_addr_del - Release a device address
 * @dev: device
 * @addr: address to delete
 * @addr_type: address type
 *
 * Release a reference to a device address and remove it from the device
 * when no references remain.  Fires NETDEV_CHANGEADDR on success.
 * The caller must hold the rtnl_mutex (asserted below).
 */
int dev_addr_del(struct net_device *dev, const unsigned char *addr,
		 unsigned char addr_type)
{
	int err;
	struct netdev_hw_addr *ha;

	ASSERT_RTNL();

	/* We can not remove the first address from the list because
	 * dev->dev_addr points to it.
	 */
	ha = list_first_entry(&dev->dev_addrs.list,
			      struct netdev_hw_addr, list);
	if (!memcmp(ha->addr, addr, dev->addr_len) &&
	    ha->type == addr_type && ha->refcount == 1)
		return -ENOENT;

	err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
			    addr_type);
	if (!err)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	return err;
}
EXPORT_SYMBOL(dev_addr_del);
651
652
653
654
655
656
657
658
659
660
661int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr)
662{
663 int err;
664
665 netif_addr_lock_bh(dev);
666 err = __hw_addr_add_ex(&dev->uc, addr, dev->addr_len,
667 NETDEV_HW_ADDR_T_UNICAST, true, false,
668 0, true);
669 if (!err)
670 __dev_set_rx_mode(dev);
671 netif_addr_unlock_bh(dev);
672 return err;
673}
674EXPORT_SYMBOL(dev_uc_add_excl);
675
676
677
678
679
680
681
682
683
684int dev_uc_add(struct net_device *dev, const unsigned char *addr)
685{
686 int err;
687
688 netif_addr_lock_bh(dev);
689 err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
690 NETDEV_HW_ADDR_T_UNICAST);
691 if (!err)
692 __dev_set_rx_mode(dev);
693 netif_addr_unlock_bh(dev);
694 return err;
695}
696EXPORT_SYMBOL(dev_uc_add);
697
698
699
700
701
702
703
704
705
706int dev_uc_del(struct net_device *dev, const unsigned char *addr)
707{
708 int err;
709
710 netif_addr_lock_bh(dev);
711 err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
712 NETDEV_HW_ADDR_T_UNICAST);
713 if (!err)
714 __dev_set_rx_mode(dev);
715 netif_addr_unlock_bh(dev);
716 return err;
717}
718EXPORT_SYMBOL(dev_uc_del);
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733int dev_uc_sync(struct net_device *to, struct net_device *from)
734{
735 int err = 0;
736
737 if (to->addr_len != from->addr_len)
738 return -EINVAL;
739
740 netif_addr_lock(to);
741 err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
742 if (!err)
743 __dev_set_rx_mode(to);
744 netif_addr_unlock(to);
745 return err;
746}
747EXPORT_SYMBOL(dev_uc_sync);
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763int dev_uc_sync_multiple(struct net_device *to, struct net_device *from)
764{
765 int err = 0;
766
767 if (to->addr_len != from->addr_len)
768 return -EINVAL;
769
770 netif_addr_lock(to);
771 err = __hw_addr_sync_multiple(&to->uc, &from->uc, to->addr_len);
772 if (!err)
773 __dev_set_rx_mode(to);
774 netif_addr_unlock(to);
775 return err;
776}
777EXPORT_SYMBOL(dev_uc_sync_multiple);
778
779
780
781
782
783
784
785
786
787
/**
 * dev_uc_unsync - Remove synced addresses from the destination device
 * @to: destination device
 * @from: source device
 *
 * Remove all addresses that were added to @to by dev_uc_sync().
 */
void dev_uc_unsync(struct net_device *to, struct net_device *from)
{
	if (to->addr_len != from->addr_len)
		return;

	/* Both address-list locks are needed; @from's is taken first with
	 * BHs disabled, then @to's plain variant nests inside it.
	 * NOTE(review): this order presumably matches every other path
	 * that takes both locks — confirm against the sync paths.
	 */
	netif_addr_lock_bh(from);
	netif_addr_lock(to);
	__hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
	__dev_set_rx_mode(to);
	netif_addr_unlock(to);
	netif_addr_unlock_bh(from);
}
EXPORT_SYMBOL(dev_uc_unsync);
810
811
812
813
814
815
816
817void dev_uc_flush(struct net_device *dev)
818{
819 netif_addr_lock_bh(dev);
820 __hw_addr_flush(&dev->uc);
821 netif_addr_unlock_bh(dev);
822}
823EXPORT_SYMBOL(dev_uc_flush);
824
825
826
827
828
829
830
831void dev_uc_init(struct net_device *dev)
832{
833 __hw_addr_init(&dev->uc);
834}
835EXPORT_SYMBOL(dev_uc_init);
836
837
838
839
840
841
842
843
844
845
846int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr)
847{
848 int err;
849
850 netif_addr_lock_bh(dev);
851 err = __hw_addr_add_ex(&dev->mc, addr, dev->addr_len,
852 NETDEV_HW_ADDR_T_MULTICAST, true, false,
853 0, true);
854 if (!err)
855 __dev_set_rx_mode(dev);
856 netif_addr_unlock_bh(dev);
857 return err;
858}
859EXPORT_SYMBOL(dev_mc_add_excl);
860
861static int __dev_mc_add(struct net_device *dev, const unsigned char *addr,
862 bool global)
863{
864 int err;
865
866 netif_addr_lock_bh(dev);
867 err = __hw_addr_add_ex(&dev->mc, addr, dev->addr_len,
868 NETDEV_HW_ADDR_T_MULTICAST, global, false,
869 0, false);
870 if (!err)
871 __dev_set_rx_mode(dev);
872 netif_addr_unlock_bh(dev);
873 return err;
874}
875
876
877
878
879
880
881
882
883int dev_mc_add(struct net_device *dev, const unsigned char *addr)
884{
885 return __dev_mc_add(dev, addr, false);
886}
887EXPORT_SYMBOL(dev_mc_add);
888
889
890
891
892
893
894
895
896int dev_mc_add_global(struct net_device *dev, const unsigned char *addr)
897{
898 return __dev_mc_add(dev, addr, true);
899}
900EXPORT_SYMBOL(dev_mc_add_global);
901
902static int __dev_mc_del(struct net_device *dev, const unsigned char *addr,
903 bool global)
904{
905 int err;
906
907 netif_addr_lock_bh(dev);
908 err = __hw_addr_del_ex(&dev->mc, addr, dev->addr_len,
909 NETDEV_HW_ADDR_T_MULTICAST, global, false);
910 if (!err)
911 __dev_set_rx_mode(dev);
912 netif_addr_unlock_bh(dev);
913 return err;
914}
915
916
917
918
919
920
921
922
923
924int dev_mc_del(struct net_device *dev, const unsigned char *addr)
925{
926 return __dev_mc_del(dev, addr, false);
927}
928EXPORT_SYMBOL(dev_mc_del);
929
930
931
932
933
934
935
936
937
938int dev_mc_del_global(struct net_device *dev, const unsigned char *addr)
939{
940 return __dev_mc_del(dev, addr, true);
941}
942EXPORT_SYMBOL(dev_mc_del_global);
943
944
945
946
947
948
949
950
951
952
953
954
955
956int dev_mc_sync(struct net_device *to, struct net_device *from)
957{
958 int err = 0;
959
960 if (to->addr_len != from->addr_len)
961 return -EINVAL;
962
963 netif_addr_lock(to);
964 err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
965 if (!err)
966 __dev_set_rx_mode(to);
967 netif_addr_unlock(to);
968 return err;
969}
970EXPORT_SYMBOL(dev_mc_sync);
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986int dev_mc_sync_multiple(struct net_device *to, struct net_device *from)
987{
988 int err = 0;
989
990 if (to->addr_len != from->addr_len)
991 return -EINVAL;
992
993 netif_addr_lock(to);
994 err = __hw_addr_sync_multiple(&to->mc, &from->mc, to->addr_len);
995 if (!err)
996 __dev_set_rx_mode(to);
997 netif_addr_unlock(to);
998 return err;
999}
1000EXPORT_SYMBOL(dev_mc_sync_multiple);
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
/**
 * dev_mc_unsync - Remove synced addresses from the destination device
 * @to: destination device
 * @from: source device
 *
 * Remove all addresses that were added to @to by dev_mc_sync().
 */
void dev_mc_unsync(struct net_device *to, struct net_device *from)
{
	if (to->addr_len != from->addr_len)
		return;

	/* @from's lock is taken first with BHs disabled, then @to's nests
	 * inside it — same order as dev_uc_unsync().
	 */
	netif_addr_lock_bh(from);
	netif_addr_lock(to);
	__hw_addr_unsync(&to->mc, &from->mc, to->addr_len);
	__dev_set_rx_mode(to);
	netif_addr_unlock(to);
	netif_addr_unlock_bh(from);
}
EXPORT_SYMBOL(dev_mc_unsync);
1025
1026
1027
1028
1029
1030
1031
1032void dev_mc_flush(struct net_device *dev)
1033{
1034 netif_addr_lock_bh(dev);
1035 __hw_addr_flush(&dev->mc);
1036 netif_addr_unlock_bh(dev);
1037}
1038EXPORT_SYMBOL(dev_mc_flush);
1039
1040
1041
1042
1043
1044
1045
1046void dev_mc_init(struct net_device *dev)
1047{
1048 __hw_addr_init(&dev->mc);
1049}
1050EXPORT_SYMBOL(dev_mc_init);
1051