/*
 *	NET3	Protocol independent device support routines.
 */
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>

#include "net-sysfs.h"

/* Maximum number of SKBs queued on a GRO merge list. */
#define MAX_GRO_SKBS 8

/* Headroom that GRO-allocated packets are given. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *	Protocol handler lists: packet_type entries are hashed on the low
 *	nibble of the protocol value; ETH_P_ALL taps live on ptype_all.
 *	Writers take ptype_lock, readers rely on RCU.
 */
#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

/*
 *	The @dev_base_head list is protected by @dev_base_lock and the rtnl
 *	semaphore.  Pure readers hold dev_base_lock (or rcu_read_lock),
 *	writers must hold the rtnl semaphore as well as dev_base_lock.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion: caller must hold the rtnl semaphore. */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal: caller must hold the rtnl semaphore. */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}

/*
 *	Our notifier list
 */
static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */
DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
	 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
	 ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
	 "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
	 "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return &ptype_all;
	else
		return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee all CPU's that are in middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */
void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
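
/*
 * Example (illustrative, not part of the original file): a protocol
 * module typically registers its ethertype handler once at init time.
 * "my_pack" and "my_rcv" are hypothetical names; see ip_packet_type in
 * net/ipv4/af_inet.c for a real user.
 *
 *	static struct packet_type my_pack __read_mostly = {
 *		.type	= cpu_to_be16(ETH_P_IP),
 *		.func	= my_rcv,
 *	};
 *
 *	dev_add_pack(&my_pack);
 */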

/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);

/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine to
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq = s[i].map.irq;
			dev->base_addr = s[i].map.base_addr;
			dev->mem_start = s[i].map.mem_start;
			dev->mem_end = s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);

/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
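
/*
 * Example (illustrative): booting with "netdev=5,0x300,0,0,eth0" on the
 * kernel command line records irq 5 and base_addr 0x300 for eth0, which
 * a legacy driver can later retrieve via netdev_boot_setup_check().
 */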

/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */
struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */
struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */
struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
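
/*
 * Example (illustrative): lockless callers use the reference they get
 * back and drop it with dev_put() when done:
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *	if (dev) {
 *		... use dev ...
 *		dev_put(dev);
 *	}
 */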

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */
struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);

/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */
struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 */
struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags_rcu - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rcu_read_lock(), and result refcount is unchanged.
 */
struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
					unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	for_each_netdev_rcu(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags_rcu);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}
EXPORT_SYMBOL(dev_valid_name);

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */
static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be exactly one "%d" and no other
		 * '%' characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/*
	 * It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */
int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
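
/*
 * Example (illustrative): drivers pass a format such as "eth%d" and the
 * first free unit number is filled in, so dev->name becomes e.g. "eth0":
 *
 *	err = dev_alloc_name(dev, "eth%d");
 */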

static int dev_get_valid_name(struct net_device *dev, const char *name, bool fmt)
{
	struct net *net;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (fmt && strchr(name, '%'))
		return dev_alloc_name(dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(dev, newname, 1);
	if (err < 0)
		return err;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		return ret;
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		} else {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device,
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!dev->ifalias)
		return -ENOMEM;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}

/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}
EXPORT_SYMBOL(netdev_state_change);

int netdev_bonding_change(struct net_device *dev, unsigned long event)
{
	return call_netdevice_notifiers(event, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 *	dev_load 	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */
void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;
	int no_module;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	rcu_read_unlock();

	no_module = !dev;
	if (no_module && capable(CAP_NET_ADMIN))
		no_module = request_module("netdev-%s", name);
	if (no_module && capable(CAP_SYS_MODULE)) {
		if (!request_module("%s", name))
			pr_err("Loading kernel module for a network device "
			       "with CAP_SYS_MODULE (deprecated).  Use "
			       "CAP_NET_ADMIN and alias netdev-%s instead\n",
			       name);
	}
}
EXPORT_SYMBOL(dev_load);

static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	/*
	 *	If it went open OK then:
	 */
	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Enable NET_DMA
		 */
		net_dmaengine_get();

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);
	}

	return ret;
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	/*
	 *	Is it already up?
	 */
	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Open device
	 */
	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	/*
	 *	... and announce new interface.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);

static int __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, unreg_list) {
		/*
		 *	Tell people we are going down, so that they can
		 *	prepare to death, when device is still operating.
		 */
		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can be even on different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of it's
		 * napi_struct instances on this device.
		 */
		smp_mb__after_clear_bit(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, unreg_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 *	Call the device specific close. This cannot fail.
		 *	Only if device is UP
		 *
		 *	We allow it to be called even after a DETACH hot-plug
		 *	event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		/*
		 *	Device is now down.
		 */
		dev->flags &= ~IFF_UP;

		/*
		 *	Shutdown NET_DMA
		 */
		net_dmaengine_put();
	}

	return 0;
}

static int __dev_close(struct net_device *dev)
{
	int retval;
	LIST_HEAD(single);

	list_add(&dev->unreg_list, &single);
	retval = __dev_close_many(&single);
	list_del(&single);
	return retval;
}

int dev_close_many(struct list_head *head)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(tmp_list);

	list_for_each_entry_safe(dev, tmp, head, unreg_list)
		if (!(dev->flags & IFF_UP))
			list_move(&dev->unreg_list, &tmp_list);

	__dev_close_many(head);

	/*
	 *	Tell people we are down
	 */
	list_for_each_entry(dev, head, unreg_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
	}

	/* rollback_registered_many needs the complete original list */
	list_splice(&tmp_list, head);
	return 0;
}

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->unreg_list, &single);
	dev_close_many(&single);
	list_del(&single);
	return 0;
}
EXPORT_SYMBOL(dev_close);

/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);

static int dev_boot_phase = 1;

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered all registration and up events are replayed
 *	to the new notifier to allow device to have a race free
 *	view of the network device list.
 */
int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
			nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
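
/*
 * Example (illustrative): a subsystem can watch for interfaces coming
 * up; "my_event" and "my_nb" are hypothetical names.  In this kernel
 * the notifier's data pointer is the struct net_device itself.
 *
 *	static int my_event(struct notifier_block *nb,
 *			    unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			printk(KERN_INFO "%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_event };
 *
 *	register_netdevice_notifier(&my_nb);
 */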

/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */
int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */
int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	ASSERT_RTNL();
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}

/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);

static inline void net_timestamp_set(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}

static inline void net_timestamp_check(struct sk_buff *skb)
{
	if (!skb->tstamp.tv64 && atomic_read(&netstamp_needed))
		__net_timestamp(skb);
}

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	skb_orphan(skb);
	nf_reset(skb);

	if (unlikely(!(dev->flags & IFF_UP) ||
		     (skb->len > (dev->mtu + dev->hard_header_len + VLAN_HLEN)))) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	skb_set_dev(skb, dev);
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);
	return netif_rx(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);

static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */
static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			if (pt_prev) {
				deliver_skb(skb2, pt_prev, skb->dev);
				pt_prev = ptype;
				continue;
			}

			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			net_timestamp_set(skb2);

			/* skb->nh should be correctly
			 * set by sender, so that the second statement is
			 * just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       ntohs(skb2->protocol),
					       dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			pt_prev = ptype;
		}
	}
	if (pt_prev)
		pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
	rcu_read_unlock();
}

/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	int rc;

	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
						  txq);
		if (rc)
			return rc;

		if (txq < dev->real_num_tx_queues)
			qdisc_reset_all_tx_gt(dev, txq);
	}

	dev->real_num_tx_queues = txq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);

#ifdef CONFIG_RPS
/**
 *	netif_set_real_num_rx_queues - set actual number of RX queues used
 *	@dev: Network device
 *	@rxq: Actual number of RX queues
 *
 *	This must be called either with the rtnl_lock held or before
 *	registration of the net device.  Returns 0 on success, or a
 *	negative error code.  If called before registration, it always
 *	succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
#endif

static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);

/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached from system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);

/**
 * skb_set_dev - assign a new device to a buffer
 * @skb: buffer for the new device
 * @dev: network device
 *
 * If an skb is owned by a device already, we have to reset
 * all data private to the namespace a device belongs to
 * before assigning it a new device.
 */
#ifdef CONFIG_NET_NS
void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
{
	skb_dst_drop(skb);
	if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
		secpath_reset(skb);
		nf_reset(skb);
		skb_init_secmark(skb);
		skb->mark = 0;
		skb->priority = 0;
		skb->nf_trace = 0;
		skb->ipvs_property = 0;
#ifdef CONFIG_NET_SCHED
		skb->tc_index = 0;
#endif
	}
	skb->dev = dev;
}
EXPORT_SYMBOL(skb_set_dev);
#endif /* CONFIG_NET_NS */

/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		/* Let GSO fix up the checksum. */
		goto out_set_summed;
	}

	offset = skb_checksum_start_offset(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);

/**
 *	skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	int vlan_depth = ETH_HLEN;
	int err;

	while (type == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vh;

		if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
			return ERR_PTR(-EINVAL);

		vh = (struct vlan_hdr *)(skb->data + vlan_depth);
		type = vh->h_vlan_encapsulated_proto;
		vlan_depth += VLAN_HLEN;
	}

	skb_reset_mac_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;
	__skb_pull(skb, skb->mac_len);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		struct net_device *dev = skb->dev;
		struct ethtool_drvinfo info = {};

		if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
			dev->ethtool_ops->get_drvinfo(dev, &info);

		WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d ip_summed=%d\n",
		     info.driver, dev ? dev->features : 0L,
		     skb->sk ? skb->sk->sk_route_caps : 0L,
		     skb->len, skb->data_len, skb->ip_summed);

		if (skb_header_cloned(skb) &&
		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			return ERR_PTR(err);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
				err = ptype->gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
			}
			segs = ptype->gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_gso_segment);

/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		printk(KERN_ERR "%s: hw csum failure.\n",
		       dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif

/* Actually, we should eliminate this check as soon as we know, that:
 * 1. IOMMU is present and allows to map all the memory.
 * 2. No high memory really exists on this machine.
 */
static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;
	if (!(dev->features & NETIF_F_HIGHDMA)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			if (PageHighMem(skb_shinfo(skb)->frags[i].page))
				return 1;
	}

	if (PCI_DMA_BUS_IS_PHYS) {
		struct device *pdev = dev->dev.parent;

		if (!pdev)
			return 0;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			dma_addr_t addr = page_to_phys(skb_shinfo(skb)->frags[i].page);
			if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
				return 1;
		}
	}
#endif
	return 0;
}

struct dev_gso_cb {
	void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
	struct dev_gso_cb *cb;

	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		kfree_skb(nskb);
	} while (skb->next);

	cb = DEV_GSO_CB(skb);
	if (cb->destructor)
		cb->destructor(skb);
}

/**
 *	dev_gso_segment - Perform GSO segmentation on the skb.
 *	@skb: buffer to segment
 *	@features: device features as applicable to this skb
 *
 *	This function segments the given skb and stores the list of segments
 *	in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs;

	segs = skb_gso_segment(skb, features);

	/* Verifying header integrity only. */
	if (!segs)
		return 0;

	if (IS_ERR(segs))
		return PTR_ERR(segs);

	skb->next = segs;
	DEV_GSO_CB(skb)->destructor = skb->destructor;
	skb->destructor = dev_gso_skb_destructor;

	return 0;
}

/*
 * Try to orphan skb early, right before transmission by the device.
 * We cannot orphan skb if tx timestamp is requested or the sk-reference
 * is needed on driver level for other reasons, e.g. see net/can/raw.c
 */
static inline void skb_orphan_try(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	if (sk && !skb_shinfo(skb)->tx_flags) {
		/* skb_tx_hash() wont be able to get sk.
		 * We copy sk_hash into skb->rxhash
		 */
		if (!skb->rxhash)
			skb->rxhash = sk->sk_hash;
		skb_orphan(skb);
	}
}

static bool can_checksum_protocol(unsigned long features, __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_V4_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_V6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)) ||
		((features & NETIF_F_FCOE_CRC) &&
		 protocol == htons(ETH_P_FCOE)));
}

static int harmonize_features(struct sk_buff *skb, __be16 protocol, int features)
{
	if (!can_checksum_protocol(features, protocol)) {
		features &= ~NETIF_F_ALL_CSUM;
		features &= ~NETIF_F_SG;
	} else if (illegal_highdma(skb->dev, skb)) {
		features &= ~NETIF_F_SG;
	}

	return features;
}

int netif_skb_features(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;
	int features = skb->dev->features;

	if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
	} else if (!vlan_tx_tag_present(skb)) {
		return harmonize_features(skb, protocol, features);
	}

	features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX);

	if (protocol != htons(ETH_P_8021Q)) {
		return harmonize_features(skb, protocol, features);
	} else {
		features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
			NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX;
		return harmonize_features(skb, protocol, features);
	}
}
EXPORT_SYMBOL(netif_skb_features);

/*
 * Returns true if either:
 *	1. skb has frag_list and the device doesn't support FRAGLIST, or
 *	2. skb is fragmented and the device does not support SG.
 */
static inline int skb_needs_linearize(struct sk_buff *skb,
				      int features)
{
	return skb_is_nonlinear(skb) &&
			((skb_has_frag_list(skb) &&
				!(features & NETIF_F_FRAGLIST)) ||
			(skb_shinfo(skb)->nr_frags &&
				!(features & NETIF_F_SG)));
}

int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
			struct netdev_queue *txq)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int rc = NETDEV_TX_OK;

	if (likely(!skb->next)) {
		int features;

		/*
		 * If device doesn't need skb->dst, release it right now while
		 * its hot in this cpu cache
		 */
		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
			skb_dst_drop(skb);

		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(skb, dev);

		skb_orphan_try(skb);

		features = netif_skb_features(skb);

		if (vlan_tx_tag_present(skb) &&
		    !(features & NETIF_F_HW_VLAN_TX)) {
			skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
			if (unlikely(!skb))
				goto out;

			skb->vlan_tci = 0;
		}

		if (netif_needs_gso(skb, features)) {
			if (unlikely(dev_gso_segment(skb, features)))
				goto out_kfree_skb;
			if (skb->next)
				goto gso;
		} else {
			if (skb_needs_linearize(skb, features) &&
			    __skb_linearize(skb))
				goto out_kfree_skb;

			/* If packet is not checksummed and device does not
			 * support checksumming for this protocol, complete
			 * checksumming here.
			 */
			if (skb->ip_summed == CHECKSUM_PARTIAL) {
				skb_set_transport_header(skb,
					skb_checksum_start_offset(skb));
				if (!(features & NETIF_F_ALL_CSUM) &&
				     skb_checksum_help(skb))
					goto out_kfree_skb;
			}
		}

		rc = ops->ndo_start_xmit(skb, dev);
		trace_net_dev_xmit(skb, rc);
		if (rc == NETDEV_TX_OK)
			txq_trans_update(txq);
		return rc;
	}

gso:
	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;

		/*
		 * If device doesn't need nskb->dst, release it right now while
		 * its hot in this cpu cache
		 */
		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
			skb_dst_drop(nskb);

		rc = ops->ndo_start_xmit(nskb, dev);
		trace_net_dev_xmit(nskb, rc);
		if (unlikely(rc != NETDEV_TX_OK)) {
			if (rc & ~NETDEV_TX_MASK)
				goto out_kfree_gso_skb;
			nskb->next = skb->next;
			skb->next = nskb;
			return rc;
		}
		txq_trans_update(txq);
		if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
			return NETDEV_TX_BUSY;
	} while (skb->next);

out_kfree_gso_skb:
	if (likely(skb->next == NULL))
		skb->destructor = DEV_GSO_CB(skb)->destructor;
out_kfree_skb:
	kfree_skb(skb);
out:
	return rc;
}

static u32 hashrnd __read_mostly;

/*
 * Returns a Tx hash based on the given packet descriptor a Tx queues' number
 * to be used as a distribution range.
 */
u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
		  unsigned int num_tx_queues)
{
	u32 hash;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= num_tx_queues))
			hash -= num_tx_queues;
		return hash;
	}

	if (skb->sk && skb->sk->sk_hash)
		hash = skb->sk->sk_hash;
	else
		hash = (__force u16) skb->protocol ^ skb->rxhash;
	hash = jhash_1word(hash, hashrnd);

	return (u16) (((u64) hash * num_tx_queues) >> 32);
}
EXPORT_SYMBOL(__skb_tx_hash);
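
/*
 * Note (illustrative): the multiply-shift above maps a 32-bit hash
 * uniformly onto [0, num_tx_queues) without a division, since
 * (hash * n) >> 32 == floor(hash * n / 2^32) for hash < 2^32.
 */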

static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
		if (net_ratelimit()) {
			pr_warning("%s selects TX queue %d, but "
				   "real number of TX queues is %d\n",
				   dev->name, queue_index, dev->real_num_tx_queues);
		}
		return 0;
	}
	return queue_index;
}

static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS
	struct xps_dev_maps *dev_maps;
	struct xps_map *map;
	int queue_index = -1;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		map = rcu_dereference(
		    dev_maps->cpu_map[raw_smp_processor_id()]);
		if (map) {
			if (map->len == 1)
				queue_index = map->queues[0];
			else {
				u32 hash;
				if (skb->sk && skb->sk->sk_hash)
					hash = skb->sk->sk_hash;
				else
					hash = (__force u16) skb->protocol ^
					    skb->rxhash;
				hash = jhash_1word(hash, hashrnd);
				queue_index = map->queues[
				    ((u64)hash * map->len) >> 32];
			}
			if (unlikely(queue_index >= dev->real_num_tx_queues))
				queue_index = -1;
		}
	}
	rcu_read_unlock();

	return queue_index;
#else
	return -1;
#endif
}

static struct netdev_queue *dev_pick_tx(struct net_device *dev,
					struct sk_buff *skb)
{
	int queue_index;
	const struct net_device_ops *ops = dev->netdev_ops;

	if (dev->real_num_tx_queues == 1)
		queue_index = 0;
	else if (ops->ndo_select_queue) {
		queue_index = ops->ndo_select_queue(dev, skb);
		queue_index = dev_cap_txqueue(dev, queue_index);
	} else {
		struct sock *sk = skb->sk;
		queue_index = sk_tx_queue_get(sk);

		if (queue_index < 0 || skb->ooo_okay ||
		    queue_index >= dev->real_num_tx_queues) {
			int old_index = queue_index;

			queue_index = get_xps_queue(dev, skb);
			if (queue_index < 0)
				queue_index = skb_tx_hash(dev, skb);

			if (queue_index != old_index && sk) {
				struct dst_entry *dst =
				    rcu_dereference_check(sk->sk_dst_cache, 1);

				if (dst && skb_dst(skb) == dst)
					sk_tx_queue_set(sk, queue_index);
			}
		}
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}

static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
				 struct net_device *dev,
				 struct netdev_queue *txq)
{
	spinlock_t *root_lock = qdisc_lock(q);
	bool contended = qdisc_is_running(q);
	int rc;

	/*
	 * Heuristic to force contended enqueues to serialize on a
	 * separate lock before trying to get qdisc main lock.
	 * This permits __QDISC_STATE_RUNNING owner to get the lock more often
	 * and dequeue packets faster.
	 */
	if (unlikely(contended))
		spin_lock(&q->busylock);

	spin_lock(root_lock);
	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
		kfree_skb(skb);
		rc = NET_XMIT_DROP;
	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
		   qdisc_run_begin(q)) {
		/*
		 * This is a work-conserving queue; there are no old skbs
		 * waiting to be sent out; and the qdisc is not running -
		 * xmit the skb directly.
		 */
		if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
			skb_dst_force(skb);

		qdisc_skb_cb(skb)->pkt_len = skb->len;
		qdisc_bstats_update(q, skb);

		if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		} else
			qdisc_run_end(q);

		rc = NET_XMIT_SUCCESS;
	} else {
		skb_dst_force(skb);
		rc = qdisc_enqueue_root(skb, q);
		if (qdisc_run_begin(q)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		}
	}
	spin_unlock(root_lock);
	if (unlikely(contended))
		spin_unlock(&q->busylock);
	return rc;
}

static DEFINE_PER_CPU(int, xmit_recursion);
#define RECURSION_LIMIT 10

/**
 *	dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 *	Note: this method can also return errors from the queue disciplines,
 *	including NET_XMIT_DROP, which is a positive value.  So, errors can
 *	also be positive.
 *
 *	Regardless of the return value, the skb is consumed, so it is
 *	currently difficult to retry a send to this method.
 *
 *	When calling this method, interrupts MUST be enabled.  This is because
 *	the BH enable code must have IRQs enabled so that it will not deadlock.
 */
int dev_queue_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;

	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	txq = dev_pick_tx(dev, skb);
	q = rcu_dereference_bh(txq->qdisc);

#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
	trace_net_dev_queue(skb);
	if (q->enqueue) {
		rc = __dev_xmit_skb(skb, q, dev, txq);
		goto out;
	}

	/* The device has no queue. Common case for software devices:
	   loopback, all the sorts of tunnels...

	   Really, it is unlikely that netif_tx_lock protection is necessary
	   here.  (f.e. loopback and IP tunnels are clean ignoring statistics
	   counters.)
	   However, it is possible, that they rely on protection
	   made by us here.

	   Check this and shot the lock. It is not prone from deadlocks.
	   Either shot noqueue qdisc, it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (txq->xmit_lock_owner != cpu) {

			if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
				goto recursion_alert;

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_tx_queue_stopped(txq)) {
				__this_cpu_inc(xmit_recursion);
				rc = dev_hard_start_xmit(skb, dev, txq);
				__this_cpu_dec(xmit_recursion);
				if (dev_xmit_complete(rc)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			if (net_ratelimit())
				printk(KERN_CRIT "Virtual device %s asks to "
				       "queue packet!\n", dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately
			 */
recursion_alert:
			if (net_ratelimit())
				printk(KERN_CRIT "Dead loop on virtual device "
				       "%s, fix it urgently!\n", dev->name);
		}
	}

	rc = -ENETDOWN;
	rcu_read_unlock_bh();

	kfree_skb(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}
EXPORT_SYMBOL(dev_queue_xmit);
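
/*
 * Example (illustrative): kernel code injecting a packet sets the
 * device and priority before handing the skb over:
 *
 *	skb->dev = dev;
 *	skb->priority = TC_PRIO_CONTROL;
 *	dev_queue_xmit(skb);
 */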

/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog __read_mostly = 1000;
int netdev_tstamp_prequeue __read_mostly = 1;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;            /* old backlog weight */

/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
				     struct napi_struct *napi)
{
	list_add_tail(&napi->poll_list, &sd->poll_list);
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
}

/*
 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
 * and src/dst port numbers. Returns a non-zero hash number on success
 * and 0 on failure.
 */
__u32 __skb_get_rxhash(struct sk_buff *skb)
{
	int nhoff, hash = 0, poff;
	struct ipv6hdr *ip6;
	struct iphdr *ip;
	u8 ip_proto;
	u32 addr1, addr2, ihl;
	union {
		u32 v32;
		u16 v16[2];
	} ports;

	nhoff = skb_network_offset(skb);

	switch (skb->protocol) {
	case __constant_htons(ETH_P_IP):
		if (!pskb_may_pull(skb, sizeof(*ip) + nhoff))
			goto done;

		ip = (struct iphdr *) (skb->data + nhoff);
		if (ip->frag_off & htons(IP_MF | IP_OFFSET))
			ip_proto = 0;
		else
			ip_proto = ip->protocol;
		addr1 = (__force u32) ip->saddr;
		addr2 = (__force u32) ip->daddr;
		ihl = ip->ihl;
		break;
	case __constant_htons(ETH_P_IPV6):
		if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff))
			goto done;

		ip6 = (struct ipv6hdr *) (skb->data + nhoff);
		ip_proto = ip6->nexthdr;
		addr1 = (__force u32) ip6->saddr.s6_addr32[3];
		addr2 = (__force u32) ip6->daddr.s6_addr32[3];
		ihl = (40 >> 2);
		break;
	default:
		goto done;
	}

	ports.v32 = 0;
	poff = proto_ports_offset(ip_proto);
	if (poff >= 0) {
		nhoff += ihl * 4 + poff;
		if (pskb_may_pull(skb, nhoff + 4)) {
			ports.v32 = * (__force u32 *) (skb->data + nhoff);
			if (ports.v16[1] < ports.v16[0])
				swap(ports.v16[0], ports.v16[1]);
		}
	}

	/* get a consistent hash (same value on both flow directions) */
	if (addr2 < addr1)
		swap(addr1, addr2);

	hash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
	if (!hash)
		hash = 1;

done:
	return hash;
}
EXPORT_SYMBOL(__skb_get_rxhash);

#ifdef CONFIG_RPS

/* One global table that all flow-based protocols share. */
struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
EXPORT_SYMBOL(rps_sock_flow_table);

/*
 * get_rps_cpu is called from netif_receive_skb and returns the target
 * CPU from the RPS map of the receiving queue for a given skb.
 * rcu_read_lock must be held on entry.
 */
static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
		       struct rps_dev_flow **rflowp)
{
	struct netdev_rx_queue *rxqueue;
	struct rps_map *map;
	struct rps_dev_flow_table *flow_table;
	struct rps_sock_flow_table *sock_flow_table;
	int cpu = -1;
	u16 tcpu;

	if (skb_rx_queue_recorded(skb)) {
		u16 index = skb_get_rx_queue(skb);
		if (unlikely(index >= dev->real_num_rx_queues)) {
			WARN_ONCE(dev->real_num_rx_queues > 1,
				  "%s received packet on queue %u, but number "
				  "of RX queues is %u\n",
				  dev->name, index, dev->real_num_rx_queues);
			goto done;
		}
		rxqueue = dev->_rx + index;
	} else
		rxqueue = dev->_rx;

	map = rcu_dereference(rxqueue->rps_map);
	if (map) {
		if (map->len == 1 &&
		    !rcu_dereference_raw(rxqueue->rps_flow_table)) {
			tcpu = map->cpus[0];
			if (cpu_online(tcpu))
				cpu = tcpu;
			goto done;
		}
	} else if (!rcu_dereference_raw(rxqueue->rps_flow_table)) {
		goto done;
	}

	skb_reset_network_header(skb);
	if (!skb_get_rxhash(skb))
		goto done;

	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	if (flow_table && sock_flow_table) {
		u16 next_cpu;
		struct rps_dev_flow *rflow;

		rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
		tcpu = rflow->cpu;

		next_cpu = sock_flow_table->ents[skb->rxhash &
		    sock_flow_table->mask];

		/*
		 * If the desired CPU (where last recvmsg was done) is
		 * different from current CPU (one in the rx-queue flow
		 * table entry), switch if one of the following holds:
		 *   - Current CPU is unset (equal to RPS_NO_CPU).
		 *   - Current CPU is offline.
		 *   - The current CPU's queue tail has advanced beyond the
		 *     last packet that was enqueued using this table entry.
		 *     This guarantees that all previous packets for the flow
		 *     have been dequeued, thus preserving in order delivery.
		 */
		if (unlikely(tcpu != next_cpu) &&
		    (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
		      rflow->last_qtail)) >= 0)) {
			tcpu = rflow->cpu = next_cpu;
			if (tcpu != RPS_NO_CPU)
				rflow->last_qtail = per_cpu(softnet_data,
				    tcpu).input_queue_head;
		}
		if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
			*rflowp = rflow;
			cpu = tcpu;
			goto done;
		}
	}

	if (map) {
		tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];

		if (cpu_online(tcpu)) {
			cpu = tcpu;
			goto done;
		}
	}

done:
	return cpu;
}

/* Called from hardirq (IPI) context */
static void rps_trigger_softirq(void *data)
{
	struct softnet_data *sd = data;

	____napi_schedule(sd, &sd->backlog);
	sd->received_rps++;
}

#endif /* CONFIG_RPS */

/*
 * Check if this softnet_data structure is another cpu one
 * If yes, queue it to our IPI list and return 1
 * If no, return 0
 */
static int rps_ipi_queued(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *mysd = &__get_cpu_var(softnet_data);

	if (sd != mysd) {
		sd->rps_ipi_next = mysd->rps_ipi_list;
		mysd->rps_ipi_list = sd;

		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
		return 1;
	}
#endif /* CONFIG_RPS */
	return 0;
}

/*
 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
 * queue (may be a remote CPU queue).
 */
static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
			      unsigned int *qtail)
{
	struct softnet_data *sd;
	unsigned long flags;

	sd = &per_cpu(softnet_data, cpu);

	local_irq_save(flags);

	rps_lock(sd);
	if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
		if (skb_queue_len(&sd->input_pkt_queue)) {
enqueue:
			__skb_queue_tail(&sd->input_pkt_queue, skb);
			input_queue_tail_incr_save(sd, qtail);
			rps_unlock(sd);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		/* Schedule NAPI for backlog device
		 * We can use non atomic operation since we own the queue lock
		 */
		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
			if (!rps_ipi_queued(sd))
				____napi_schedule(sd, &sd->backlog);
		}
		goto enqueue;
	}

	sd->dropped++;
	rps_unlock(sd);

	local_irq_restore(flags);

	atomic_long_inc(&skb->dev->rx_dropped);
	kfree_skb(skb);
	return NET_RX_DROP;
}

/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped)
 *
 */
int netif_rx(struct sk_buff *skb)
{
	int ret;

	/* if netpoll wants it, pretend we never saw it */
	if (netpoll_rx(skb))
		return NET_RX_DROP;

	if (netdev_tstamp_prequeue)
		net_timestamp_check(skb);

	trace_netif_rx(skb);
#ifdef CONFIG_RPS
	{
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu;

		preempt_disable();
		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);
		if (cpu < 0)
			cpu = smp_processor_id();

		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);

		rcu_read_unlock();
		preempt_enable();
	}
#else
	{
		unsigned int qtail;
		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
		put_cpu();
	}
#endif
	return ret;
}
EXPORT_SYMBOL(netif_rx);
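
/*
 * Example (illustrative): a non-NAPI driver hands a received frame to
 * the stack from its interrupt handler:
 *
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */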

int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	preempt_disable();
	err = netif_rx(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}
EXPORT_SYMBOL(netif_rx_ni);

static void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;
			clist = clist->next;

			WARN_ON(atomic_read(&skb->users));
			trace_kfree_skb(skb, net_tx_action);
			__kfree_skb(skb);
		}
	}

	if (sd->output_queue) {
		struct Qdisc *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
		local_irq_enable();

		while (head) {
			struct Qdisc *q = head;
			spinlock_t *root_lock;

			head = head->next_sched;

			root_lock = qdisc_lock(q);
			if (spin_trylock(root_lock)) {
				smp_mb__before_clear_bit();
				clear_bit(__QDISC_STATE_SCHED,
					  &q->state);
				qdisc_run(q);
				spin_unlock(root_lock);
			} else {
				if (!test_bit(__QDISC_STATE_DEACTIVATED,
					      &q->state)) {
					__netif_reschedule(q);
				} else {
					smp_mb__before_clear_bit();
					clear_bit(__QDISC_STATE_SCHED,
						  &q->state);
				}
			}
		}
	}
}

#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
    (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
/* This hook is defined here for ATM LANE */
int (*br_fdb_test_addr_hook)(struct net_device *dev,
			     unsigned char *addr) __read_mostly;
EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
#endif

#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
 * when CONFIG_NET_CLS_ACT is? otherwise some code may be dead and
 * a compare and 2 stores extra right now if we dont have it on
 * but have CONFIG_NET_CLS_ACT
 * NOTE: This doesn't stop any functionality; if you dont have
 * the ingress scheduler, you just can't add policies on ingress.
 */
static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
{
	struct net_device *dev = skb->dev;
	u32 ttl = G_TC_RTTL(skb->tc_verd);
	int result = TC_ACT_OK;
	struct Qdisc *q;

	if (unlikely(MAX_RED_LOOP < ttl++)) {
		if (net_ratelimit())
			pr_warning("Redir loop detected Dropping packet (%d->%d)\n",
				   skb->skb_iif, dev->ifindex);
		return TC_ACT_SHOT;
	}

	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);

	q = rxq->qdisc;
	if (q != &noop_qdisc) {
		spin_lock(qdisc_lock(q));
		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
			result = qdisc_enqueue_root(skb, q);
		spin_unlock(qdisc_lock(q));
	}

	return result;
}

static inline struct sk_buff *handle_ing(struct sk_buff *skb,
					 struct packet_type **pt_prev,
					 int *ret, struct net_device *orig_dev)
{
	struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);

	if (!rxq || rxq->qdisc == &noop_qdisc)
		goto out;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	switch (ing_filter(skb, rxq)) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
		kfree_skb(skb);
		return NULL;
	}

out:
	skb->tc_verd = 0;
	return skb;
}
#endif
2919
2920/**
2921 * netdev_rx_handler_register - register receive handler
2922 * @dev: device to register a handler for
2923 * @rx_handler: receive handler to register
2924 * @rx_handler_data: data pointer that is used by rx handler
2925 *
2926 * Register a receive handler for a device. This handler will then be
2927 * called from __netif_receive_skb. A negative errno code is returned
2928 * on a failure.
2929 *
2930 * The caller must hold the rtnl_mutex.
2931 */
2932int netdev_rx_handler_register(struct net_device *dev,
2933 rx_handler_func_t *rx_handler,
2934 void *rx_handler_data)
2935{
2936 ASSERT_RTNL();
2937
2938 if (dev->rx_handler)
2939 return -EBUSY;
2940
2941 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
2942 rcu_assign_pointer(dev->rx_handler, rx_handler);
2943
2944 return 0;
2945}
2946EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
2947
2948/**
2949 * netdev_rx_handler_unregister - unregister receive handler
2950 * @dev: device to unregister a handler from
2951 *
2952 * Unregister a receive handler from a device.
2953 *
2954 * The caller must hold the rtnl_mutex.
2955 */
2956void netdev_rx_handler_unregister(struct net_device *dev)
2957{
2958
2959 ASSERT_RTNL();
2960 rcu_assign_pointer(dev->rx_handler, NULL);
2961 rcu_assign_pointer(dev->rx_handler_data, NULL);
2962}
2963EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
2964
2965static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
2966 struct net_device *master)
2967{
2968 if (skb->pkt_type == PACKET_HOST) {
2969 u16 *dest = (u16 *) eth_hdr(skb)->h_dest;
2970
2971 memcpy(dest, master->dev_addr, ETH_ALEN);
2972 }
2973}
2974
2975/* On bonding slaves other than the currently active slave, suppress
2976 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
2977 * ARP on active-backup slaves with arp_validate enabled.
2978 */
2979int __skb_bond_should_drop(struct sk_buff *skb, struct net_device *master)
2980{
2981 struct net_device *dev = skb->dev;
2982
2983 if (master->priv_flags & IFF_MASTER_ARPMON)
2984 dev->last_rx = jiffies;
2985
2986 if ((master->priv_flags & IFF_MASTER_ALB) &&
2987 (master->priv_flags & IFF_BRIDGE_PORT)) {
2988 /* Address unmangle: in balance-alb mode the slaves reply with
2989 * their own MAC addresses, so when the bond is also a bridge
2990 * port, rewrite host-directed frames to carry the master's
2991 * address again (see skb_bond_set_mac_by_master above). */
2992 skb_bond_set_mac_by_master(skb, master);
2993 }
2994
2995 if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
2996 if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
2997 skb->protocol == __cpu_to_be16(ETH_P_ARP))
2998 return 0;
2999
3000 if (master->priv_flags & IFF_MASTER_ALB) {
3001 if (skb->pkt_type != PACKET_BROADCAST &&
3002 skb->pkt_type != PACKET_MULTICAST)
3003 return 0;
3004 }
3005 if (master->priv_flags & IFF_MASTER_8023AD &&
3006 skb->protocol == __cpu_to_be16(ETH_P_SLOW))
3007 return 0;
3008
3009 return 1;
3010 }
3011 return 0;
3012}
3013EXPORT_SYMBOL(__skb_bond_should_drop);
3014
3015static int __netif_receive_skb(struct sk_buff *skb)
3016{
3017 struct packet_type *ptype, *pt_prev;
3018 rx_handler_func_t *rx_handler;
3019 struct net_device *orig_dev;
3020 struct net_device *master;
3021 struct net_device *null_or_orig;
3022 struct net_device *orig_or_bond;
3023 int ret = NET_RX_DROP;
3024 __be16 type;
3025
3026 if (!netdev_tstamp_prequeue)
3027 net_timestamp_check(skb);
3028
3029 trace_netif_receive_skb(skb);
3030
3031 /* if we've gotten here through NAPI, check netpoll */
3032 if (netpoll_receive_skb(skb))
3033 return NET_RX_DROP;
3034
3035 if (!skb->skb_iif)
3036 skb->skb_iif = skb->dev->ifindex;
3037
3038 /*
3039 * bonding note: skbs received on inactive slaves should only
3040 * be delivered to pkt handlers that are exact matches.  Also
3041 * the deliver_no_wcard flag will be set.  If packet handlers
3042 * are sensitive to duplicate packets these skbs will need to
3043 * be dropped at the handler.
3044 */
3045 null_or_orig = NULL;
3046 orig_dev = skb->dev;
3047 master = ACCESS_ONCE(orig_dev->master);
3048 if (skb->deliver_no_wcard)
3049 null_or_orig = orig_dev;
3050 else if (master) {
3051 if (skb_bond_should_drop(skb, master)) {
3052 skb->deliver_no_wcard = 1;
3053 null_or_orig = orig_dev;
3054 } else
3055 skb->dev = master;
3056 }
3057
3058 __this_cpu_inc(softnet_data.processed);
3059 skb_reset_network_header(skb);
3060 skb_reset_transport_header(skb);
3061 skb->mac_len = skb->network_header - skb->mac_header;
3062
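 /* Delivery order below: taps (ptype_all), ingress classifier,
 * rx_handler (e.g. bridge/macvlan), VLAN acceleration, then
 * exact protocol matches. pt_prev delays each delivery by one
 * step so the last handler can consume the skb without a clone.
 */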
3063 pt_prev = NULL;
3064
3065 rcu_read_lock();
3066
3067#ifdef CONFIG_NET_CLS_ACT
3068 if (skb->tc_verd & TC_NCLS) {
3069 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
3070 goto ncls;
3071 }
3072#endif
3073
3074 list_for_each_entry_rcu(ptype, &ptype_all, list) {
3075 if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
3076 ptype->dev == orig_dev) {
3077 if (pt_prev)
3078 ret = deliver_skb(skb, pt_prev, orig_dev);
3079 pt_prev = ptype;
3080 }
3081 }
3082
3083#ifdef CONFIG_NET_CLS_ACT
3084 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3085 if (!skb)
3086 goto out;
3087ncls:
3088#endif
3089
3090 /* Handle special case of bridge or macvlan */
3091 rx_handler = rcu_dereference(skb->dev->rx_handler);
3092 if (rx_handler) {
3093 if (pt_prev) {
3094 ret = deliver_skb(skb, pt_prev, orig_dev);
3095 pt_prev = NULL;
3096 }
3097 skb = rx_handler(skb);
3098 if (!skb)
3099 goto out;
3100 }
3101
3102 if (vlan_tx_tag_present(skb)) {
3103 if (pt_prev) {
3104 ret = deliver_skb(skb, pt_prev, orig_dev);
3105 pt_prev = NULL;
3106 }
3107 if (vlan_hwaccel_do_receive(&skb)) {
3108 ret = __netif_receive_skb(skb);
3109 goto out;
3110 } else if (unlikely(!skb))
3111 goto out;
3112 }
3113
3114 /*
3115 * Make sure frames received on VLAN interfaces stacked on
3116 * bonding interfaces still make their way to any base bonding
3117 * device that may have registered for a specific ptype.  The
3118 * handler may have to adjust skb->dev and orig_dev.
3119 */
3120 orig_or_bond = orig_dev;
3121 if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
3122 (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
3123 orig_or_bond = vlan_dev_real_dev(skb->dev);
3124 }
3125
3126 type = skb->protocol;
3127 list_for_each_entry_rcu(ptype,
3128 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
3129 if (ptype->type == type && (ptype->dev == null_or_orig ||
3130 ptype->dev == skb->dev || ptype->dev == orig_dev ||
3131 ptype->dev == orig_or_bond)) {
3132 if (pt_prev)
3133 ret = deliver_skb(skb, pt_prev, orig_dev);
3134 pt_prev = ptype;
3135 }
3136 }
3137
3138 if (pt_prev) {
3139 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
3140 } else {
3141 atomic_long_inc(&skb->dev->rx_dropped);
3142 kfree_skb(skb);
3143 /* No protocol handler claimed this packet; account the
3144 * drop against the receiving device and free the skb.
3145 */
3146 ret = NET_RX_DROP;
3147 }
3148
3149out:
3150 rcu_read_unlock();
3151 return ret;
3152}
3153
3154/**
3155 * netif_receive_skb - process receive buffer from network
3156 * @skb: buffer to process
3157 *
3158 * netif_receive_skb() is the main receive data processing function.
3159 * It always succeeds. The buffer may be dropped during processing
3160 * for congestion control or by the protocol layers.
3161 *
3162 * This function may only be called from softirq context and
3163 * interrupts should be enabled.
3164 *
3165 * Return values (usually ignored):
3166 * NET_RX_SUCCESS: no congestion
3167 * NET_RX_DROP: packet was dropped
3168 */
3169int netif_receive_skb(struct sk_buff *skb)
3170{
3171 if (netdev_tstamp_prequeue)
3172 net_timestamp_check(skb);
3173
3174 if (skb_defer_rx_timestamp(skb))
3175 return NET_RX_SUCCESS;
3176
3177#ifdef CONFIG_RPS
3178 {
3179 struct rps_dev_flow voidflow, *rflow = &voidflow;
3180 int cpu, ret;
3181
3182 rcu_read_lock();
3183
3184 cpu = get_rps_cpu(skb->dev, skb, &rflow);
3185
3186 if (cpu >= 0) {
3187 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3188 rcu_read_unlock();
3189 } else {
3190 rcu_read_unlock();
3191 ret = __netif_receive_skb(skb);
3192 }
3193
3194 return ret;
3195 }
3196#else
3197 return __netif_receive_skb(skb);
3198#endif
3199}
3200EXPORT_SYMBOL(netif_receive_skb);
3201
3202/* Network device is going away, flush any packets still pending.
3203 * Called with irqs disabled.
3204 */
3205static void flush_backlog(void *arg)
3206{
3207 struct net_device *dev = arg;
3208 struct softnet_data *sd = &__get_cpu_var(softnet_data);
3209 struct sk_buff *skb, *tmp;
3210
3211 rps_lock(sd);
3212 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
3213 if (skb->dev == dev) {
3214 __skb_unlink(skb, &sd->input_pkt_queue);
3215 kfree_skb(skb);
3216 input_queue_head_incr(sd);
3217 }
3218 }
3219 rps_unlock(sd);
3220
3221 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
3222 if (skb->dev == dev) {
3223 __skb_unlink(skb, &sd->process_queue);
3224 kfree_skb(skb);
3225 input_queue_head_incr(sd);
3226 }
3227 }
3228}
3229
3230static int napi_gro_complete(struct sk_buff *skb)
3231{
3232 struct packet_type *ptype;
3233 __be16 type = skb->protocol;
3234 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
3235 int err = -ENOENT;
3236
3237 if (NAPI_GRO_CB(skb)->count == 1) {
3238 skb_shinfo(skb)->gso_size = 0;
3239 goto out;
3240 }
3241
3242 rcu_read_lock();
3243 list_for_each_entry_rcu(ptype, head, list) {
3244 if (ptype->type != type || ptype->dev || !ptype->gro_complete)
3245 continue;
3246
3247 err = ptype->gro_complete(skb);
3248 break;
3249 }
3250 rcu_read_unlock();
3251
3252 if (err) {
3253 WARN_ON(&ptype->list == head);
3254 kfree_skb(skb);
3255 return NET_RX_SUCCESS;
3256 }
3257
3258out:
3259 return netif_receive_skb(skb);
3260}
3261
3262inline void napi_gro_flush(struct napi_struct *napi)
3263{
3264 struct sk_buff *skb, *next;
3265
3266 for (skb = napi->gro_list; skb; skb = next) {
3267 next = skb->next;
3268 skb->next = NULL;
3269 napi_gro_complete(skb);
3270 }
3271
3272 napi->gro_count = 0;
3273 napi->gro_list = NULL;
3274}
3275EXPORT_SYMBOL(napi_gro_flush);
3276
3277enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3278{
3279 struct sk_buff **pp = NULL;
3280 struct packet_type *ptype;
3281 __be16 type = skb->protocol;
3282 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
3283 int same_flow;
3284 int mac_len;
3285 enum gro_result ret;
3286
3287 if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
3288 goto normal;
3289
3290 if (skb_is_gso(skb) || skb_has_frag_list(skb))
3291 goto normal;
3292
3293 rcu_read_lock();
3294 list_for_each_entry_rcu(ptype, head, list) {
3295 if (ptype->type != type || ptype->dev || !ptype->gro_receive)
3296 continue;
3297
3298 skb_set_network_header(skb, skb_gro_offset(skb));
3299 mac_len = skb->network_header - skb->mac_header;
3300 skb->mac_len = mac_len;
3301 NAPI_GRO_CB(skb)->same_flow = 0;
3302 NAPI_GRO_CB(skb)->flush = 0;
3303 NAPI_GRO_CB(skb)->free = 0;
3304
3305 pp = ptype->gro_receive(&napi->gro_list, skb);
3306 break;
3307 }
3308 rcu_read_unlock();
3309
3310 if (&ptype->list == head)
3311 goto normal;
3312
3313 same_flow = NAPI_GRO_CB(skb)->same_flow;
3314 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
3315
3316 if (pp) {
3317 struct sk_buff *nskb = *pp;
3318
3319 *pp = nskb->next;
3320 nskb->next = NULL;
3321 napi_gro_complete(nskb);
3322 napi->gro_count--;
3323 }
3324
3325 if (same_flow)
3326 goto ok;
3327
3328 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
3329 goto normal;
3330
3331 napi->gro_count++;
3332 NAPI_GRO_CB(skb)->count = 1;
3333 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
3334 skb->next = napi->gro_list;
3335 napi->gro_list = skb;
3336 ret = GRO_HELD;
3337
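 /* Make sure the bytes GRO has already parsed are present in
 * the linear area, copying them out of frag0 if needed and
 * shrinking (or releasing) the first fragment accordingly.
 */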
3338pull:
3339 if (skb_headlen(skb) < skb_gro_offset(skb)) {
3340 int grow = skb_gro_offset(skb) - skb_headlen(skb);
3341
3342 BUG_ON(skb->end - skb->tail < grow);
3343
3344 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
3345
3346 skb->tail += grow;
3347 skb->data_len -= grow;
3348
3349 skb_shinfo(skb)->frags[0].page_offset += grow;
3350 skb_shinfo(skb)->frags[0].size -= grow;
3351
3352 if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
3353 put_page(skb_shinfo(skb)->frags[0].page);
3354 memmove(skb_shinfo(skb)->frags,
3355 skb_shinfo(skb)->frags + 1,
3356 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
3357 }
3358 }
3359
3360ok:
3361 return ret;
3362
3363normal:
3364 ret = GRO_NORMAL;
3365 goto pull;
3366}
3367EXPORT_SYMBOL(dev_gro_receive);
3368
3369static inline gro_result_t
3370__napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3371{
3372 struct sk_buff *p;
3373
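 /* Flag which held flows could merge with this skb: same
 * device, same vlan tag, identical Ethernet header.
 */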
3374 for (p = napi->gro_list; p; p = p->next) {
3375 unsigned long diffs;
3376
3377 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3378 diffs |= p->vlan_tci ^ skb->vlan_tci;
3379 diffs |= compare_ether_header(skb_mac_header(p),
3380 skb_gro_mac_header(skb));
3381 NAPI_GRO_CB(p)->same_flow = !diffs;
3382 NAPI_GRO_CB(p)->flush = 0;
3383 }
3384
3385 return dev_gro_receive(napi, skb);
3386}
3387
3388gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
3389{
3390 switch (ret) {
3391 case GRO_NORMAL:
3392 if (netif_receive_skb(skb))
3393 ret = GRO_DROP;
3394 break;
3395
3396 case GRO_DROP:
3397 case GRO_MERGED_FREE:
3398 kfree_skb(skb);
3399 break;
3400
3401 case GRO_HELD:
3402 case GRO_MERGED:
3403 break;
3404 }
3405
3406 return ret;
3407}
3408EXPORT_SYMBOL(napi_skb_finish);
3409
3410void skb_gro_reset_offset(struct sk_buff *skb)
3411{
3412 NAPI_GRO_CB(skb)->data_offset = 0;
3413 NAPI_GRO_CB(skb)->frag0 = NULL;
3414 NAPI_GRO_CB(skb)->frag0_len = 0;
3415
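 /* Fast path: if the linear area is empty and the first
 * fragment sits in lowmem, remember a direct pointer to it so
 * GRO header accesses can avoid pulling data into the head.
 */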
3416 if (skb->mac_header == skb->tail &&
3417 !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
3418 NAPI_GRO_CB(skb)->frag0 =
3419 page_address(skb_shinfo(skb)->frags[0].page) +
3420 skb_shinfo(skb)->frags[0].page_offset;
3421 NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
3422 }
3423}
3424EXPORT_SYMBOL(skb_gro_reset_offset);
3425
3426gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3427{
3428 skb_gro_reset_offset(skb);
3429
3430 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
3431}
3432EXPORT_SYMBOL(napi_gro_receive);
3433
3434static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
3435{
3436 __skb_pull(skb, skb_headlen(skb));
3437 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
3438 skb->vlan_tci = 0;
3439 skb->dev = napi->dev;
3440 skb->skb_iif = 0;
3441
3442 napi->skb = skb;
3443}
3444
3445struct sk_buff *napi_get_frags(struct napi_struct *napi)
3446{
3447 struct sk_buff *skb = napi->skb;
3448
3449 if (!skb) {
3450 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
3451 if (skb)
3452 napi->skb = skb;
3453 }
3454 return skb;
3455}
3456EXPORT_SYMBOL(napi_get_frags);
3457
3458gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
3459 gro_result_t ret)
3460{
3461 switch (ret) {
3462 case GRO_NORMAL:
3463 case GRO_HELD:
3464 skb->protocol = eth_type_trans(skb, skb->dev);
3465
3466 if (ret == GRO_HELD)
3467 skb_gro_pull(skb, -ETH_HLEN);
3468 else if (netif_receive_skb(skb))
3469 ret = GRO_DROP;
3470 break;
3471
3472 case GRO_DROP:
3473 case GRO_MERGED_FREE:
3474 napi_reuse_skb(napi, skb);
3475 break;
3476
3477 case GRO_MERGED:
3478 break;
3479 }
3480
3481 return ret;
3482}
3483EXPORT_SYMBOL(napi_frags_finish);
3484
3485struct sk_buff *napi_frags_skb(struct napi_struct *napi)
3486{
3487 struct sk_buff *skb = napi->skb;
3488 struct ethhdr *eth;
3489 unsigned int hlen;
3490 unsigned int off;
3491
3492 napi->skb = NULL;
3493
3494 skb_reset_mac_header(skb);
3495 skb_gro_reset_offset(skb);
3496
3497 off = skb_gro_offset(skb);
3498 hlen = off + sizeof(*eth);
3499 eth = skb_gro_header_fast(skb, off);
3500 if (skb_gro_header_hard(skb, hlen)) {
3501 eth = skb_gro_header_slow(skb, hlen, off);
3502 if (unlikely(!eth)) {
3503 napi_reuse_skb(napi, skb);
3504 skb = NULL;
3505 goto out;
3506 }
3507 }
3508
3509 skb_gro_pull(skb, sizeof(*eth));
3510
3511 /*
3512 * This works because the only protocols we care about don't require
3513 * special handling.  We'll fix it up properly at the end.
3514 */
3515 skb->protocol = eth->h_proto;
3516
3517out:
3518 return skb;
3519}
3520EXPORT_SYMBOL(napi_frags_skb);
3521
3522gro_result_t napi_gro_frags(struct napi_struct *napi)
3523{
3524 struct sk_buff *skb = napi_frags_skb(napi);
3525
3526 if (!skb)
3527 return GRO_DROP;
3528
3529 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
3530}
3531EXPORT_SYMBOL(napi_gro_frags);
3532
3533/*
3534 * net_rps_action_and_irq_enable sends any pending IPI's for rps.
3535 * Note: called with local irq disabled, but exits with local irq enabled.
3536 */
3537static void net_rps_action_and_irq_enable(struct softnet_data *sd)
3538{
3539#ifdef CONFIG_RPS
3540 struct softnet_data *remsd = sd->rps_ipi_list;
3541
3542 if (remsd) {
3543 sd->rps_ipi_list = NULL;
3544
3545 local_irq_enable();
3546
3547 /* Send pending IPI's to kick RPS processing on remote cpus. */
3548 while (remsd) {
3549 struct softnet_data *next = remsd->rps_ipi_next;
3550
3551 if (cpu_online(remsd->cpu))
3552 __smp_call_function_single(remsd->cpu,
3553 &remsd->csd, 0);
3554 remsd = next;
3555 }
3556 } else
3557#endif
3558 local_irq_enable();
3559}
3560
3561static int process_backlog(struct napi_struct *napi, int quota)
3562{
3563 int work = 0;
3564 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
3565
3566#ifdef CONFIG_RPS
3567 /* Check if we have pending ipi; it is better to send them now,
3568 * not to wait until net_rx_action() ends.
3569 */
3570 if (sd->rps_ipi_list) {
3571 local_irq_disable();
3572 net_rps_action_and_irq_enable(sd);
3573 }
3574#endif
3575 napi->weight = weight_p;
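 /* Two-queue scheme: packets arrive on the rps_lock-protected
 * input_pkt_queue and are spliced in bulk onto the private
 * process_queue, which this CPU drains without the lock.
 */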
3576 local_irq_disable();
3577 while (work < quota) {
3578 struct sk_buff *skb;
3579 unsigned int qlen;
3580
3581 while ((skb = __skb_dequeue(&sd->process_queue))) {
3582 local_irq_enable();
3583 __netif_receive_skb(skb);
3584 local_irq_disable();
3585 input_queue_head_incr(sd);
3586 if (++work >= quota) {
3587 local_irq_enable();
3588 return work;
3589 }
3590 }
3591
3592 rps_lock(sd);
3593 qlen = skb_queue_len(&sd->input_pkt_queue);
3594 if (qlen)
3595 skb_queue_splice_tail_init(&sd->input_pkt_queue,
3596 &sd->process_queue);
3597
3598 if (qlen < quota - work) {
3599 /*
3600 * Inline a custom version of __napi_complete().
3601 * Only the current cpu owns and manipulates this napi,
3602 * and NAPI_STATE_SCHED is the only possible flag set on backlog,
3603 * so we can use a plain write instead of clear_bit(),
3604 * and we don't need an smp_mb() memory barrier.
3605 */
3606 list_del(&napi->poll_list);
3607 napi->state = 0;
3608
3609 quota = work + qlen;
3610 }
3611 rps_unlock(sd);
3612 }
3613 local_irq_enable();
3614
3615 return work;
3616}
3617
3618/**
3619 * __napi_schedule - schedule for receive
3620 * @n: entry to schedule
3621 *
3622 * The entry's receive function will be scheduled to run.
3623 */
3624void __napi_schedule(struct napi_struct *n)
3625{
3626 unsigned long flags;
3627
3628 local_irq_save(flags);
3629 ____napi_schedule(&__get_cpu_var(softnet_data), n);
3630 local_irq_restore(flags);
3631}
3632EXPORT_SYMBOL(__napi_schedule);
3633
3634void __napi_complete(struct napi_struct *n)
3635{
3636 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
3637 BUG_ON(n->gro_list);
3638
3639 list_del(&n->poll_list);
3640 smp_mb__before_clear_bit();
3641 clear_bit(NAPI_STATE_SCHED, &n->state);
3642}
3643EXPORT_SYMBOL(__napi_complete);
3644
3645void napi_complete(struct napi_struct *n)
3646{
3647 unsigned long flags;
3648
3649 /*
3650 * don't let napi dequeue from the cpu poll list
3651 * just in case it's running on a different cpu
3652 */
3653 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
3654 return;
3655
3656 napi_gro_flush(n);
3657 local_irq_save(flags);
3658 __napi_complete(n);
3659 local_irq_restore(flags);
3660}
3661EXPORT_SYMBOL(napi_complete);
3662
3663void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
3664 int (*poll)(struct napi_struct *, int), int weight)
3665{
3666 INIT_LIST_HEAD(&napi->poll_list);
3667 napi->gro_count = 0;
3668 napi->gro_list = NULL;
3669 napi->skb = NULL;
3670 napi->poll = poll;
3671 napi->weight = weight;
3672 list_add(&napi->dev_list, &dev->napi_list);
3673 napi->dev = dev;
3674#ifdef CONFIG_NETPOLL
3675 spin_lock_init(&napi->poll_lock);
3676 napi->poll_owner = -1;
3677#endif
3678 set_bit(NAPI_STATE_SCHED, &napi->state);
3679}
3680EXPORT_SYMBOL(netif_napi_add);
3681
3682void netif_napi_del(struct napi_struct *napi)
3683{
3684 struct sk_buff *skb, *next;
3685
3686 list_del_init(&napi->dev_list);
3687 napi_free_frags(napi);
3688
3689 for (skb = napi->gro_list; skb; skb = next) {
3690 next = skb->next;
3691 skb->next = NULL;
3692 kfree_skb(skb);
3693 }
3694
3695 napi->gro_list = NULL;
3696 napi->gro_count = 0;
3697}
3698EXPORT_SYMBOL(netif_napi_del);
3699
3700static void net_rx_action(struct softirq_action *h)
3701{
3702 struct softnet_data *sd = &__get_cpu_var(softnet_data);
3703 unsigned long time_limit = jiffies + 2;
3704 int budget = netdev_budget;
3705 void *have;
3706
3707 local_irq_disable();
3708
3709 while (!list_empty(&sd->poll_list)) {
3710 struct napi_struct *n;
3711 int work, weight;
3712
3713 /* If the softirq window is exhausted then punt.
3714 * Allow this to run for 2 jiffies, which allows
3715 * an average latency of 1.5/HZ.
3716 */
3717 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
3718 goto softnet_break;
3719
3720 local_irq_enable();
3721
3722 /* Even though interrupts have been re-enabled, this
3723 * access is safe because interrupts can only add new
3724 * entries to the tail of this list, and only ->poll()
3725 * calls can remove this head entry from the list.
3726 */
3727 n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
3728
3729 have = netpoll_poll_lock(n);
3730
3731 weight = n->weight;
3732
3733 /* This NAPI_STATE_SCHED test is for avoiding a race
3734 * with netpoll's poll_napi().  Only the entity which
3735 * obtains the lock and sees NAPI_STATE_SCHED set will
3736 * actually make the ->poll() call.  Therefore we avoid
3737 * accidentally calling ->poll() when NAPI is not scheduled.
3738 */
3739 work = 0;
3740 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
3741 work = n->poll(n, weight);
3742 trace_napi_poll(n);
3743 }
3744
3745 WARN_ON_ONCE(work > weight);
3746
3747 budget -= work;
3748
3749 local_irq_disable();
3750
3751 /* Drivers must not modify the NAPI state if they
3752 * consume the entire weight.  In such cases this code
3753 * still "owns" the NAPI instance and therefore can
3754 * move the instance around on the list at-will.
3755 */
3756 if (unlikely(work == weight)) {
3757 if (unlikely(napi_disable_pending(n))) {
3758 local_irq_enable();
3759 napi_complete(n);
3760 local_irq_disable();
3761 } else
3762 list_move_tail(&n->poll_list, &sd->poll_list);
3763 }
3764
3765 netpoll_poll_unlock(have);
3766 }
3767out:
3768 net_rps_action_and_irq_enable(sd);
3769
3770#ifdef CONFIG_NET_DMA
3771 /*
3772 * There may not be any more sk_buffs coming right now, so push
3773 * any pending DMA copies to hardware
3774 */
3775 dma_issue_pending_all();
3776#endif
3777
3778 return;
3779
3780softnet_break:
3781 sd->time_squeeze++;
3782 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3783 goto out;
3784}
3785
3786static gifconf_func_t *gifconf_list[NPROTO];
3787
3788/**
3789 * register_gifconf - register a SIOCGIF handler
3790 * @family: Address family
3791 * @gifconf: Function handler
3792 *
3793 * Register protocol dependent address dumping routines. The handler
3794 * that is passed must not be freed or reused until it has been replaced
3795 * by another handler.
3796 */
3797int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
3798{
3799 if (family >= NPROTO)
3800 return -EINVAL;
3801 gifconf_list[family] = gifconf;
3802 return 0;
3803}
3804EXPORT_SYMBOL(register_gifconf);
3805
3806
3807/*
3808 * Map an interface index to its name (SIOCGIFNAME)
3809 */
3810
3811/*
3812 * We need this ioctl for efficient implementation of the
3813 * if_indextoname() function required by the IPv6 API.
3814 * Without it we would have to search all the interfaces
3815 * to find a match.  --pb
3816 */
3817
3818static int dev_ifname(struct net *net, struct ifreq __user *arg)
3819{
3820 struct net_device *dev;
3821 struct ifreq ifr;
3822
3823 /*
3824 * Fetch the caller's info block.
3825 */
3826
3827 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3828 return -EFAULT;
3829
3830 rcu_read_lock();
3831 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
3832 if (!dev) {
3833 rcu_read_unlock();
3834 return -ENODEV;
3835 }
3836
3837 strcpy(ifr.ifr_name, dev->name);
3838 rcu_read_unlock();
3839
3840 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
3841 return -EFAULT;
3842 return 0;
3843}
3844
3845
3846/*
3847 * Perform a SIOCGIFCONF call. This structure will change
3848 * size eventually, and there is nothing I can do about it.
3849 * Thus we will need a 'compatibility mode'.
3850 */
3851static int dev_ifconf(struct net *net, char __user *arg)
3852{
3853 struct ifconf ifc;
3854 struct net_device *dev;
3855 char __user *pos;
3856 int len;
3857 int total;
3858 int i;
3859
3860 /*
3861 * Fetch the caller's info block.
3862 */
3863
3864 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
3865 return -EFAULT;
3866
3867 pos = ifc.ifc_buf;
3868 len = ifc.ifc_len;
3869
3870 /*
3871 * Loop over the interfaces, and write an info block for each.
3872 */
3873
3874 total = 0;
3875 for_each_netdev(net, dev) {
3876 for (i = 0; i < NPROTO; i++) {
3877 if (gifconf_list[i]) {
3878 int done;
3879 if (!pos)
3880 done = gifconf_list[i](dev, NULL, 0);
3881 else
3882 done = gifconf_list[i](dev, pos + total,
3883 len - total);
3884 if (done < 0)
3885 return -EFAULT;
3886 total += done;
3887 }
3888 }
3889 }
3890
3891 /*
3892 * All done.  Write the updated control block back to the caller.
3893 */
3894 ifc.ifc_len = total;
3895
3896 /*
3897 * Both BSD and Solaris return 0 here, so we do too.
3898 */
3899 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
3900}
3901
3902#ifdef CONFIG_PROC_FS
3903/*
3904 * This is invoked by the /proc filesystem handler to display a device
3905 * in detail.
3906 */
3907void *dev_seq_start(struct seq_file *seq, loff_t *pos)
3908 __acquires(RCU)
3909{
3910 struct net *net = seq_file_net(seq);
3911 loff_t off;
3912 struct net_device *dev;
3913
3914 rcu_read_lock();
3915 if (!*pos)
3916 return SEQ_START_TOKEN;
3917
3918 off = 1;
3919 for_each_netdev_rcu(net, dev)
3920 if (off++ == *pos)
3921 return dev;
3922
3923 return NULL;
3924}
3925
3926void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3927{
3928 struct net_device *dev = (v == SEQ_START_TOKEN) ?
3929 first_net_device(seq_file_net(seq)) :
3930 next_net_device((struct net_device *)v);
3931
3932 ++*pos;
3933 return rcu_dereference(dev);
3934}
3935
3936void dev_seq_stop(struct seq_file *seq, void *v)
3937 __releases(RCU)
3938{
3939 rcu_read_unlock();
3940}
3941
3942static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
3943{
3944 struct rtnl_link_stats64 temp;
3945 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
3946
3947 seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
3948 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
3949 dev->name, stats->rx_bytes, stats->rx_packets,
3950 stats->rx_errors,
3951 stats->rx_dropped + stats->rx_missed_errors,
3952 stats->rx_fifo_errors,
3953 stats->rx_length_errors + stats->rx_over_errors +
3954 stats->rx_crc_errors + stats->rx_frame_errors,
3955 stats->rx_compressed, stats->multicast,
3956 stats->tx_bytes, stats->tx_packets,
3957 stats->tx_errors, stats->tx_dropped,
3958 stats->tx_fifo_errors, stats->collisions,
3959 stats->tx_carrier_errors +
3960 stats->tx_aborted_errors +
3961 stats->tx_window_errors +
3962 stats->tx_heartbeat_errors,
3963 stats->tx_compressed);
3964}
3965
3966/*
3967 * Called from the PROCfs module. This now uses the new arbitrary sized
3968 * /proc/net interface to create /proc/net/dev
3969 */
3970static int dev_seq_show(struct seq_file *seq, void *v)
3971{
3972 if (v == SEQ_START_TOKEN)
3973 seq_puts(seq, "Inter-| Receive "
3974 " | Transmit\n"
3975 " face |bytes packets errs drop fifo frame "
3976 "compressed multicast|bytes packets errs "
3977 "drop fifo colls carrier compressed\n");
3978 else
3979 dev_seq_printf_stats(seq, v);
3980 return 0;
3981}
3982
3983static struct softnet_data *softnet_get_online(loff_t *pos)
3984{
3985 struct softnet_data *sd = NULL;
3986
3987 while (*pos < nr_cpu_ids)
3988 if (cpu_online(*pos)) {
3989 sd = &per_cpu(softnet_data, *pos);
3990 break;
3991 } else
3992 ++*pos;
3993 return sd;
3994}
3995
3996static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
3997{
3998 return softnet_get_online(pos);
3999}
4000
4001static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4002{
4003 ++*pos;
4004 return softnet_get_online(pos);
4005}
4006
4007static void softnet_seq_stop(struct seq_file *seq, void *v)
4008{
4009}
4010
4011static int softnet_seq_show(struct seq_file *seq, void *v)
4012{
4013 struct softnet_data *sd = v;
4014
4015 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
4016 sd->processed, sd->dropped, sd->time_squeeze, 0,
4017 0, 0, 0, 0, /* was fastroute */
4018 sd->cpu_collision, sd->received_rps);
4019 return 0;
4020}
4021
4022static const struct seq_operations dev_seq_ops = {
4023 .start = dev_seq_start,
4024 .next = dev_seq_next,
4025 .stop = dev_seq_stop,
4026 .show = dev_seq_show,
4027};
4028
4029static int dev_seq_open(struct inode *inode, struct file *file)
4030{
4031 return seq_open_net(inode, file, &dev_seq_ops,
4032 sizeof(struct seq_net_private));
4033}
4034
4035static const struct file_operations dev_seq_fops = {
4036 .owner = THIS_MODULE,
4037 .open = dev_seq_open,
4038 .read = seq_read,
4039 .llseek = seq_lseek,
4040 .release = seq_release_net,
4041};
4042
4043static const struct seq_operations softnet_seq_ops = {
4044 .start = softnet_seq_start,
4045 .next = softnet_seq_next,
4046 .stop = softnet_seq_stop,
4047 .show = softnet_seq_show,
4048};
4049
4050static int softnet_seq_open(struct inode *inode, struct file *file)
4051{
4052 return seq_open(file, &softnet_seq_ops);
4053}
4054
4055static const struct file_operations softnet_seq_fops = {
4056 .owner = THIS_MODULE,
4057 .open = softnet_seq_open,
4058 .read = seq_read,
4059 .llseek = seq_lseek,
4060 .release = seq_release,
4061};
4062
4063static void *ptype_get_idx(loff_t pos)
4064{
4065 struct packet_type *pt = NULL;
4066 loff_t i = 0;
4067 int t;
4068
4069 list_for_each_entry_rcu(pt, &ptype_all, list) {
4070 if (i == pos)
4071 return pt;
4072 ++i;
4073 }
4074
4075 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
4076 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
4077 if (i == pos)
4078 return pt;
4079 ++i;
4080 }
4081 }
4082 return NULL;
4083}
4084
4085static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
4086 __acquires(RCU)
4087{
4088 rcu_read_lock();
4089 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
4090}
4091
4092static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4093{
4094 struct packet_type *pt;
4095 struct list_head *nxt;
4096 int hash;
4097
4098 ++*pos;
4099 if (v == SEQ_START_TOKEN)
4100 return ptype_get_idx(0);
4101
4102 pt = v;
4103 nxt = pt->list.next;
4104 if (pt->type == htons(ETH_P_ALL)) {
4105 if (nxt != &ptype_all)
4106 goto found;
4107 hash = 0;
4108 nxt = ptype_base[0].next;
4109 } else
4110 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
4111
4112 while (nxt == &ptype_base[hash]) {
4113 if (++hash >= PTYPE_HASH_SIZE)
4114 return NULL;
4115 nxt = ptype_base[hash].next;
4116 }
4117found:
4118 return list_entry(nxt, struct packet_type, list);
4119}
4120
4121static void ptype_seq_stop(struct seq_file *seq, void *v)
4122 __releases(RCU)
4123{
4124 rcu_read_unlock();
4125}
4126
4127static int ptype_seq_show(struct seq_file *seq, void *v)
4128{
4129 struct packet_type *pt = v;
4130
4131 if (v == SEQ_START_TOKEN)
4132 seq_puts(seq, "Type Device Function\n");
4133 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
4134 if (pt->type == htons(ETH_P_ALL))
4135 seq_puts(seq, "ALL ");
4136 else
4137 seq_printf(seq, "%04x", ntohs(pt->type));
4138
4139 seq_printf(seq, " %-8s %pF\n",
4140 pt->dev ? pt->dev->name : "", pt->func);
4141 }
4142
4143 return 0;
4144}
4145
4146static const struct seq_operations ptype_seq_ops = {
4147 .start = ptype_seq_start,
4148 .next = ptype_seq_next,
4149 .stop = ptype_seq_stop,
4150 .show = ptype_seq_show,
4151};
4152
4153static int ptype_seq_open(struct inode *inode, struct file *file)
4154{
4155 return seq_open_net(inode, file, &ptype_seq_ops,
4156 sizeof(struct seq_net_private));
4157}
4158
4159static const struct file_operations ptype_seq_fops = {
4160 .owner = THIS_MODULE,
4161 .open = ptype_seq_open,
4162 .read = seq_read,
4163 .llseek = seq_lseek,
4164 .release = seq_release_net,
4165};
4166
4167
4168static int __net_init dev_proc_net_init(struct net *net)
4169{
4170 int rc = -ENOMEM;
4171
4172 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
4173 goto out;
4174 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
4175 goto out_dev;
4176 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
4177 goto out_softnet;
4178
4179 if (wext_proc_init(net))
4180 goto out_ptype;
4181 rc = 0;
4182out:
4183 return rc;
4184out_ptype:
4185 proc_net_remove(net, "ptype");
4186out_softnet:
4187 proc_net_remove(net, "softnet_stat");
4188out_dev:
4189 proc_net_remove(net, "dev");
4190 goto out;
4191}
4192
4193static void __net_exit dev_proc_net_exit(struct net *net)
4194{
4195 wext_proc_exit(net);
4196
4197 proc_net_remove(net, "ptype");
4198 proc_net_remove(net, "softnet_stat");
4199 proc_net_remove(net, "dev");
4200}
4201
4202static struct pernet_operations __net_initdata dev_proc_ops = {
4203 .init = dev_proc_net_init,
4204 .exit = dev_proc_net_exit,
4205};
4206
4207static int __init dev_proc_init(void)
4208{
4209 return register_pernet_subsys(&dev_proc_ops);
4210}
4211#else
4212#define dev_proc_init() 0
4213#endif
4214
4215
4216/**
4217 * netdev_set_master - set up master/slave pair
4218 * @slave: slave device
4219 * @master: new master device
4220 *
4221 * Changes the master device of the slave. Pass %NULL to break the
4222 * bonding. The caller must hold the RTNL semaphore. On a failure
4223 * a negative errno code is returned. On success the reference counts
4224 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
4225 * function returns zero.
4226 */
4227int netdev_set_master(struct net_device *slave, struct net_device *master)
4228{
4229 struct net_device *old = slave->master;
4230
4231 ASSERT_RTNL();
4232
4233 if (master) {
4234 if (old)
4235 return -EBUSY;
4236 dev_hold(master);
4237 }
4238
4239 slave->master = master;
4240
4241 if (old) {
4242 synchronize_net();
4243 dev_put(old);
4244 }
4245 if (master)
4246 slave->flags |= IFF_SLAVE;
4247 else
4248 slave->flags &= ~IFF_SLAVE;
4249
4250 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
4251 return 0;
4252}
4253EXPORT_SYMBOL(netdev_set_master);
4254
4255static void dev_change_rx_flags(struct net_device *dev, int flags)
4256{
4257 const struct net_device_ops *ops = dev->netdev_ops;
4258
4259 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
4260 ops->ndo_change_rx_flags(dev, flags);
4261}
4262
4263static int __dev_set_promiscuity(struct net_device *dev, int inc)
4264{
4265 unsigned short old_flags = dev->flags;
4266 uid_t uid;
4267 gid_t gid;
4268
4269 ASSERT_RTNL();
4270
4271 dev->flags |= IFF_PROMISC;
4272 dev->promiscuity += inc;
4273 if (dev->promiscuity == 0) {
4274 /*
4275 * Avoid overflow.
4276 * If inc causes overflow, untouch promisc and return error.
4277 */
4278 if (inc < 0)
4279 dev->flags &= ~IFF_PROMISC;
4280 else {
4281 dev->promiscuity -= inc;
4282 printk(KERN_WARNING "%s: promiscuity counter overflow, "
4283 "set promiscuity failed; the promiscuity feature "
4284 "of this device may be broken.\n", dev->name);
4285 return -EOVERFLOW;
4286 }
4287 }
4288 if (dev->flags != old_flags) {
4289 printk(KERN_INFO "device %s %s promiscuous mode\n",
4290 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
4291 "left");
4292 if (audit_enabled) {
4293 current_uid_gid(&uid, &gid);
4294 audit_log(current->audit_context, GFP_ATOMIC,
4295 AUDIT_ANOM_PROMISCUOUS,
4296 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
4297 dev->name, (dev->flags & IFF_PROMISC),
4298 (old_flags & IFF_PROMISC),
4299 audit_get_loginuid(current),
4300 uid, gid,
4301 audit_get_sessionid(current));
4302 }
4303
4304 dev_change_rx_flags(dev, IFF_PROMISC);
4305 }
4306 return 0;
4307}
4308
4309
4310/**
4311 * dev_set_promiscuity - update promiscuity count on a device
4312 * @dev: device
4313 * @inc: modifier
4314 *
4315 * Add or remove promiscuity from a device. While the count in the device
4316 * remains above zero the interface remains promiscuous. Once it hits zero
4317 * the device reverts back to normal filtering operation. A negative inc
4318 * value is used to drop promiscuity on the device.
4319 */
4320int dev_set_promiscuity(struct net_device *dev, int inc)
4321{
4322 unsigned short old_flags = dev->flags;
4323 int err;
4324
4325 err = __dev_set_promiscuity(dev, inc);
4326 if (err < 0)
4327 return err;
4328 if (dev->flags != old_flags)
4329 dev_set_rx_mode(dev);
4330 return err;
4331}
4332EXPORT_SYMBOL(dev_set_promiscuity);
4333
4334
4335
4336/**
4337 * dev_set_allmulti - update allmulti count on a device
4338 * @dev: device
4339 * @inc: modifier
4340 *
4341 * Add or remove reception of all multicast frames to a device. While the
4342 * count in the device remains above zero the interface remains listening
4343 * to all interfaces. Once it hits zero the device reverts back to normal
4344 * filtering operation. A negative @inc value is used to drop the counter
4345 * when releasing a resource needing all multicasts.
4346 */
4347int dev_set_allmulti(struct net_device *dev, int inc)
4348{
4349 unsigned short old_flags = dev->flags;
4350
4351 ASSERT_RTNL();
4352
4353 dev->flags |= IFF_ALLMULTI;
4354 dev->allmulti += inc;
4355 if (dev->allmulti == 0) {
4356 /*
4357 * Avoid overflow.
4358 * If inc causes overflow, untouch allmulti and return error.
4359 */
4360 if (inc < 0)
4361 dev->flags &= ~IFF_ALLMULTI;
4362 else {
4363 dev->allmulti -= inc;
4364 printk(KERN_WARNING "%s: allmulti counter overflow, "
4365 "set allmulti failed; the allmulti feature "
4366 "of this device may be broken.\n", dev->name);
4367 return -EOVERFLOW;
4368 }
4369 }
4370 if (dev->flags ^ old_flags) {
4371 dev_change_rx_flags(dev, IFF_ALLMULTI);
4372 dev_set_rx_mode(dev);
4373 }
4374 return 0;
4375}
4376EXPORT_SYMBOL(dev_set_allmulti);
4377
4378/*
4379 * Upload unicast and multicast address lists to device and
4380 * configure RX filtering. When the device doesn't support unicast
4381 * filtering it is put in promiscuous mode while unicast addresses
4382 * are present.
4383 */
4384void __dev_set_rx_mode(struct net_device *dev)
4385{
4386 const struct net_device_ops *ops = dev->netdev_ops;
4387
4388 /* dev_open will call this function so the list will stay sane. */
4389 if (!(dev->flags&IFF_UP))
4390 return;
4391
4392 if (!netif_device_present(dev))
4393 return;
4394
4395 if (ops->ndo_set_rx_mode)
4396 ops->ndo_set_rx_mode(dev);
4397 else {
4398 /* Unicast address changes may only happen under the rtnl,
4399 * therefore calling __dev_set_promiscuity here is safe.
4400 */
4401 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
4402 __dev_set_promiscuity(dev, 1);
4403 dev->uc_promisc = 1;
4404 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
4405 __dev_set_promiscuity(dev, -1);
4406 dev->uc_promisc = 0;
4407 }
4408
4409 if (ops->ndo_set_multicast_list)
4410 ops->ndo_set_multicast_list(dev);
4411 }
4412}
4413
4414void dev_set_rx_mode(struct net_device *dev)
4415{
4416 netif_addr_lock_bh(dev);
4417 __dev_set_rx_mode(dev);
4418 netif_addr_unlock_bh(dev);
4419}
4420
4421/**
4422 * dev_get_flags - get flags reported to userspace
4423 * @dev: device
4424 *
4425 * Get the combination of flag bits exported through APIs to userspace.
4426 */
4427unsigned dev_get_flags(const struct net_device *dev)
4428{
4429 unsigned flags;
4430
4431 flags = (dev->flags & ~(IFF_PROMISC |
4432 IFF_ALLMULTI |
4433 IFF_RUNNING |
4434 IFF_LOWER_UP |
4435 IFF_DORMANT)) |
4436 (dev->gflags & (IFF_PROMISC |
4437 IFF_ALLMULTI));
4438
4439 if (netif_running(dev)) {
4440 if (netif_oper_up(dev))
4441 flags |= IFF_RUNNING;
4442 if (netif_carrier_ok(dev))
4443 flags |= IFF_LOWER_UP;
4444 if (netif_dormant(dev))
4445 flags |= IFF_DORMANT;
4446 }
4447
4448 return flags;
4449}
4450EXPORT_SYMBOL(dev_get_flags);
4451
4452int __dev_change_flags(struct net_device *dev, unsigned int flags)
4453{
4454 int old_flags = dev->flags;
4455 int ret;
4456
4457 ASSERT_RTNL();
4458
4459 /*
4460 * Set the flags on our device.
4461 */
4462
4463 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4464 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4465 IFF_AUTOMEDIA)) |
4466 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4467 IFF_ALLMULTI));
4468
4469 /*
4470 * Load in the correct multicast list now the flags have changed.
4471 */
4472
4473 if ((old_flags ^ flags) & IFF_MULTICAST)
4474 dev_change_rx_flags(dev, IFF_MULTICAST);
4475
4476 dev_set_rx_mode(dev);
4477
4478 /*
4479 * Have we downed the interface?  We handle IFF_UP ourselves
4480 * according to user attempts to set it, rather than blindly
4481 * setting it.
4482 */
4483
4484 ret = 0;
4485 if ((old_flags ^ flags) & IFF_UP) {
4486 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
4487
4488 if (!ret)
4489 dev_set_rx_mode(dev);
4490 }
4491
4492 if ((flags ^ dev->gflags) & IFF_PROMISC) {
4493 int inc = (flags & IFF_PROMISC) ? 1 : -1;
4494
4495 dev->gflags ^= IFF_PROMISC;
4496 dev_set_promiscuity(dev, inc);
4497 }
4498
4499 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4500 * is important. Some (broken) drivers set IFF_PROMISC, when
4501 * IFF_ALLMULTI is requested, not asking us and not reporting.
4502 */
4503 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
4504 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4505
4506 dev->gflags ^= IFF_ALLMULTI;
4507 dev_set_allmulti(dev, inc);
4508 }
4509
4510 return ret;
4511}
4512
4513void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
4514{
4515 unsigned int changes = dev->flags ^ old_flags;
4516
4517 if (changes & IFF_UP) {
4518 if (dev->flags & IFF_UP)
4519 call_netdevice_notifiers(NETDEV_UP, dev);
4520 else
4521 call_netdevice_notifiers(NETDEV_DOWN, dev);
4522 }
4523
4524 if (dev->flags & IFF_UP &&
4525 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
4526 call_netdevice_notifiers(NETDEV_CHANGE, dev);
4527}
4528
4529/**
4530 * dev_change_flags - change device settings
4531 * @dev: device
4532 * @flags: device state flags
4533 *
4534 * Change settings on device based state flags. The flags are
4535 * in the userspace exported format.
4536 */
4537int dev_change_flags(struct net_device *dev, unsigned flags)
4538{
4539 int ret, changes;
4540 int old_flags = dev->flags;
4541
4542 ret = __dev_change_flags(dev, flags);
4543 if (ret < 0)
4544 return ret;
4545
4546 changes = old_flags ^ dev->flags;
4547 if (changes)
4548 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
4549
4550 __dev_notify_flags(dev, old_flags);
4551 return ret;
4552}
4553EXPORT_SYMBOL(dev_change_flags);
4554
4555/**
4556 * dev_set_mtu - Change maximum transfer unit
4557 * @dev: device
4558 * @new_mtu: new transfer unit
4559 *
4560 * Change the maximum transfer size of the network device.
4561 */
4562int dev_set_mtu(struct net_device *dev, int new_mtu)
4563{
4564 const struct net_device_ops *ops = dev->netdev_ops;
4565 int err;
4566
4567 if (new_mtu == dev->mtu)
4568 return 0;
4569
4570 /* MTU must be positive. */
4571 if (new_mtu < 0)
4572 return -EINVAL;
4573
4574 if (!netif_device_present(dev))
4575 return -ENODEV;
4576
4577 err = 0;
4578 if (ops->ndo_change_mtu)
4579 err = ops->ndo_change_mtu(dev, new_mtu);
4580 else
4581 dev->mtu = new_mtu;
4582
4583 if (!err && dev->flags & IFF_UP)
4584 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
4585 return err;
4586}
4587EXPORT_SYMBOL(dev_set_mtu);
4588
4589/**
4590 * dev_set_mac_address - Change Media Access Control Address
4591 * @dev: device
4592 * @sa: new address
4593 *
4594 * Change the hardware (MAC) address of the device.
4595 */
4596int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4597{
4598 const struct net_device_ops *ops = dev->netdev_ops;
4599 int err;
4600
4601 if (!ops->ndo_set_mac_address)
4602 return -EOPNOTSUPP;
4603 if (sa->sa_family != dev->type)
4604 return -EINVAL;
4605 if (!netif_device_present(dev))
4606 return -ENODEV;
4607 err = ops->ndo_set_mac_address(dev, sa);
4608 if (!err)
4609 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4610 return err;
4611}
4612EXPORT_SYMBOL(dev_set_mac_address);
4613
4614/*
4615 * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
4616 */
4617static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
4618{
4619 int err;
4620 struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
4621
4622 if (!dev)
4623 return -ENODEV;
4624
4625 switch (cmd) {
4626 case SIOCGIFFLAGS:
4627 ifr->ifr_flags = (short) dev_get_flags(dev);
4628 return 0;
4629
4630 case SIOCGIFMETRIC:
4631 /* The interface metric is currently unused; report zero. */
4632 ifr->ifr_metric = 0;
4633 return 0;
4634
4635 case SIOCGIFMTU:
4636 ifr->ifr_mtu = dev->mtu;
4637 return 0;
4638
4639 case SIOCGIFHWADDR:
4640 if (!dev->addr_len)
4641 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4642 else
4643 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4644 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4645 ifr->ifr_hwaddr.sa_family = dev->type;
4646 return 0;
4647
4648 case SIOCGIFSLAVE:
4649 err = -EINVAL;
4650 break;
4651
4652 case SIOCGIFMAP:
4653 ifr->ifr_map.mem_start = dev->mem_start;
4654 ifr->ifr_map.mem_end = dev->mem_end;
4655 ifr->ifr_map.base_addr = dev->base_addr;
4656 ifr->ifr_map.irq = dev->irq;
4657 ifr->ifr_map.dma = dev->dma;
4658 ifr->ifr_map.port = dev->if_port;
4659 return 0;
4660
4661 case SIOCGIFINDEX:
4662 ifr->ifr_ifindex = dev->ifindex;
4663 return 0;
4664
4665 case SIOCGIFTXQLEN:
4666 ifr->ifr_qlen = dev->tx_queue_len;
4667 return 0;
4668
4669 default:
4670 /*
4671 * dev_ioctl() should ensure this case is never reached.
4672 */
4673 WARN_ON(1);
4674 err = -EINVAL;
4675 break;
4676
4677 }
4678 return err;
4679}
4680
4681/*
4682 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
4683 */
4684static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4685{
4686 int err;
4687 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
4688 const struct net_device_ops *ops;
4689
4690 if (!dev)
4691 return -ENODEV;
4692
4693 ops = dev->netdev_ops;
4694
4695 switch (cmd) {
4696 case SIOCSIFFLAGS:
4697 return dev_change_flags(dev, ifr->ifr_flags);
4698
4699 case SIOCSIFMETRIC:
4700 /* Setting the interface metric is not supported. */
4701 return -EOPNOTSUPP;
4702
4703 case SIOCSIFMTU:
4704 return dev_set_mtu(dev, ifr->ifr_mtu);
4705
4706 case SIOCSIFHWADDR:
4707 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
4708
4709 case SIOCSIFHWBROADCAST:
4710 if (ifr->ifr_hwaddr.sa_family != dev->type)
4711 return -EINVAL;
4712 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4713 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4714 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4715 return 0;
4716
4717 case SIOCSIFMAP:
4718 if (ops->ndo_set_config) {
4719 if (!netif_device_present(dev))
4720 return -ENODEV;
4721 return ops->ndo_set_config(dev, &ifr->ifr_map);
4722 }
4723 return -EOPNOTSUPP;
4724
4725 case SIOCADDMULTI:
4726 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4727 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4728 return -EINVAL;
4729 if (!netif_device_present(dev))
4730 return -ENODEV;
4731 return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
4732
4733 case SIOCDELMULTI:
4734 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4735 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4736 return -EINVAL;
4737 if (!netif_device_present(dev))
4738 return -ENODEV;
4739 return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
4740
4741 case SIOCSIFTXQLEN:
4742 if (ifr->ifr_qlen < 0)
4743 return -EINVAL;
4744 dev->tx_queue_len = ifr->ifr_qlen;
4745 return 0;
4746
4747 case SIOCSIFNAME:
4748 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4749 return dev_change_name(dev, ifr->ifr_newname);
4750
4751 /*
4752 * Unknown or private ioctl
4753 */
4754 default:
4755 if ((cmd >= SIOCDEVPRIVATE &&
4756 cmd <= SIOCDEVPRIVATE + 15) ||
4757 cmd == SIOCBONDENSLAVE ||
4758 cmd == SIOCBONDRELEASE ||
4759 cmd == SIOCBONDSETHWADDR ||
4760 cmd == SIOCBONDSLAVEINFOQUERY ||
4761 cmd == SIOCBONDINFOQUERY ||
4762 cmd == SIOCBONDCHANGEACTIVE ||
4763 cmd == SIOCGMIIPHY ||
4764 cmd == SIOCGMIIREG ||
4765 cmd == SIOCSMIIREG ||
4766 cmd == SIOCBRADDIF ||
4767 cmd == SIOCBRDELIF ||
4768 cmd == SIOCSHWTSTAMP ||
4769 cmd == SIOCWANDEV) {
4770 err = -EOPNOTSUPP;
4771 if (ops->ndo_do_ioctl) {
4772 if (netif_device_present(dev))
4773 err = ops->ndo_do_ioctl(dev, ifr, cmd);
4774 else
4775 err = -ENODEV;
4776 }
4777 } else
4778 err = -EINVAL;
4779
4780 }
4781 return err;
4782}
4783
4784
4785
4786
4787
4788
4789
4790/**
4791 * dev_ioctl - network device ioctl
4792 * @net: the applicable net namespace
4793 * @cmd: command to issue
4794 * @arg: pointer to a struct ifreq in user space
4795 *
4796 * Issue ioctl functions to devices. This is normally called by the
4797 * user space syscall interfaces but can sometimes be useful for
4798 * other purposes. The return value is the return from the syscall if
4799 * positive or a negative errno code on error.
4800 */
4801int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
4802{
4803 struct ifreq ifr;
4804 int ret;
4805 char *colon;
4806
4807 /* One special case: SIOCGIFCONF takes ifconf argument
4808 * and requires shared lock, because it sleeps
4809 * writing to user space.
4810 */
4811
4812 if (cmd == SIOCGIFCONF) {
4813 rtnl_lock();
4814 ret = dev_ifconf(net, (char __user *) arg);
4815 rtnl_unlock();
4816 return ret;
4817 }
4818 if (cmd == SIOCGIFNAME)
4819 return dev_ifname(net, (struct ifreq __user *)arg);
4820
4821 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4822 return -EFAULT;
4823
4824 ifr.ifr_name[IFNAMSIZ-1] = 0;
4825
4826 colon = strchr(ifr.ifr_name, ':');
4827 if (colon)
4828 *colon = 0;
4829
4830
4831 /*
4832 * See which interface the caller is talking about.
4833 */
4834 switch (cmd) {
4835 /*
4836 * These ioctl calls:
4837 * - can be done by all.
4838 * - atomic and do not require locking.
4839 * - return a value
4840 */
4841 case SIOCGIFFLAGS:
4842 case SIOCGIFMETRIC:
4843 case SIOCGIFMTU:
4844 case SIOCGIFHWADDR:
4845 case SIOCGIFSLAVE:
4846 case SIOCGIFMAP:
4847 case SIOCGIFINDEX:
4848 case SIOCGIFTXQLEN:
4849 dev_load(net, ifr.ifr_name);
4850 rcu_read_lock();
4851 ret = dev_ifsioc_locked(net, &ifr, cmd);
4852 rcu_read_unlock();
4853 if (!ret) {
4854 if (colon)
4855 *colon = ':';
4856 if (copy_to_user(arg, &ifr,
4857 sizeof(struct ifreq)))
4858 ret = -EFAULT;
4859 }
4860 return ret;
4861
4862 case SIOCETHTOOL:
4863 dev_load(net, ifr.ifr_name);
4864 rtnl_lock();
4865 ret = dev_ethtool(net, &ifr);
4866 rtnl_unlock();
4867 if (!ret) {
4868 if (colon)
4869 *colon = ':';
4870 if (copy_to_user(arg, &ifr,
4871 sizeof(struct ifreq)))
4872 ret = -EFAULT;
4873 }
4874 return ret;
4875
4876 /*
4877 * These ioctl calls:
4878 * - require superuser power.
4879 * - require strict serialization.
4880 * - return a value
4881 */
4882 case SIOCGMIIPHY:
4883 case SIOCGMIIREG:
4884 case SIOCSIFNAME:
4885 if (!capable(CAP_NET_ADMIN))
4886 return -EPERM;
4887 dev_load(net, ifr.ifr_name);
4888 rtnl_lock();
4889 ret = dev_ifsioc(net, &ifr, cmd);
4890 rtnl_unlock();
4891 if (!ret) {
4892 if (colon)
4893 *colon = ':';
4894 if (copy_to_user(arg, &ifr,
4895 sizeof(struct ifreq)))
4896 ret = -EFAULT;
4897 }
4898 return ret;
4899
4900 /*
4901 * These ioctl calls:
4902 * - require superuser power.
4903 * - require strict serialization.
4904 * - do not return a value
4905 */
4906 case SIOCSIFFLAGS:
4907 case SIOCSIFMETRIC:
4908 case SIOCSIFMTU:
4909 case SIOCSIFMAP:
4910 case SIOCSIFHWADDR:
4911 case SIOCSIFSLAVE:
4912 case SIOCADDMULTI:
4913 case SIOCDELMULTI:
4914 case SIOCSIFHWBROADCAST:
4915 case SIOCSIFTXQLEN:
4916 case SIOCSMIIREG:
4917 case SIOCBONDENSLAVE:
4918 case SIOCBONDRELEASE:
4919 case SIOCBONDSETHWADDR:
4920 case SIOCBONDCHANGEACTIVE:
4921 case SIOCBRADDIF:
4922 case SIOCBRDELIF:
4923 case SIOCSHWTSTAMP:
4924 if (!capable(CAP_NET_ADMIN))
4925 return -EPERM;
4926 /* fall through */
4927 case SIOCBONDSLAVEINFOQUERY:
4928 case SIOCBONDINFOQUERY:
4929 dev_load(net, ifr.ifr_name);
4930 rtnl_lock();
4931 ret = dev_ifsioc(net, &ifr, cmd);
4932 rtnl_unlock();
4933 return ret;
4934
4935 case SIOCGIFMEM:
4936 /* Get the per device memory space. We can add this but
4937 * currently do not support it */
4938 case SIOCSIFMEM:
4939 /* Set the per device memory buffer space.
4940 * Not applicable in our case */
4941 case SIOCSIFLINK:
4942 return -EINVAL;
4943
4944 /*
4945 * Unknown or private ioctl.
4946 */
4947 default:
4948 if (cmd == SIOCWANDEV ||
4949 (cmd >= SIOCDEVPRIVATE &&
4950 cmd <= SIOCDEVPRIVATE + 15)) {
4951 dev_load(net, ifr.ifr_name);
4952 rtnl_lock();
4953 ret = dev_ifsioc(net, &ifr, cmd);
4954 rtnl_unlock();
4955 if (!ret && copy_to_user(arg, &ifr,
4956 sizeof(struct ifreq)))
4957 ret = -EFAULT;
4958 return ret;
4959 }
4960
4961 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
4962 return wext_handle_ioctl(net, &ifr, cmd, arg);
4963 return -EINVAL;
4964 }
4965}
4966
4967
4968/**
4969 * dev_new_index - allocate an ifindex
4970 * @net: the applicable net namespace
4971 *
4972 * Returns a suitable unique value for a new device interface
4973 * number.  The caller must hold the rtnl semaphore or the
4974 * dev_base_lock to be sure it remains unique.
4975 */
4976static int dev_new_index(struct net *net)
4977{
4978 static int ifindex;
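 /* ifindex just increments and wraps; skip values still in use. */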
4979 for (;;) {
4980 if (++ifindex <= 0)
4981 ifindex = 1;
4982 if (!__dev_get_by_index(net, ifindex))
4983 return ifindex;
4984 }
4985}
4986
4987/* Delayed registration/unregistration */
4988static LIST_HEAD(net_todo_list);
4989
4990static void net_set_todo(struct net_device *dev)
4991{
4992 list_add_tail(&dev->todo_list, &net_todo_list);
4993}
4994
4995static void rollback_registered_many(struct list_head *head)
4996{
4997 struct net_device *dev, *tmp;
4998
4999 BUG_ON(dev_boot_phase);
5000 ASSERT_RTNL();
5001
5002 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
5003 /* Some devices call without registering
5004 * for initialization unwind. Remove those
5005 * devices and proceed with the remaining.
5006 */
5007 if (dev->reg_state == NETREG_UNINITIALIZED) {
5008 pr_debug("unregister_netdevice: device %s/%p never "
5009 "was registered\n", dev->name, dev);
5010
5011 WARN_ON(1);
5012 list_del(&dev->unreg_list);
5013 continue;
5014 }
5015
5016 BUG_ON(dev->reg_state != NETREG_REGISTERED);
5017 }
5018
5019 /* If device is running, close it first. */
5020 dev_close_many(head);
5021
5022 list_for_each_entry(dev, head, unreg_list) {
5023 /* And unlink it from device chain. */
5024 unlist_netdevice(dev);
5025
5026 dev->reg_state = NETREG_UNREGISTERING;
5027 }
5028
5029 synchronize_net();
5030
5031 list_for_each_entry(dev, head, unreg_list) {
5032 /* Shutdown queueing discipline. */
5033 dev_shutdown(dev);
5034
5035 /*
5036 * Notify protocols, that we are about to destroy
5037 * this device. They should clean all the things.
5038 */
5039 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5040
5041 if (!dev->rtnl_link_ops ||
5042 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5043 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
5044
5045 /*
5046 * Flush the unicast and multicast chains
5047 */
5048 dev_uc_flush(dev);
5049 dev_mc_flush(dev);
5050
5051 if (dev->netdev_ops->ndo_uninit)
5052 dev->netdev_ops->ndo_uninit(dev);
5053
5054 /* Notifier chain MUST detach us from master device. */
5055 WARN_ON(dev->master);
5056
5057 /* Remove entries from kobject tree */
5058 netdev_unregister_kobject(dev);
5059 }
5060
5061 /* Process any work delayed until the end of the batch */
5062 dev = list_first_entry(head, struct net_device, unreg_list);
5063 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
5064
5065 rcu_barrier();
5066
5067 list_for_each_entry(dev, head, unreg_list)
5068 dev_put(dev);
5069}
5070
5071static void rollback_registered(struct net_device *dev)
5072{
5073 LIST_HEAD(single);
5074
5075 list_add(&dev->unreg_list, &single);
5076 rollback_registered_many(&single);
5077 list_del(&single);
5078}
5079
5080unsigned long netdev_fix_features(unsigned long features, const char *name)
5081{
5082 /* Fix illegal SG+CSUM combinations. */
5083 if ((features & NETIF_F_SG) &&
5084 !(features & NETIF_F_ALL_CSUM)) {
5085 if (name)
5086 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
5087 "checksum feature.\n", name);
5088 features &= ~NETIF_F_SG;
5089 }
5090
5091 /* TSO requires that SG is present as well. */
5092 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
5093 if (name)
5094 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
5095 "SG feature.\n", name);
5096 features &= ~NETIF_F_TSO;
5097 }
5098
5099 if (features & NETIF_F_UFO) {
5100 /* maybe split UFO into V4 and V6? */
5101 if (!((features & NETIF_F_GEN_CSUM) ||
5102 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
5103 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5104 if (name)
5105 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
5106 "since no checksum offload features.\n",
5107 name);
5108 features &= ~NETIF_F_UFO;
5109 }
5110
5111 if (!(features & NETIF_F_SG)) {
5112 if (name)
5113 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
5114 "since no NETIF_F_SG feature.\n", name);
5115 features &= ~NETIF_F_UFO;
5116 }
5117 }
5118
5119 return features;
5120}
5121EXPORT_SYMBOL(netdev_fix_features);
5122
5123/**
5124 * netif_stacked_transfer_operstate - transfer operstate
5125 * @rootdev: the root or lower level device to transfer state from
5126 * @dev: the device to transfer operstate to
5127 *
5128 * Transfer operational state from root to device. This is normally
5129 * called when a stacking relationship exists between the root
5130 * device and the device (a leaf device).
5131 */
5132void netif_stacked_transfer_operstate(const struct net_device *rootdev,
5133 struct net_device *dev)
5134{
5135 if (rootdev->operstate == IF_OPER_DORMANT)
5136 netif_dormant_on(dev);
5137 else
5138 netif_dormant_off(dev);
5139
5140 if (netif_carrier_ok(rootdev)) {
5141 if (!netif_carrier_ok(dev))
5142 netif_carrier_on(dev);
5143 } else {
5144 if (netif_carrier_ok(dev))
5145 netif_carrier_off(dev);
5146 }
5147}
5148EXPORT_SYMBOL(netif_stacked_transfer_operstate);
5149
5150#ifdef CONFIG_RPS
5151static int netif_alloc_rx_queues(struct net_device *dev)
5152{
5153 unsigned int i, count = dev->num_rx_queues;
5154 struct netdev_rx_queue *rx;
5155
5156 BUG_ON(count < 1);
5157
5158 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
5159 if (!rx) {
5160 pr_err("netdev: Unable to allocate %u rx queues.\n", count);
5161 return -ENOMEM;
5162 }
5163 dev->_rx = rx;
5164
5165 for (i = 0; i < count; i++)
5166 rx[i].dev = dev;
5167 return 0;
5168}
5169#endif
5170
5171static void netdev_init_one_queue(struct net_device *dev,
5172 struct netdev_queue *queue, void *_unused)
5173{
5174 /* Initialize queue lock */
5175 spin_lock_init(&queue->_xmit_lock);
5176 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
5177 queue->xmit_lock_owner = -1;
5178 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
5179 queue->dev = dev;
5180}
5181
5182static int netif_alloc_netdev_queues(struct net_device *dev)
5183{
5184 unsigned int count = dev->num_tx_queues;
5185 struct netdev_queue *tx;
5186
5187 BUG_ON(count < 1);
5188
5189 tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
5190 if (!tx) {
5191 pr_err("netdev: Unable to allocate %u tx queues.\n",
5192 count);
5193 return -ENOMEM;
5194 }
5195 dev->_tx = tx;
5196
5197 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
5198 spin_lock_init(&dev->tx_global_lock);
5199
5200 return 0;
5201}
5202
5203
5204/**
5205 * register_netdevice - register a network device
5206 * @dev: device to register
5207 *
5208 * Take a completed network device structure and add it to the kernel
5209 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5210 * chain. 0 is returned on success. A negative errno code is returned
5211 * on a failure to set up the device, or if the name is a duplicate.
5212 *
5213 * Callers must hold the rtnl semaphore. You may want
5214 * register_netdev() instead of this.
5215 *
5216 * BUGS:
5217 * The locking appears insufficient to guarantee two parallel registers
5218 * will not get the same name.
5219 */
5220int register_netdevice(struct net_device *dev)
5221{
5222 int ret;
5223 struct net *net = dev_net(dev);
5224
5225 BUG_ON(dev_boot_phase);
5226 ASSERT_RTNL();
5227
5228 might_sleep();
5229
5230 /* When net_device's are persistent, this will be fatal. */
5231 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
5232 BUG_ON(!net);
5233
5234 spin_lock_init(&dev->addr_list_lock);
5235 netdev_set_addr_lockdep_class(dev);
5236
5237 dev->iflink = -1;
5238
5239 /* Init, if this function is available */
5240 if (dev->netdev_ops->ndo_init) {
5241 ret = dev->netdev_ops->ndo_init(dev);
5242 if (ret) {
5243 if (ret > 0)
5244 ret = -EIO;
5245 goto out;
5246 }
5247 }
5248
5249 ret = dev_get_valid_name(dev, dev->name, 0);
5250 if (ret)
5251 goto err_uninit;
5252
5253 dev->ifindex = dev_new_index(net);
5254 if (dev->iflink == -1)
5255 dev->iflink = dev->ifindex;
5256
5257 /* Fix illegal checksum combinations */
5258 if ((dev->features & NETIF_F_HW_CSUM) &&
5259 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5260 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
5261 dev->name);
5262 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5263 }
5264
5265 if ((dev->features & NETIF_F_NO_CSUM) &&
5266 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5267 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
5268 dev->name);
5269 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
5270 }
5271
5272 dev->features = netdev_fix_features(dev->features, dev->name);
5273
5274 /* Enable software GSO if SG is supported. */
5275 if (dev->features & NETIF_F_SG)
5276 dev->features |= NETIF_F_GSO;
5277
5278 /* Enable GRO and NETIF_F_HIGHDMA for vlans by default,
5279 * vlan_dev_init() will do the dev->features check, so these features
5280 * are enabled only if supported by underlying device.
5281 */
5282 dev->vlan_features |= (NETIF_F_GRO | NETIF_F_HIGHDMA);
5283
5284 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
5285 ret = notifier_to_errno(ret);
5286 if (ret)
5287 goto err_uninit;
5288
5289 ret = netdev_register_kobject(dev);
5290 if (ret)
5291 goto err_uninit;
5292 dev->reg_state = NETREG_REGISTERED;
5293
5294 /*
5295 * Default initial state at registry is that the
5296 * device is present.
5297 */
5298
5299 set_bit(__LINK_STATE_PRESENT, &dev->state);
5300
5301 dev_init_scheduler(dev);
5302 dev_hold(dev);
5303 list_netdevice(dev);
5304
5305 /* Notify protocols, that a new device appeared. */
5306 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
5307 ret = notifier_to_errno(ret);
5308 if (ret) {
5309 rollback_registered(dev);
5310 dev->reg_state = NETREG_UNREGISTERED;
5311 }
5312
5313 /* Prevent userspace races by waiting until the network
5314 * device is fully setup before sending notifications.
5315 */
5316 if (!dev->rtnl_link_ops ||
5317 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5318 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
5319
5320out:
5321 return ret;
5322
5323err_uninit:
5324 if (dev->netdev_ops->ndo_uninit)
5325 dev->netdev_ops->ndo_uninit(dev);
5326 goto out;
5327}
5328EXPORT_SYMBOL(register_netdevice);
5329
5330/**
5331 * init_dummy_netdev - init a dummy network device for NAPI
5332 * @dev: device to init
5333 *
5334 * This takes a network device structure and initializes the minimum
5335 * amount of fields so it can be used to schedule NAPI polls without
5336 * registering a full blown interface. This is to be used by drivers
5337 * that need to tie several hardware interfaces to a single NAPI
5338 * poll scheduler due to HW limitations.
5339 */
5340int init_dummy_netdev(struct net_device *dev)
5341{
5342 /* Clear everything. Note we don't initialize spinlocks
5343 * as they aren't supposed to be taken by any of the
5344 * NAPI code and this dummy netdev is supposed to be
5345 * only ever used for NAPI polls
5346 */
5347 memset(dev, 0, sizeof(struct net_device));
5348
5349 /* make sure we BUG if trying to hit standard
5350 * register/unregister code path
5351 */
5352 dev->reg_state = NETREG_DUMMY;
5353
5354 /* NAPI wants this */
5355 INIT_LIST_HEAD(&dev->napi_list);
5356
5357 /* a dummy interface is started by default */
5358 set_bit(__LINK_STATE_PRESENT, &dev->state);
5359 set_bit(__LINK_STATE_START, &dev->state);
5360
5361 /* Note: we don't allocate pcpu_refcnt for dummy devices,
5362 * because users of this 'device' don't need to change
5363 * its refcount.
5364 */
5365
5366 return 0;
5367}
5368EXPORT_SYMBOL_GPL(init_dummy_netdev);
5369
5370
5371/**
5372 * register_netdev - register a network device
5373 * @dev: device to register
5374 *
5375 * Take a completed network device structure and add it to the kernel
5376 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5377 * chain. 0 is returned on success. A negative errno code is returned
5378 * on a failure to set up the device, or if the name is a duplicate.
5379 *
5380 * This is a wrapper around register_netdevice that takes the rtnl
5381 * semaphore and expands the device name if you passed a format string
5382 * to alloc_netdev.
5383 */
5384int register_netdev(struct net_device *dev)
5385{
5386 int err;
5387
5388 rtnl_lock();
5389
5390 /*
5391 * If the name is a format string the caller wants us to do a
5392 * name allocation.
5393 */
5394 if (strchr(dev->name, '%')) {
5395 err = dev_alloc_name(dev, dev->name);
5396 if (err < 0)
5397 goto out;
5398 }
5399
5400 err = register_netdevice(dev);
5401out:
5402 rtnl_unlock();
5403 return err;
5404}
5405EXPORT_SYMBOL(register_netdev);
5406
5407int netdev_refcnt_read(const struct net_device *dev)
5408{
5409 int i, refcnt = 0;
5410
5411 for_each_possible_cpu(i)
5412 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
5413 return refcnt;
5414}
5415EXPORT_SYMBOL(netdev_refcnt_read);
5416
5417/**
5418 * netdev_wait_allrefs - wait until all references are gone.
5419 *
5420 * This is called when unregistering network devices.
5421 *
5422 * Any protocol or device that holds a reference should register
5423 * for netdevice notification, and cleanup and put back the
5424 * reference if they receive an UNREGISTER event.
5425 * We can get stuck here if buggy protocols don't correctly
5426 * call dev_put.
5427 */
5428static void netdev_wait_allrefs(struct net_device *dev)
5429{
5430 unsigned long rebroadcast_time, warning_time;
5431 int refcnt;
5432
5433 linkwatch_forget_dev(dev);
5434
5435 rebroadcast_time = warning_time = jiffies;
5436 refcnt = netdev_refcnt_read(dev);
5437
5438 while (refcnt != 0) {
5439 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
5440 rtnl_lock();
5441
5442 /* Rebroadcast unregister notification */
5443 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5444
5445 /* don't resend NETDEV_UNREGISTER_BATCH; _BATCH users
5446 * should have already handled it the first time */
5447 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5448 &dev->state)) {
5449 /* We must not have linkwatch events
5450 * pending on unregister. If this
5451 * happens, we simply run the queue
5452 * unscheduled, resulting in a noop
5453 * for this device.
5454 */
5455 linkwatch_run_queue();
5456 }
5457
5458 __rtnl_unlock();
5459
5460 rebroadcast_time = jiffies;
5461 }
5462
5463 msleep(250);
5464
5465 refcnt = netdev_refcnt_read(dev);
5466
5467 if (time_after(jiffies, warning_time + 10 * HZ)) {
5468 printk(KERN_EMERG "unregister_netdevice: "
5469 "waiting for %s to become free. Usage "
5470 "count = %d\n",
5471 dev->name, refcnt);
5472 warning_time = jiffies;
5473 }
5474 }
5475}
5476
5477
5478
5479/* The sequence is:
5480 *
5481 * rtnl_lock();
5482 * ...
5483 * register_netdevice(x1);
5484 * register_netdevice(x2);
5485 * ...
5486 * unregister_netdevice(y1);
5487 * unregister_netdevice(y2);
5488 * ...
5489 * rtnl_unlock();
5490 * free_netdev(y1);
5491 * free_netdev(y2);
5492 *
5493 * We are invoked by rtnl_unlock().
5494 * This allows us to deal with problems:
5495 * 1) We can delete sysfs objects which invoke hotplug
5496 *    without deadlocking with linkwatch via keventd.
5497 * 2) Since we run with the RTNL semaphore not held, we can sleep
5498 *    safely in order to wait for the netdev refcnt to drop to zero.
5499 */
5500
5501void netdev_run_todo(void)
5502{
5503 struct list_head list;
5504
5505 /* Snapshot list, allow later requests */
5506 list_replace_init(&net_todo_list, &list);
5507
5508 __rtnl_unlock();
5509
5510 while (!list_empty(&list)) {
5511 struct net_device *dev
5512 = list_first_entry(&list, struct net_device, todo_list);
5513 list_del(&dev->todo_list);
5514
5515 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
5516 printk(KERN_ERR "network todo '%s' but state %d\n",
5517 dev->name, dev->reg_state);
5518 dump_stack();
5519 continue;
5520 }
5521
5522 dev->reg_state = NETREG_UNREGISTERED;
5523
5524 on_each_cpu(flush_backlog, dev, 1);
5525
5526 netdev_wait_allrefs(dev);
5527
5528 /* paranoia */
5529 BUG_ON(netdev_refcnt_read(dev));
5530 WARN_ON(rcu_dereference_raw(dev->ip_ptr));
5531 WARN_ON(rcu_dereference_raw(dev->ip6_ptr));
5532 WARN_ON(dev->dn_ptr);
5533
5534 if (dev->destructor)
5535 dev->destructor(dev);
5536
5537 /* Free network device */
5538 kobject_put(&dev->dev.kobj);
5539 }
5540}
5541
5542/* Convert net_device_stats to rtnl_link_stats64.  They have the same
5543 * fields in the same order, with only the type differing.
5544 */
5545static void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
5546 const struct net_device_stats *netdev_stats)
5547{
5548#if BITS_PER_LONG == 64
5549 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
5550 memcpy(stats64, netdev_stats, sizeof(*stats64));
5551#else
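 /* On 32-bit, the two structs differ only in field width:
 * copy field by field, zero-extending each unsigned long
 * counter into its u64 slot.
 */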
5552 size_t i, n = sizeof(*stats64) / sizeof(u64);
5553 const unsigned long *src = (const unsigned long *)netdev_stats;
5554 u64 *dst = (u64 *)stats64;
5555
5556 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
5557 sizeof(*stats64) / sizeof(u64));
5558 for (i = 0; i < n; i++)
5559 dst[i] = src[i];
5560#endif
5561}
5562
5563/**
5564 * dev_get_stats - get network device statistics
5565 * @dev: device to get statistics from
5566 * @storage: place to store stats
5567 *
5568 * Get network statistics from device. Return @storage.
5569 * The device driver may provide its own method by setting
5570 * dev->netdev_ops->ndo_get_stats64 or dev->netdev_ops->ndo_get_stats;
5571 * otherwise the internal statistics structure is used.
5572 */
struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					struct rtnl_link_stats64 *storage)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_get_stats64) {
		memset(storage, 0, sizeof(*storage));
		ops->ndo_get_stats64(dev, storage);
	} else if (ops->ndo_get_stats) {
		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
	} else {
		netdev_stats_to_stats64(storage, &dev->stats);
	}
	storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
	return storage;
}
EXPORT_SYMBOL(dev_get_stats);

struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
{
	struct netdev_queue *queue = dev_ingress_queue(dev);

#ifdef CONFIG_NET_CLS_ACT
	if (queue)
		return queue;
	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return NULL;
	netdev_init_one_queue(dev, queue, NULL);
	queue->qdisc = &noop_qdisc;
	queue->qdisc_sleeping = &noop_qdisc;
	rcu_assign_pointer(dev->ingress_queue, queue);
#endif
	return queue;
}
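
/**
 *	alloc_netdev_mqs - allocate network device
 *	@sizeof_priv:	size of private data to allocate space for
 *	@name:		device name format string
 *	@setup:		callback to initialize device
 *	@txqs:		the number of TX subqueues to allocate
 *	@rxqs:		the number of RX subqueues to allocate
 *
 *	Allocates a struct net_device with private data area for driver use
 *	and performs basic initialization.  Also allocates subqueue structs
 *	for each queue on the device.
 */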
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
		void (*setup)(struct net_device *),
		unsigned int txqs, unsigned int rxqs)
{
	struct net_device *dev;
	size_t alloc_size;
	struct net_device *p;

	BUG_ON(strlen(name) >= sizeof(dev->name));

	if (txqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device "
		       "with zero queues.\n");
		return NULL;
	}

#ifdef CONFIG_RPS
	if (rxqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device "
		       "with zero RX queues.\n");
		return NULL;
	}
#endif

	alloc_size = sizeof(struct net_device);
	if (sizeof_priv) {
		/* ensure 32-byte alignment of private area */
		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct */
	alloc_size += NETDEV_ALIGN - 1;

	p = kzalloc(alloc_size, GFP_KERNEL);
	if (!p) {
		printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
		return NULL;
	}

	dev = PTR_ALIGN(p, NETDEV_ALIGN);
	dev->padded = (char *)dev - (char *)p;

	dev->pcpu_refcnt = alloc_percpu(int);
	if (!dev->pcpu_refcnt)
		goto free_p;

	if (dev_addr_init(dev))
		goto free_pcpu;

	dev_mc_init(dev);
	dev_uc_init(dev);

	dev_net_set(dev, &init_net);

	dev->gso_max_size = GSO_MAX_SIZE;

	INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
	dev->ethtool_ntuple_list.count = 0;
	INIT_LIST_HEAD(&dev->napi_list);
	INIT_LIST_HEAD(&dev->unreg_list);
	INIT_LIST_HEAD(&dev->link_watch_list);
	dev->priv_flags = IFF_XMIT_DST_RELEASE;
	setup(dev);

	dev->num_tx_queues = txqs;
	dev->real_num_tx_queues = txqs;
	if (netif_alloc_netdev_queues(dev))
		goto free_all;

#ifdef CONFIG_RPS
	dev->num_rx_queues = rxqs;
	dev->real_num_rx_queues = rxqs;
	if (netif_alloc_rx_queues(dev))
		goto free_all;
#endif

	strcpy(dev->name, name);
	return dev;

free_all:
	free_netdev(dev);
	return NULL;

free_pcpu:
	free_percpu(dev->pcpu_refcnt);
	kfree(dev->_tx);
#ifdef CONFIG_RPS
	kfree(dev->_rx);
#endif

free_p:
	kfree(p);
	return NULL;
}
EXPORT_SYMBOL(alloc_netdev_mqs);
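
/**
 *	free_netdev - free network device
 *	@dev: device
 *
 *	This function does the last stage of destroying an allocated device
 *	interface. The reference to the device object is released.
 *	If this is the last reference then it will be freed.
 */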
void free_netdev(struct net_device *dev)
{
	struct napi_struct *p, *n;

	release_net(dev_net(dev));

	kfree(dev->_tx);
#ifdef CONFIG_RPS
	kfree(dev->_rx);
#endif

	kfree(rcu_dereference_raw(dev->ingress_queue));

	/* Flush device addresses */
	dev_addr_flush(dev);

	/* Clear ethtool n-tuple list */
	ethtool_ntuple_flush(dev);

	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
		netif_napi_del(p);

	free_percpu(dev->pcpu_refcnt);
	dev->pcpu_refcnt = NULL;

	/* Compatibility with error handling in drivers */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		kfree((char *)dev - dev->padded);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

	/* will free via device release */
	put_device(&dev->dev);
}
EXPORT_SYMBOL(free_netdev);
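
/**
 *	synchronize_net -  Synchronize with packet receive processing
 *
 *	Wait for packets currently being received to be done.
 *	Does not block later packets from starting.
 */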
void synchronize_net(void)
{
	might_sleep();
	synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);
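
/**
 *	unregister_netdevice_queue - remove device from the kernel
 *	@dev: device
 *	@head: list
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *	If head not NULL, device is queued to be unregistered later.
 *
 *	Callers must hold the rtnl semaphore.  You may want
 *	unregister_netdev() instead of this.
 */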
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{
	ASSERT_RTNL();

	if (head) {
		list_move_tail(&dev->unreg_list, head);
	} else {
		rollback_registered(dev);
		/* Finish processing unregister after unlock */
		net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_queue);
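
/**
 *	unregister_netdevice_many - unregister many devices
 *	@head: list of devices
 */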
void unregister_netdevice_many(struct list_head *head)
{
	struct net_device *dev;

	if (!list_empty(head)) {
		rollback_registered_many(head);
		list_for_each_entry(dev, head, unreg_list)
			net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_many);
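
/**
 *	unregister_netdev - remove device from the kernel
 *	@dev: device
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *
 *	This is just a wrapper for unregister_netdevice that takes
 *	the rtnl semaphore.  In general you want to use this and not
 *	unregister_netdevice.
 */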
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);
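
/**
 *	dev_change_net_namespace - move device to different network namespace
 *	@dev: device
 *	@net: network namespace
 *	@pat: If not NULL name pattern to try if the current device name
 *	      is already taken in the destination network namespace.
 *
 *	This function shuts down a device interface and moves it
 *	to a new network namespace. On success 0 is returned, on
 *	a failure a negative errno code is returned.
 *
 *	Callers must hold the rtnl semaphore.
 */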
int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
	int err;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->features & NETIF_F_NETNS_LOCAL)
		goto out;

	/* Ensure the device has been registered */
	err = -EINVAL;
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing to do */
	err = 0;
	if (net_eq(dev_net(dev), net))
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	if (__dev_get_by_name(net, dev->name)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		if (dev_get_valid_name(dev, pat, 1))
			goto out;
	}

	/*
	 * And now a mini version of register_netdevice unregister_netdevice.
	 */

	/* If device is running close it first. */
	dev_close(dev);

	/* And unlink it from device chain */
	err = -ENODEV;
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols, that we are about to destroy
	   this device. They should clean all the things.

	   Note that dev->reg_state stays at NETREG_REGISTERED.
	   This is wanted because this way 8021q and macvlan know
	   the device is just moving and can keep their slaves up.
	*/
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
	call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);

	/*
	 *	Flush the unicast and multicast chains
	 */
	dev_uc_flush(dev);
	dev_mc_flush(dev);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);

	/* If there is an ifindex conflict assign a new one */
	if (__dev_get_by_index(net, dev->ifindex)) {
		int iflink = (dev->iflink == dev->ifindex);
		dev->ifindex = dev_new_index(net);
		if (iflink)
			dev->iflink = dev->ifindex;
	}

	/* Fixup kobjects */
	err = device_rename(&dev->dev, dev->name);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols, that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	/*
	 *	Prevent userspace races by waiting until the network
	 *	device is fully setup before sending notifications.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);

	synchronize_net();
	err = 0;
out:
	return err;
}
EXPORT_SYMBOL_GPL(dev_change_net_namespace);

static int dev_cpu_callback(struct notifier_block *nfb,
			    unsigned long action,
			    void *ocpu)
{
	struct sk_buff **list_skb;
	struct sk_buff *skb;
	unsigned int cpu, oldcpu = (unsigned long)ocpu;
	struct softnet_data *sd, *oldsd;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;

	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Append output queue from offline CPU. */
	if (oldsd->output_queue) {
		*sd->output_queue_tailp = oldsd->output_queue;
		sd->output_queue_tailp = oldsd->output_queue_tailp;
		oldsd->output_queue = NULL;
		oldsd->output_queue_tailp = &oldsd->output_queue;
	}

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
		netif_rx(skb);
		input_queue_head_incr(oldsd);
	}
	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
		netif_rx(skb);
		input_queue_head_incr(oldsd);
	}

	return NOTIFY_OK;
}
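
/**
 *	netdev_increment_features - increment feature set by one
 *	@all: current feature set
 *	@one: new feature set
 *	@mask: mask feature set
 *
 *	Computes a new feature set after adding a device with feature set
 *	@one to the master device with current feature set @all.  Will not
 *	enable anything that is off in @mask. Returns the new feature set.
 */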
unsigned long netdev_increment_features(unsigned long all, unsigned long one,
					unsigned long mask)
{
	/* If device needs checksumming, downgrade to it. */
	if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
		all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
	else if (mask & NETIF_F_ALL_CSUM) {
		/* If one device supports v4/v6 checksumming, set for all. */
		if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
		    !(all & NETIF_F_GEN_CSUM)) {
			all &= ~NETIF_F_ALL_CSUM;
			all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
		}

		/* If one device supports hw checksumming, set for all. */
		if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
			all &= ~NETIF_F_ALL_CSUM;
			all |= NETIF_F_HW_CSUM;
		}
	}

	one |= NETIF_F_ALL_CSUM;

	one |= all & NETIF_F_ONE_FOR_ALL;
	all &= one | NETIF_F_LLTX | NETIF_F_GSO | NETIF_F_UFO;
	all |= one & mask & NETIF_F_ONE_FOR_ALL;

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);

static struct hlist_head *netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}
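
/* Initialize per network namespace state */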
static int __net_init netdev_init(struct net *net)
{
	INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}
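
/**
 *	netdev_drivername - network driver for the device
 *	@dev: network device
 *	@buffer: buffer for resulting name
 *	@len: size of buffer
 *
 *	Determine network driver for device.
 */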
char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
{
	const struct device_driver *driver;
	const struct device *parent;

	if (len <= 0 || !buffer)
		return buffer;
	buffer[0] = 0;

	parent = dev->dev.parent;

	if (!parent)
		return buffer;

	driver = parent->driver;
	if (driver && driver->name)
		strlcpy(buffer, driver->name, len);
	return buffer;
}

static int __netdev_printk(const char *level, const struct net_device *dev,
			   struct va_format *vaf)
{
	int r;

	if (dev && dev->dev.parent)
		r = dev_printk(level, dev->dev.parent, "%s: %pV",
			       netdev_name(dev), vaf);
	else if (dev)
		r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
	else
		r = printk("%s(NULL net_device): %pV", level, vaf);

	return r;
}

int netdev_printk(const char *level, const struct net_device *dev,
		  const char *format, ...)
{
	struct va_format vaf;
	va_list args;
	int r;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	r = __netdev_printk(level, dev, &vaf);
	va_end(args);

	return r;
}
EXPORT_SYMBOL(netdev_printk);

#define define_netdev_printk_level(func, level)			\
int func(const struct net_device *dev, const char *fmt, ...)		\
{									\
	int r;								\
	struct va_format vaf;						\
	va_list args;							\
									\
	va_start(args, fmt);						\
									\
	vaf.fmt = fmt;							\
	vaf.va = &args;							\
									\
	r = __netdev_printk(level, dev, &vaf);				\
	va_end(args);							\
									\
	return r;							\
}									\
EXPORT_SYMBOL(func);

define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);

static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};

static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *aux;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmoveable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
			       __func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}

static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace.  Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	list_del(&dev_kill_list);
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
	.exit_batch = default_device_exit_batch,
};
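
/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 *	This is called single threaded during boot, so no need
 *	to take the rtnl semaphore.
 */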
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 *	Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		memset(sd, 0, sizeof(*sd));
		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
		sd->completion_queue = NULL;
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->csd.flags = 0;
		sd->cpu = i;
#endif

		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
		sd->backlog.gro_list = NULL;
		sd->backlog.gro_count = 0;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace the loopback device must
	 * be present too. Since we now dynamically allocate and free
	 * the loopback device, ensure this invariant is maintained by
	 * keeping the loopback device as the first device on the list
	 * of network devices, so it is the first device that appears
	 * and the last network device that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	dev_mcast_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);
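
/* Seed hashrnd at late init, once the entropy pool is better initialized. */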
static int __init initialize_hashrnd(void)
{
	get_random_bytes(&hashrnd, sizeof(hashrnd));
	return 0;
}

late_initcall_sync(initialize_hashrnd);