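/*
 *	NET3	Protocol independent device support routines.
 */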
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_ingress.h>
#include <linux/crash_dump.h>

#include "net-sysfs.h"

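/*
 * MAX_GRO_SKBS bounds the per-NAPI gro_list; GRO_MAX_HEAD is the linear
 * header room reserved when allocating a GRO skb.
 */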
#define MAX_GRO_SKBS 8

#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
					 struct net_device *dev,
					 struct netdev_notifier_info *info);
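/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.  Pure readers hold dev_base_lock for reading, or rcu_read_lock().
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.
 */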
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);

static seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0)
		;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}
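/* Device list insertion; caller must hold the RTNL semaphore. */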
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

static RAW_NOTIFIER_HEAD(netdev_chain);

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP

static const unsigned short netdev_lock_type[] = {
	ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] = {
	"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	"_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	"_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	"_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;

	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif
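/*******************************************************************************
 *
 *		Protocol management and registration routines
 *
 ******************************************************************************/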
static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
	else
		return pt->dev ? &pt->dev->ptype_specific :
				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}
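/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee all CPU's that are in middle of receiving packets
 *	will see the new packet type (until the next received packet).
 *
 *	A typical caller supplies a statically allocated handler (sketch;
 *	my_ptype and my_rcv are hypothetical names):
 *
 *		static struct packet_type my_ptype __read_mostly = {
 *			.type = cpu_to_be16(ETH_P_IP),
 *			.func = my_rcv,
 *		};
 *		dev_add_pack(&my_ptype);
 */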
void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
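/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */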
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);

void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
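/******************************************************************************
 *
 *		      Device Boot-time Settings Routines
 *
 ******************************************************************************/

/* Boot time configuration table */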
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq = s[i].map.irq;
			dev->base_addr = s[i].map.base_addr;
			dev->mem_start = s[i].map.mem_start;
			dev->mem_end = s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);

unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

int dev_get_iflink(const struct net_device *dev)
{
	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
		return dev->netdev_ops->ndo_get_iflink(dev);

	return dev->ifindex;
}
EXPORT_SYMBOL(dev_get_iflink);

int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info;

	if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
		return -EINVAL;

	info = skb_tunnel_info_unclone(skb);
	if (!info)
		return -ENOMEM;
	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
		return -EINVAL;

	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
}
EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
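/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks. Callers that instead use dev_get_by_name()
 *	receive a held reference and must release it with dev_put().
 */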
struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	unsigned int seq;

retry:
	seq = raw_seqcount_begin(&devnet_rename_seq);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	strcpy(name, dev->name);
	rcu_read_unlock();
	if (read_seqcount_retry(&devnet_rename_seq, seq)) {
		cond_resched();
		goto retry;
	}

	return 0;
}

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
				      unsigned short mask)
{
	struct net_device *dev, *ret;

	ASSERT_RTNL();

	ret = NULL;
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);

bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || *name == ':' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);
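/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */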
static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	return -ENFILE;
}

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);

static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

static int dev_get_valid_name(struct net *net,
			      struct net_device *dev,
			      const char *name)
{
	BUG_ON(!net);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}
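/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */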
int dev_change_name(struct net_device *dev, const char *newname)
{
	unsigned char old_assign_type;
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	write_seqcount_begin(&devnet_rename_seq);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		write_seqcount_end(&devnet_rename_seq);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		write_seqcount_end(&devnet_rename_seq);
		return err;
	}

	if (oldname[0] && !strchr(oldname, '%'))
		netdev_info(dev, "renamed from %s\n", oldname);

	old_assign_type = dev->name_assign_type;
	dev->name_assign_type = NET_NAME_RENAMED;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		dev->name_assign_type = old_assign_type;
		write_seqcount_end(&devnet_rename_seq);
		return ret;
	}

	write_seqcount_end(&devnet_rename_seq);

	netdev_adjacent_rename_links(dev, oldname);

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		if (err >= 0) {
			err = ret;
			write_seqcount_begin(&devnet_rename_seq);
			memcpy(dev->name, oldname, IFNAMSIZ);
			memcpy(oldname, newname, IFNAMSIZ);
			dev->name_assign_type = old_assign_type;
			old_assign_type = NET_NAME_RENAMED;
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}

int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	char *new_ifalias;

	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		kfree(dev->ifalias);
		dev->ifalias = NULL;
		return 0;
	}

	new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!new_ifalias)
		return -ENOMEM;
	dev->ifalias = new_ifalias;
	memcpy(dev->ifalias, alias, len);
	dev->ifalias[len] = 0;

	return len;
}

void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		struct netdev_notifier_change_info change_info;

		change_info.flags_changed = 0;
		call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
					      &change_info.info);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
	}
}
EXPORT_SYMBOL(netdev_state_change);

void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);

static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	if (!netif_device_present(dev))
		return -ENODEV;

	netpoll_poll_disable(dev);

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_poll_enable(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}
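/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */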
int dev_open(struct net_device *dev)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);

static int __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, close_list) {
		netpoll_poll_disable(dev);

		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		smp_mb__after_atomic();
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, close_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		netpoll_poll_enable(dev);
	}

	return 0;
}

static int __dev_close(struct net_device *dev)
{
	int retval;
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	retval = __dev_close_many(&single);
	list_del(&single);

	return retval;
}

int dev_close_many(struct list_head *head, bool unlink)
{
	struct net_device *dev, *tmp;

	list_for_each_entry_safe(dev, tmp, head, close_list)
		if (!(dev->flags & IFF_UP))
			list_del_init(&dev->close_list);

	__dev_close_many(head);

	list_for_each_entry_safe(dev, tmp, head, close_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
		if (unlink)
			list_del_init(&dev->close_list);
	}

	return 0;
}
EXPORT_SYMBOL(dev_close_many);
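/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */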
int dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->close_list, &single);
		dev_close_many(&single, true);
		list_del(&single);
	}
	return 0;
}
EXPORT_SYMBOL(dev_close);

void dev_disable_lro(struct net_device *dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		dev_disable_lro(lower_dev);
}
EXPORT_SYMBOL(dev_disable_lro);

static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
				   struct net_device *dev)
{
	struct netdev_notifier_info info;

	netdev_notifier_info_init(&info, dev);
	return nb->notifier_call(nb, val, &info);
}

static int dev_boot_phase = 1;
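/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered all registration and up events are replayed
 *	to the new notifier to allow device to have a race free
 *	view of the network device list.
 *
 *	Typical usage (sketch; my_nb and my_event are hypothetical names):
 *
 *		static struct notifier_block my_nb = {
 *			.notifier_call = my_event,
 *		};
 *		register_netdevice_notifier(&my_nb);
 */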
int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			call_netdevice_notifier(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				goto outroll;

			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}

outroll:
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

static int call_netdevice_notifiers_info(unsigned long val,
					 struct net_device *dev,
					 struct netdev_notifier_info *info)
{
	ASSERT_RTNL();
	netdev_notifier_info_init(info, dev);
	return raw_notifier_call_chain(&netdev_chain, val, info);
}

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	struct netdev_notifier_info info;

	return call_netdevice_notifiers_info(val, dev, &info);
}
EXPORT_SYMBOL(call_netdevice_notifiers);

#ifdef CONFIG_NET_INGRESS
static struct static_key ingress_needed __read_mostly;

void net_inc_ingress_queue(void)
{
	static_key_slow_inc(&ingress_needed);
}
EXPORT_SYMBOL_GPL(net_inc_ingress_queue);

void net_dec_ingress_queue(void)
{
	static_key_slow_dec(&ingress_needed);
}
EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
#endif

#ifdef CONFIG_NET_EGRESS
static struct static_key egress_needed __read_mostly;

void net_inc_egress_queue(void)
{
	static_key_slow_inc(&egress_needed);
}
EXPORT_SYMBOL_GPL(net_inc_egress_queue);

void net_dec_egress_queue(void)
{
	static_key_slow_dec(&egress_needed);
}
EXPORT_SYMBOL_GPL(net_dec_egress_queue);
#endif

static struct static_key netstamp_needed __read_mostly;
#ifdef HAVE_JUMP_LABEL
static atomic_t netstamp_needed_deferred;
static atomic_t netstamp_wanted;
static void netstamp_clear(struct work_struct *work)
{
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
	int wanted;

	wanted = atomic_add_return(deferred, &netstamp_wanted);
	if (wanted > 0)
		static_key_enable(&netstamp_needed);
	else
		static_key_disable(&netstamp_needed);
}
static DECLARE_WORK(netstamp_work, netstamp_clear);
#endif

void net_enable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int wanted;

	while (1) {
		wanted = atomic_read(&netstamp_wanted);
		if (wanted <= 0)
			break;
		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
			return;
	}
	atomic_inc(&netstamp_needed_deferred);
	schedule_work(&netstamp_work);
#else
	static_key_slow_inc(&netstamp_needed);
#endif
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int wanted;

	while (1) {
		wanted = atomic_read(&netstamp_wanted);
		if (wanted <= 1)
			break;
		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
			return;
	}
	atomic_dec(&netstamp_needed_deferred);
	schedule_work(&netstamp_work);
#else
	static_key_slow_dec(&netstamp_needed);
#endif
}
EXPORT_SYMBOL(net_disable_timestamp);

static inline void net_timestamp_set(struct sk_buff *skb)
{
	skb->tstamp = 0;
	if (static_key_false(&netstamp_needed))
		__net_timestamp(skb);
}

#define net_timestamp_check(COND, SKB)			\
	if (static_key_false(&netstamp_needed)) {	\
		if ((COND) && !(SKB)->tstamp)		\
			__net_timestamp(SKB);		\
	}						\

bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
{
	unsigned int len;

	if (!(dev->flags & IFF_UP))
		return false;

	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
	if (skb->len <= len)
		return true;

	if (skb_is_gso(skb))
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(is_skb_forwardable);

int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	int ret = ____dev_forward_skb(dev, skb);

	if (likely(!ret)) {
		skb->protocol = eth_type_trans(skb, dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(__dev_forward_skb);

int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);

static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
		return -ENOMEM;
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

static inline void deliver_ptype_list_skb(struct sk_buff *skb,
					  struct packet_type **pt,
					  struct net_device *orig_dev,
					  __be16 type,
					  struct list_head *ptype_list)
{
	struct packet_type *ptype, *pt_prev = *pt;

	list_for_each_entry_rcu(ptype, ptype_list, list) {
		if (ptype->type != type)
			continue;
		if (pt_prev)
			deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}
	*pt = pt_prev;
}

static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{
	if (!ptype->af_packet_priv || !skb->sk)
		return false;

	if (ptype->id_match)
		return ptype->id_match(ptype, skb->sk);
	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
		return true;

	return false;
}
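/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */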
void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;
	struct list_head *ptype_list = &ptype_all;

	rcu_read_lock();
again:
	list_for_each_entry_rcu(ptype, ptype_list, list) {
		if (skb_loop_sk(ptype, skb))
			continue;

		if (pt_prev) {
			deliver_skb(skb2, pt_prev, skb->dev);
			pt_prev = ptype;
			continue;
		}

		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2)
			goto out_unlock;

		net_timestamp_set(skb2);

		skb_reset_mac_header(skb2);

		if (skb_network_header(skb2) < skb2->data ||
		    skb_network_header(skb2) > skb_tail_pointer(skb2)) {
			net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
					     ntohs(skb2->protocol),
					     dev->name);
			skb_reset_network_header(skb2);
		}

		skb2->transport_header = skb2->network_header;
		skb2->pkt_type = PACKET_OUTGOING;
		pt_prev = ptype;
	}

	if (ptype_list == &ptype_all) {
		ptype_list = &dev->ptype_all;
		goto again;
	}
out_unlock:
	if (pt_prev)
		pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
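/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues stale skbs on the qdisc must be flushed.
 */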
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
	int i;
	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

	if (tc->offset + tc->count > txq) {
		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
		dev->num_tc = 0;
		return;
	}

	for (i = 1; i < TC_BITMASK + 1; i++) {
		int q = netdev_get_prio_tc_map(dev, i);

		tc = &dev->tc_to_txq[q];
		if (tc->offset + tc->count > txq) {
			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
				i, q);
			netdev_set_prio_tc_map(dev, i, 0);
		}
	}
}

int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
{
	if (dev->num_tc) {
		struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
		int i;

		for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
			if ((txq - tc->offset) < tc->count)
				return i;
		}

		return -1;
	}

	return 0;
}

#ifdef CONFIG_XPS
static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P)		\
	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))

static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
			     int tci, u16 index)
{
	struct xps_map *map = NULL;
	int pos;

	if (dev_maps)
		map = xmap_dereference(dev_maps->cpu_map[tci]);
	if (!map)
		return false;

	for (pos = map->len; pos--;) {
		if (map->queues[pos] != index)
			continue;

		if (map->len > 1) {
			map->queues[pos] = map->queues[--map->len];
			break;
		}

		RCU_INIT_POINTER(dev_maps->cpu_map[tci], NULL);
		kfree_rcu(map, rcu);
		return false;
	}

	return true;
}
static bool remove_xps_queue_cpu(struct net_device *dev,
				 struct xps_dev_maps *dev_maps,
				 int cpu, u16 offset, u16 count)
{
	int num_tc = dev->num_tc ? : 1;
	bool active = false;
	int tci;

	for (tci = cpu * num_tc; num_tc--; tci++) {
		int i, j;

		for (i = count, j = offset; i--; j++) {
			/* remove_xps_queue() expects a map index (tci), not
			 * the bare CPU number, so that every traffic class on
			 * this CPU is scrubbed, not just the first one.
			 */
			if (!remove_xps_queue(dev_maps, tci, j))
				break;
		}

		active |= i < 0;
	}

	return active;
}

static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
				   u16 count)
{
	struct xps_dev_maps *dev_maps;
	int cpu, i;
	bool active = false;

	mutex_lock(&xps_map_mutex);
	dev_maps = xmap_dereference(dev->xps_maps);

	if (!dev_maps)
		goto out_no_maps;

	for_each_possible_cpu(cpu)
		active |= remove_xps_queue_cpu(dev, dev_maps, cpu,
					       offset, count);

	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

	for (i = offset + (count - 1); count--; i--)
		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
					     NUMA_NO_NODE);

out_no_maps:
	mutex_unlock(&xps_map_mutex);
}

static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
{
	netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
}

static struct xps_map *expand_xps_map(struct xps_map *map,
				      int cpu, u16 index)
{
	struct xps_map *new_map;
	int alloc_len = XPS_MIN_MAP_ALLOC;
	int i, pos;

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] != index)
			continue;
		return map;
	}

	if (map) {
		if (pos < map->alloc_len)
			return map;

		alloc_len = map->alloc_len * 2;
	}

	new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
			       cpu_to_node(cpu));
	if (!new_map)
		return NULL;

	for (i = 0; i < pos; i++)
		new_map->queues[i] = map->queues[i];
	new_map->alloc_len = alloc_len;
	new_map->len = pos;

	return new_map;
}

int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
			u16 index)
{
	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
	int i, cpu, tci, numa_node_id = -2;
	int maps_sz, num_tc = 1, tc = 0;
	struct xps_map *map, *new_map;
	bool active = false;

	if (dev->num_tc) {
		num_tc = dev->num_tc;
		tc = netdev_txq_to_tc(dev, index);
		if (tc < 0)
			return -EINVAL;
	}

	maps_sz = XPS_DEV_MAPS_SIZE(num_tc);
	if (maps_sz < L1_CACHE_BYTES)
		maps_sz = L1_CACHE_BYTES;

	mutex_lock(&xps_map_mutex);

	dev_maps = xmap_dereference(dev->xps_maps);

	for_each_cpu_and(cpu, cpu_online_mask, mask) {
		if (!new_dev_maps)
			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
		if (!new_dev_maps) {
			mutex_unlock(&xps_map_mutex);
			return -ENOMEM;
		}

		tci = cpu * num_tc + tc;
		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[tci]) :
				 NULL;

		map = expand_xps_map(map, cpu, index);
		if (!map)
			goto error;

		RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
	}

	if (!new_dev_maps)
		goto out_no_new_maps;

	for_each_possible_cpu(cpu) {
		for (i = tc, tci = cpu * num_tc; dev_maps && i--; tci++) {
			map = xmap_dereference(dev_maps->cpu_map[tci]);
			RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
		}

		tci = cpu * num_tc + tc;

		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
			int pos = 0;

			map = xmap_dereference(new_dev_maps->cpu_map[tci]);
			while ((pos < map->len) && (map->queues[pos] != index))
				pos++;

			if (pos == map->len)
				map->queues[map->len++] = index;
#ifdef CONFIG_NUMA
			if (numa_node_id == -2)
				numa_node_id = cpu_to_node(cpu);
			else if (numa_node_id != cpu_to_node(cpu))
				numa_node_id = -1;
#endif
		} else if (dev_maps) {
			map = xmap_dereference(dev_maps->cpu_map[tci]);
			RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
		}

		for (i = num_tc - tc, tci++; dev_maps && --i; tci++) {
			map = xmap_dereference(dev_maps->cpu_map[tci]);
			RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
		}
	}

	rcu_assign_pointer(dev->xps_maps, new_dev_maps);

	if (!dev_maps)
		goto out_no_old_maps;

	for_each_possible_cpu(cpu) {
		for (i = num_tc, tci = cpu * num_tc; i--; tci++) {
			new_map = xmap_dereference(new_dev_maps->cpu_map[tci]);
			map = xmap_dereference(dev_maps->cpu_map[tci]);
			if (map && map != new_map)
				kfree_rcu(map, rcu);
		}
	}

	kfree_rcu(dev_maps, rcu);

out_no_old_maps:
	dev_maps = new_dev_maps;
	active = true;

out_no_new_maps:
	netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
				     (numa_node_id >= 0) ? numa_node_id :
				     NUMA_NO_NODE);

	if (!dev_maps)
		goto out_no_maps;

	for_each_possible_cpu(cpu) {
		for (i = tc, tci = cpu * num_tc; i--; tci++)
			active |= remove_xps_queue(dev_maps, tci, index);
		if (!cpumask_test_cpu(cpu, mask) || !cpu_online(cpu))
			active |= remove_xps_queue(dev_maps, tci, index);
		for (i = num_tc - tc, tci++; --i; tci++)
			active |= remove_xps_queue(dev_maps, tci, index);
	}

	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

out_no_maps:
	mutex_unlock(&xps_map_mutex);

	return 0;
error:
	for_each_possible_cpu(cpu) {
		for (i = num_tc, tci = cpu * num_tc; i--; tci++) {
			new_map = xmap_dereference(new_dev_maps->cpu_map[tci]);
			map = dev_maps ?
			      xmap_dereference(dev_maps->cpu_map[tci]) :
			      NULL;
			if (new_map && new_map != map)
				kfree(new_map);
		}
	}

	mutex_unlock(&xps_map_mutex);

	kfree(new_dev_maps);
	return -ENOMEM;
}
EXPORT_SYMBOL(netif_set_xps_queue);

#endif

void netdev_reset_tc(struct net_device *dev)
{
#ifdef CONFIG_XPS
	netif_reset_xps_queues_gt(dev, 0);
#endif
	dev->num_tc = 0;
	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
}
EXPORT_SYMBOL(netdev_reset_tc);

int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

#ifdef CONFIG_XPS
	netif_reset_xps_queues(dev, offset, count);
#endif
	dev->tc_to_txq[tc].count = count;
	dev->tc_to_txq[tc].offset = offset;
	return 0;
}
EXPORT_SYMBOL(netdev_set_tc_queue);

int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
{
	if (num_tc > TC_MAX_QUEUE)
		return -EINVAL;

#ifdef CONFIG_XPS
	netif_reset_xps_queues_gt(dev, 0);
#endif
	dev->num_tc = num_tc;
	return 0;
}
EXPORT_SYMBOL(netdev_set_num_tc);

int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	int rc;

	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING) {
		ASSERT_RTNL();

		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
						  txq);
		if (rc)
			return rc;

		if (dev->num_tc)
			netif_setup_tc(dev, txq);

		if (txq < dev->real_num_tx_queues) {
			qdisc_reset_all_tx_gt(dev, txq);
#ifdef CONFIG_XPS
			netif_reset_xps_queues_gt(dev, txq);
#endif
		}
	}

	dev->real_num_tx_queues = txq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);

#ifdef CONFIG_SYSFS
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
#endif

int netif_get_num_default_rss_queues(void)
{
	return is_kdump_kernel() ?
		1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
}
EXPORT_SYMBOL(netif_get_num_default_rss_queues);

static void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

struct dev_kfree_skb_cb {
	enum skb_free_reason reason;
};

static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
{
	return (struct dev_kfree_skb_cb *)skb->cb;
}

void netif_schedule_queue(struct netdev_queue *txq)
{
	rcu_read_lock();
	if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
		struct Qdisc *q = rcu_dereference(txq->qdisc);

		__netif_schedule(q);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(netif_schedule_queue);

void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
		struct Qdisc *q;

		rcu_read_lock();
		q = rcu_dereference(dev_queue->qdisc);
		__netif_schedule(q);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(netif_tx_wake_queue);

void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
{
	unsigned long flags;

	if (unlikely(!skb))
		return;

	if (likely(atomic_read(&skb->users) == 1)) {
		smp_rmb();
		atomic_set(&skb->users, 0);
	} else if (likely(!atomic_dec_and_test(&skb->users))) {
		return;
	}
	get_kfree_skb_cb(skb)->reason = reason;
	local_irq_save(flags);
	skb->next = __this_cpu_read(softnet_data.completion_queue);
	__this_cpu_write(softnet_data.completion_queue, skb);
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__dev_kfree_skb_irq);

void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
{
	if (in_irq() || irqs_disabled())
		__dev_kfree_skb_irq(skb, reason);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(__dev_kfree_skb_any);

void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
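/*
 * Returns a Tx hash based on the given packet descriptor a Tx queues' number
 * to be used as a distribution range.
 */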
u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
		  unsigned int num_tx_queues)
{
	u32 hash;
	u16 qoffset = 0;
	u16 qcount = num_tx_queues;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= num_tx_queues))
			hash -= num_tx_queues;
		return hash;
	}

	if (dev->num_tc) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);

		qoffset = dev->tc_to_txq[tc].offset;
		qcount = dev->tc_to_txq[tc].count;
	}

	return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
}
EXPORT_SYMBOL(__skb_tx_hash);

static void skb_warn_bad_offload(const struct sk_buff *skb)
{
	static const netdev_features_t null_features;
	struct net_device *dev = skb->dev;
	const char *name = "";

	if (!net_ratelimit())
		return;

	if (dev) {
		if (dev->dev.parent)
			name = dev_driver_string(dev->dev.parent);
		else
			name = netdev_name(dev);
	}
	WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
	     "gso_type=%d ip_summed=%d\n",
	     name, dev ? &dev->features : &null_features,
	     skb->sk ? &skb->sk->sk_route_caps : &null_features,
	     skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
	     skb_shinfo(skb)->gso_type, skb->ip_summed);
}
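/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */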
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		skb_warn_bad_offload(skb);
		return -EINVAL;
	}

	if (skb_has_shared_frag(skb)) {
		ret = __skb_linearize(skb);
		if (ret)
			goto out;
	}

	offset = skb_checksum_start_offset(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);

__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
{
	__be16 type = skb->protocol;

	if (type == htons(ETH_P_TEB)) {
		struct ethhdr *eth;

		if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
			return 0;

		eth = (struct ethhdr *)skb_mac_header(skb);
		type = eth->h_proto;
	}

	return __vlan_get_protocol(skb, type, depth);
}

struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;
	int vlan_depth = skb->mac_len;
	__be16 type = skb_network_protocol(skb, &vlan_depth);

	if (unlikely(!type))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, vlan_depth);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_mac_gso_segment);

static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
{
	if (tx_path)
		return skb->ip_summed != CHECKSUM_PARTIAL &&
		       skb->ip_summed != CHECKSUM_NONE;

	return skb->ip_summed == CHECKSUM_NONE;
}
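/**
 *	__skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *	@tx_path: whether it is called in TX path
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 */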
2691struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2692 netdev_features_t features, bool tx_path)
2693{
2694 struct sk_buff *segs;
2695
2696 if (unlikely(skb_needs_check(skb, tx_path))) {
2697 int err;
2698
2699
2700 err = skb_cow_head(skb, 0);
2701 if (err < 0)
2702 return ERR_PTR(err);
2703 }
2704
2705
2706
2707
2708
2709 if (features & NETIF_F_GSO_PARTIAL) {
2710 netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
2711 struct net_device *dev = skb->dev;
2712
2713 partial_features |= dev->features & dev->gso_partial_features;
2714 if (!skb_gso_ok(skb, features | partial_features))
2715 features &= ~NETIF_F_GSO_PARTIAL;
2716 }
2717
2718 BUILD_BUG_ON(SKB_SGO_CB_OFFSET +
2719 sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
2720
2721 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
2722 SKB_GSO_CB(skb)->encap_level = 0;
2723
2724 skb_reset_mac_header(skb);
2725 skb_reset_mac_len(skb);
2726
2727 segs = skb_mac_gso_segment(skb, features);
2728
2729 if (unlikely(skb_needs_check(skb, tx_path)))
2730 skb_warn_bad_offload(skb);
2731
2732 return segs;
2733}
2734EXPORT_SYMBOL(__skb_gso_segment);
2735
2736
2737#ifdef CONFIG_BUG
2738void netdev_rx_csum_fault(struct net_device *dev)
2739{
2740 if (net_ratelimit()) {
2741 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
2742 dump_stack();
2743 }
2744}
2745EXPORT_SYMBOL(netdev_rx_csum_fault);
2746#endif
2747
2748
2749
2750
2751
2752
2753static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
2754{
2755#ifdef CONFIG_HIGHMEM
2756 int i;
2757
2758 if (!(dev->features & NETIF_F_HIGHDMA)) {
2759 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2760 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2761
2762 if (PageHighMem(skb_frag_page(frag)))
2763 return 1;
2764 }
2765 }
2766
2767 if (PCI_DMA_BUS_IS_PHYS) {
2768 struct device *pdev = dev->dev.parent;
2769
2770 if (!pdev)
2771 return 0;
2772 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2773 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2774 dma_addr_t addr = page_to_phys(skb_frag_page(frag));
2775
2776 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2777 return 1;
2778 }
2779 }
2780#endif
2781 return 0;
2782}
2783
2784
2785
2786
2787#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
2788static netdev_features_t net_mpls_features(struct sk_buff *skb,
2789 netdev_features_t features,
2790 __be16 type)
2791{
2792 if (eth_p_mpls(type))
2793 features &= skb->dev->mpls_features;
2794
2795 return features;
2796}
2797#else
2798static netdev_features_t net_mpls_features(struct sk_buff *skb,
2799 netdev_features_t features,
2800 __be16 type)
2801{
2802 return features;
2803}
2804#endif
2805
2806static netdev_features_t harmonize_features(struct sk_buff *skb,
2807 netdev_features_t features)
2808{
2809 int tmp;
2810 __be16 type;
2811
2812 type = skb_network_protocol(skb, &tmp);
2813 features = net_mpls_features(skb, features, type);
2814
2815 if (skb->ip_summed != CHECKSUM_NONE &&
2816 !can_checksum_protocol(features, type)) {
2817 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
2818 }
2819 if (illegal_highdma(skb->dev, skb))
2820 features &= ~NETIF_F_SG;
2821
2822 return features;
2823}
2824
2825netdev_features_t passthru_features_check(struct sk_buff *skb,
2826 struct net_device *dev,
2827 netdev_features_t features)
2828{
2829 return features;
2830}
2831EXPORT_SYMBOL(passthru_features_check);
2832
2833static netdev_features_t dflt_features_check(const struct sk_buff *skb,
2834 struct net_device *dev,
2835 netdev_features_t features)
2836{
2837 return vlan_features_check(skb, features);
2838}
2839
2840static netdev_features_t gso_features_check(const struct sk_buff *skb,
2841 struct net_device *dev,
2842 netdev_features_t features)
2843{
2844 u16 gso_segs = skb_shinfo(skb)->gso_segs;
2845
2846 if (gso_segs > dev->gso_max_segs)
2847 return features & ~NETIF_F_GSO_MASK;
2848
	/* Support for GSO partial features requires software
	 * intervention before we can actually process the packets
	 * so we need to strip support for any partial features now
	 * and we can pull them back in after we have partially
	 * processed the GSO portion of the frame.
	 */
2855 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
2856 features &= ~dev->gso_partial_features;
2857
	/* Make sure to clear the IPv4 ID mangling feature if the
	 * IPv4 header has the potential to be fragmented.
	 */
2861 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
2862 struct iphdr *iph = skb->encapsulation ?
2863 inner_ip_hdr(skb) : ip_hdr(skb);
2864
2865 if (!(iph->frag_off & htons(IP_DF)))
2866 features &= ~NETIF_F_TSO_MANGLEID;
2867 }
2868
2869 return features;
2870}
2871
2872netdev_features_t netif_skb_features(struct sk_buff *skb)
2873{
2874 struct net_device *dev = skb->dev;
2875 netdev_features_t features = dev->features;
2876
2877 if (skb_is_gso(skb))
2878 features = gso_features_check(skb, dev, features);
2879
	/* If encapsulation offload request, verify we are testing
	 * hardware encapsulation features instead of standard
	 * features for the netdev
	 */
2884 if (skb->encapsulation)
2885 features &= dev->hw_enc_features;
2886
2887 if (skb_vlan_tagged(skb))
2888 features = netdev_intersect_features(features,
2889 dev->vlan_features |
2890 NETIF_F_HW_VLAN_CTAG_TX |
2891 NETIF_F_HW_VLAN_STAG_TX);
2892
2893 if (dev->netdev_ops->ndo_features_check)
2894 features &= dev->netdev_ops->ndo_features_check(skb, dev,
2895 features);
2896 else
2897 features &= dflt_features_check(skb, dev, features);
2898
2899 return harmonize_features(skb, features);
2900}
2901EXPORT_SYMBOL(netif_skb_features);
2902
2903static int xmit_one(struct sk_buff *skb, struct net_device *dev,
2904 struct netdev_queue *txq, bool more)
2905{
2906 unsigned int len;
2907 int rc;
2908
2909 if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
2910 dev_queue_xmit_nit(skb, dev);
2911
2912 len = skb->len;
2913 trace_net_dev_start_xmit(skb, dev);
2914 rc = netdev_start_xmit(skb, dev, txq, more);
2915 trace_net_dev_xmit(skb, rc, dev, len);
2916
2917 return rc;
2918}
2919
2920struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
2921 struct netdev_queue *txq, int *ret)
2922{
2923 struct sk_buff *skb = first;
2924 int rc = NETDEV_TX_OK;
2925
2926 while (skb) {
2927 struct sk_buff *next = skb->next;
2928
2929 skb->next = NULL;
2930 rc = xmit_one(skb, dev, txq, next != NULL);
2931 if (unlikely(!dev_xmit_complete(rc))) {
2932 skb->next = next;
2933 goto out;
2934 }
2935
2936 skb = next;
2937 if (netif_xmit_stopped(txq) && skb) {
2938 rc = NETDEV_TX_BUSY;
2939 break;
2940 }
2941 }
2942
2943out:
2944 *ret = rc;
2945 return skb;
2946}
2947
2948static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
2949 netdev_features_t features)
2950{
2951 if (skb_vlan_tag_present(skb) &&
2952 !vlan_hw_offload_capable(features, skb->vlan_proto))
2953 skb = __vlan_hwaccel_push_inside(skb);
2954 return skb;
2955}
2956
2957static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
2958{
2959 netdev_features_t features;
2960
2961 features = netif_skb_features(skb);
2962 skb = validate_xmit_vlan(skb, features);
2963 if (unlikely(!skb))
2964 goto out_null;
2965
2966 if (netif_needs_gso(skb, features)) {
2967 struct sk_buff *segs;
2968
2969 segs = skb_gso_segment(skb, features);
2970 if (IS_ERR(segs)) {
2971 goto out_kfree_skb;
2972 } else if (segs) {
2973 consume_skb(skb);
2974 skb = segs;
2975 }
2976 } else {
2977 if (skb_needs_linearize(skb, features) &&
2978 __skb_linearize(skb))
2979 goto out_kfree_skb;
2980
2981 if (validate_xmit_xfrm(skb, features))
2982 goto out_kfree_skb;
2983
2984
2985
2986
2987
2988 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2989 if (skb->encapsulation)
2990 skb_set_inner_transport_header(skb,
2991 skb_checksum_start_offset(skb));
2992 else
2993 skb_set_transport_header(skb,
2994 skb_checksum_start_offset(skb));
2995 if (!(features & NETIF_F_CSUM_MASK) &&
2996 skb_checksum_help(skb))
2997 goto out_kfree_skb;
2998 }
2999 }
3000
3001 return skb;
3002
3003out_kfree_skb:
3004 kfree_skb(skb);
3005out_null:
3006 atomic_long_inc(&dev->tx_dropped);
3007 return NULL;
3008}
3009
3010struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
3011{
3012 struct sk_buff *next, *head = NULL, *tail;
3013
3014 for (; skb != NULL; skb = next) {
3015 next = skb->next;
3016 skb->next = NULL;
3017
		/* in case skb wont be segmented, point to itself */
3019 skb->prev = skb;
3020
3021 skb = validate_xmit_skb(skb, dev);
3022 if (!skb)
3023 continue;
3024
3025 if (!head)
3026 head = skb;
3027 else
3028 tail->next = skb;
		/* If skb was segmented, skb->prev points to
		 * the last segment. If not, it still contains skb.
		 */
3032 tail = skb->prev;
3033 }
3034 return head;
3035}
3036EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
3037
3038static void qdisc_pkt_len_init(struct sk_buff *skb)
3039{
3040 const struct skb_shared_info *shinfo = skb_shinfo(skb);
3041
3042 qdisc_skb_cb(skb)->pkt_len = skb->len;
3043
	/* To get more precise estimation of bytes sent on wire,
	 * we add to pkt_len the headers size of all segments
	 */
3047 if (shinfo->gso_size) {
3048 unsigned int hdr_len;
3049 u16 gso_segs = shinfo->gso_segs;
3050
		/* mac layer + network layer */
3052 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
3053
		/* + transport layer */
3055 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
3056 hdr_len += tcp_hdrlen(skb);
3057 else
3058 hdr_len += sizeof(struct udphdr);
3059
3060 if (shinfo->gso_type & SKB_GSO_DODGY)
3061 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
3062 shinfo->gso_size);
3063
3064 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
3065 }
3066}
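
/* Worked example (illustrative numbers, not from the source): a TSO skb
 * of 65226 bytes with gso_size 1448 and a 54 byte Ethernet+IPv4+TCP
 * header carries gso_segs = (65226 - 54) / 1448 = 45, so pkt_len becomes
 * 65226 + 44 * 54 = 67602 - a closer estimate of what hits the wire,
 * since every segment after the first repeats the 54 header bytes.
 */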
3067
3068static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
3069 struct net_device *dev,
3070 struct netdev_queue *txq)
3071{
3072 spinlock_t *root_lock = qdisc_lock(q);
3073 struct sk_buff *to_free = NULL;
3074 bool contended;
3075 int rc;
3076
3077 qdisc_calculate_pkt_len(skb, q);
3078
	/* Heuristic to force contended enqueues to serialize on a
	 * separate lock before trying to get qdisc main lock.
	 * This permits qdisc->running owner to get the lock more
	 * often and dequeue packets faster.
	 */
3084 contended = qdisc_is_running(q);
3085 if (unlikely(contended))
3086 spin_lock(&q->busylock);
3087
3088 spin_lock(root_lock);
3089 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
3090 __qdisc_drop(skb, &to_free);
3091 rc = NET_XMIT_DROP;
3092 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
3093 qdisc_run_begin(q)) {
		/*
		 * This is a work-conserving queue; there are no old skbs
		 * waiting to be sent out; and the qdisc is not running -
		 * xmit the skb directly.
		 */

3100 qdisc_bstats_update(q, skb);
3101
3102 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
3103 if (unlikely(contended)) {
3104 spin_unlock(&q->busylock);
3105 contended = false;
3106 }
3107 __qdisc_run(q);
3108 } else
3109 qdisc_run_end(q);
3110
3111 rc = NET_XMIT_SUCCESS;
3112 } else {
3113 rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
3114 if (qdisc_run_begin(q)) {
3115 if (unlikely(contended)) {
3116 spin_unlock(&q->busylock);
3117 contended = false;
3118 }
3119 __qdisc_run(q);
3120 }
3121 }
3122 spin_unlock(root_lock);
3123 if (unlikely(to_free))
3124 kfree_skb_list(to_free);
3125 if (unlikely(contended))
3126 spin_unlock(&q->busylock);
3127 return rc;
3128}
3129
3130#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
3131static void skb_update_prio(struct sk_buff *skb)
3132{
3133 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
3134
3135 if (!skb->priority && skb->sk && map) {
3136 unsigned int prioidx =
3137 sock_cgroup_prioidx(&skb->sk->sk_cgrp_data);
3138
3139 if (prioidx < map->priomap_len)
3140 skb->priority = map->priomap[prioidx];
3141 }
3142}
3143#else
3144#define skb_update_prio(skb)
3145#endif
3146
3147DEFINE_PER_CPU(int, xmit_recursion);
3148EXPORT_SYMBOL(xmit_recursion);
3149
/**
 *	dev_loopback_xmit - loop back @skb
 *	@net: network namespace this loopback is happening in
 *	@sk:  sk needed to be a netfilter okfn
 *	@skb: buffer to transmit
 */
3156int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
3157{
3158 skb_reset_mac_header(skb);
3159 __skb_pull(skb, skb_network_offset(skb));
3160 skb->pkt_type = PACKET_LOOPBACK;
3161 skb->ip_summed = CHECKSUM_UNNECESSARY;
3162 WARN_ON(!skb_dst(skb));
3163 skb_dst_force(skb);
3164 netif_rx_ni(skb);
3165 return 0;
3166}
3167EXPORT_SYMBOL(dev_loopback_xmit);
3168
3169#ifdef CONFIG_NET_EGRESS
3170static struct sk_buff *
3171sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
3172{
3173 struct tcf_proto *cl = rcu_dereference_bh(dev->egress_cl_list);
3174 struct tcf_result cl_res;
3175
3176 if (!cl)
3177 return skb;
3178
	/* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
3180 qdisc_bstats_cpu_update(cl->q, skb);
3181
3182 switch (tc_classify(skb, cl, &cl_res, false)) {
3183 case TC_ACT_OK:
3184 case TC_ACT_RECLASSIFY:
3185 skb->tc_index = TC_H_MIN(cl_res.classid);
3186 break;
3187 case TC_ACT_SHOT:
3188 qdisc_qstats_cpu_drop(cl->q);
3189 *ret = NET_XMIT_DROP;
3190 kfree_skb(skb);
3191 return NULL;
3192 case TC_ACT_STOLEN:
3193 case TC_ACT_QUEUED:
3194 *ret = NET_XMIT_SUCCESS;
3195 consume_skb(skb);
3196 return NULL;
3197 case TC_ACT_REDIRECT:
		/* No need to push/pop skb's mac_header here on egress! */
3199 skb_do_redirect(skb);
3200 *ret = NET_XMIT_SUCCESS;
3201 return NULL;
3202 default:
3203 break;
3204 }
3205
3206 return skb;
3207}
3208#endif
3209
3210static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
3211{
3212#ifdef CONFIG_XPS
3213 struct xps_dev_maps *dev_maps;
3214 struct xps_map *map;
3215 int queue_index = -1;
3216
3217 rcu_read_lock();
3218 dev_maps = rcu_dereference(dev->xps_maps);
3219 if (dev_maps) {
3220 unsigned int tci = skb->sender_cpu - 1;
3221
3222 if (dev->num_tc) {
3223 tci *= dev->num_tc;
3224 tci += netdev_get_prio_tc_map(dev, skb->priority);
3225 }
3226
3227 map = rcu_dereference(dev_maps->cpu_map[tci]);
3228 if (map) {
3229 if (map->len == 1)
3230 queue_index = map->queues[0];
3231 else
3232 queue_index = map->queues[reciprocal_scale(skb_get_hash(skb),
3233 map->len)];
3234 if (unlikely(queue_index >= dev->real_num_tx_queues))
3235 queue_index = -1;
3236 }
3237 }
3238 rcu_read_unlock();
3239
3240 return queue_index;
3241#else
3242 return -1;
3243#endif
3244}
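
/* Note (illustrative): reciprocal_scale(hash, len) used above maps a
 * 32 bit hash uniformly onto [0, len) as (u32)(((u64)hash * len) >> 32),
 * which avoids a modulo operation on this fast path.
 */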
3245
3246static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
3247{
3248 struct sock *sk = skb->sk;
3249 int queue_index = sk_tx_queue_get(sk);
3250
3251 if (queue_index < 0 || skb->ooo_okay ||
3252 queue_index >= dev->real_num_tx_queues) {
3253 int new_index = get_xps_queue(dev, skb);
3254
3255 if (new_index < 0)
3256 new_index = skb_tx_hash(dev, skb);
3257
3258 if (queue_index != new_index && sk &&
3259 sk_fullsock(sk) &&
3260 rcu_access_pointer(sk->sk_dst_cache))
3261 sk_tx_queue_set(sk, new_index);
3262
3263 queue_index = new_index;
3264 }
3265
3266 return queue_index;
3267}
3268
3269struct netdev_queue *netdev_pick_tx(struct net_device *dev,
3270 struct sk_buff *skb,
3271 void *accel_priv)
3272{
3273 int queue_index = 0;
3274
3275#ifdef CONFIG_XPS
3276 u32 sender_cpu = skb->sender_cpu - 1;
3277
3278 if (sender_cpu >= (u32)NR_CPUS)
3279 skb->sender_cpu = raw_smp_processor_id() + 1;
3280#endif
3281
3282 if (dev->real_num_tx_queues != 1) {
3283 const struct net_device_ops *ops = dev->netdev_ops;
3284
3285 if (ops->ndo_select_queue)
3286 queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
3287 __netdev_pick_tx);
3288 else
3289 queue_index = __netdev_pick_tx(dev, skb);
3290
3291 if (!accel_priv)
3292 queue_index = netdev_cap_txqueue(dev, queue_index);
3293 }
3294
3295 skb_set_queue_mapping(skb, queue_index);
3296 return netdev_get_tx_queue(dev, queue_index);
3297}
3298
/**
 *	__dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *	@accel_priv: private data used for L2 forwarding offload
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *      I notice this method can also return errors from the queue disciplines,
 *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
 *      be positive.
 *
 *      Regardless of the return value, the skb is consumed, so it is currently
 *      difficult to retry a send to this method.  (You can bump the ref count
 *      before sending to hold a reference for retry if you are careful.)
 *
 *      When calling this method, interrupts MUST be enabled.  This is because
 *      the BH enable code must have IRQs enabled so that it will not deadlock.
 *          --BLG
 */
3325static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
3326{
3327 struct net_device *dev = skb->dev;
3328 struct netdev_queue *txq;
3329 struct Qdisc *q;
3330 int rc = -ENOMEM;
3331
3332 skb_reset_mac_header(skb);
3333
3334 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
3335 __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
3336
	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
3340 rcu_read_lock_bh();
3341
3342 skb_update_prio(skb);
3343
3344 qdisc_pkt_len_init(skb);
3345#ifdef CONFIG_NET_CLS_ACT
3346 skb->tc_at_ingress = 0;
3347# ifdef CONFIG_NET_EGRESS
3348 if (static_key_false(&egress_needed)) {
3349 skb = sch_handle_egress(skb, &rc, dev);
3350 if (!skb)
3351 goto out;
3352 }
3353# endif
3354#endif
3355
	/* If device/qdisc don't need skb->dst, release it right now
	 * while it's hot in this cpu cache.
	 */
3358 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
3359 skb_dst_drop(skb);
3360 else
3361 skb_dst_force(skb);
3362
3363 txq = netdev_pick_tx(dev, skb, accel_priv);
3364 q = rcu_dereference_bh(txq->qdisc);
3365
3366 trace_net_dev_queue(skb);
3367 if (q->enqueue) {
3368 rc = __dev_xmit_skb(skb, q, dev, txq);
3369 goto out;
3370 }
3371
	/* The device has no queue. Common case for software devices:
	 * loopback, all the sorts of tunnels...

	 * Really, it is unlikely that netif_tx_lock protection is necessary
	 * here.  (f.e. loopback and IP tunnels are clean ignoring statistics
	 * counters.)
	 * However, it is possible, that they rely on protection
	 * made by us here.

	 * Check this and shot the lock. It is not prone from deadlocks.
	 * Either shot noqueue qdisc, it is even simpler 8)
	 */
3384 if (dev->flags & IFF_UP) {
3385 int cpu = smp_processor_id();
3386
3387 if (txq->xmit_lock_owner != cpu) {
3388 if (unlikely(__this_cpu_read(xmit_recursion) >
3389 XMIT_RECURSION_LIMIT))
3390 goto recursion_alert;
3391
3392 skb = validate_xmit_skb(skb, dev);
3393 if (!skb)
3394 goto out;
3395
3396 HARD_TX_LOCK(dev, txq, cpu);
3397
3398 if (!netif_xmit_stopped(txq)) {
3399 __this_cpu_inc(xmit_recursion);
3400 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
3401 __this_cpu_dec(xmit_recursion);
3402 if (dev_xmit_complete(rc)) {
3403 HARD_TX_UNLOCK(dev, txq);
3404 goto out;
3405 }
3406 }
3407 HARD_TX_UNLOCK(dev, txq);
3408 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
3409 dev->name);
3410 } else {
			/* Recursion is detected! It is possible,
			 * unfortunately
			 */
3414recursion_alert:
3415 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
3416 dev->name);
3417 }
3418 }
3419
3420 rc = -ENETDOWN;
3421 rcu_read_unlock_bh();
3422
3423 atomic_long_inc(&dev->tx_dropped);
3424 kfree_skb_list(skb);
3425 return rc;
3426out:
3427 rcu_read_unlock_bh();
3428 return rc;
3429}
3430
3431int dev_queue_xmit(struct sk_buff *skb)
3432{
3433 return __dev_queue_xmit(skb, NULL);
3434}
3435EXPORT_SYMBOL(dev_queue_xmit);
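
/* Usage sketch (illustrative only): a virtual device or tunnel that has
 * built a frame hands it to the stack like this; the skb is always
 * consumed, so it must not be touched afterwards:
 *
 *	skb->dev = dev;
 *	skb->priority = prio;
 *	err = dev_queue_xmit(skb);
 *	if (net_xmit_eval(err))
 *		stats->tx_dropped++;	// hypothetical counter
 */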
3436
3437int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
3438{
3439 return __dev_queue_xmit(skb, accel_priv);
3440}
3441EXPORT_SYMBOL(dev_queue_xmit_accel);
3442
/*************************************************************************
 *			Receiver routines
 *************************************************************************/

3448int netdev_max_backlog __read_mostly = 1000;
3449EXPORT_SYMBOL(netdev_max_backlog);
3450
3451int netdev_tstamp_prequeue __read_mostly = 1;
3452int netdev_budget __read_mostly = 300;
3453unsigned int __read_mostly netdev_budget_usecs = 2000;
3454int weight_p __read_mostly = 64;
3455int dev_weight_rx_bias __read_mostly = 1;
3456int dev_weight_tx_bias __read_mostly = 1;
3457int dev_rx_weight __read_mostly = 64;
3458int dev_tx_weight __read_mostly = 64;
3459
/* Called with irq disabled */
3461static inline void ____napi_schedule(struct softnet_data *sd,
3462 struct napi_struct *napi)
3463{
3464 list_add_tail(&napi->poll_list, &sd->poll_list);
3465 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3466}
3467
3468#ifdef CONFIG_RPS
3469
/* One global table that all flow-based protocols share. */
3471struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
3472EXPORT_SYMBOL(rps_sock_flow_table);
3473u32 rps_cpu_mask __read_mostly;
3474EXPORT_SYMBOL(rps_cpu_mask);
3475
3476struct static_key rps_needed __read_mostly;
3477EXPORT_SYMBOL(rps_needed);
3478struct static_key rfs_needed __read_mostly;
3479EXPORT_SYMBOL(rfs_needed);
3480
3481static struct rps_dev_flow *
3482set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3483 struct rps_dev_flow *rflow, u16 next_cpu)
3484{
3485 if (next_cpu < nr_cpu_ids) {
3486#ifdef CONFIG_RFS_ACCEL
3487 struct netdev_rx_queue *rxqueue;
3488 struct rps_dev_flow_table *flow_table;
3489 struct rps_dev_flow *old_rflow;
3490 u32 flow_id;
3491 u16 rxq_index;
3492 int rc;
3493
		/* Should we steer this flow to a different hardware queue? */
3495 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
3496 !(dev->features & NETIF_F_NTUPLE))
3497 goto out;
3498 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
3499 if (rxq_index == skb_get_rx_queue(skb))
3500 goto out;
3501
3502 rxqueue = dev->_rx + rxq_index;
3503 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3504 if (!flow_table)
3505 goto out;
3506 flow_id = skb_get_hash(skb) & flow_table->mask;
3507 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
3508 rxq_index, flow_id);
3509 if (rc < 0)
3510 goto out;
3511 old_rflow = rflow;
3512 rflow = &flow_table->flows[flow_id];
3513 rflow->filter = rc;
3514 if (old_rflow->filter == rflow->filter)
3515 old_rflow->filter = RPS_NO_FILTER;
3516 out:
3517#endif
3518 rflow->last_qtail =
3519 per_cpu(softnet_data, next_cpu).input_queue_head;
3520 }
3521
3522 rflow->cpu = next_cpu;
3523 return rflow;
3524}
3525
/*
 * get_rps_cpu is called from netif_receive_skb and returns the target
 * CPU from the RPS map of the receiving queue for a given skb.
 * rcu_read_lock must be held on entry.
 */
3531static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3532 struct rps_dev_flow **rflowp)
3533{
3534 const struct rps_sock_flow_table *sock_flow_table;
3535 struct netdev_rx_queue *rxqueue = dev->_rx;
3536 struct rps_dev_flow_table *flow_table;
3537 struct rps_map *map;
3538 int cpu = -1;
3539 u32 tcpu;
3540 u32 hash;
3541
3542 if (skb_rx_queue_recorded(skb)) {
3543 u16 index = skb_get_rx_queue(skb);
3544
3545 if (unlikely(index >= dev->real_num_rx_queues)) {
3546 WARN_ONCE(dev->real_num_rx_queues > 1,
3547 "%s received packet on queue %u, but number "
3548 "of RX queues is %u\n",
3549 dev->name, index, dev->real_num_rx_queues);
3550 goto done;
3551 }
3552 rxqueue += index;
3553 }
3554
	/* Avoid computing hash if RFS/RPS is not active for this rxqueue */
3556
3557 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3558 map = rcu_dereference(rxqueue->rps_map);
3559 if (!flow_table && !map)
3560 goto done;
3561
3562 skb_reset_network_header(skb);
3563 hash = skb_get_hash(skb);
3564 if (!hash)
3565 goto done;
3566
3567 sock_flow_table = rcu_dereference(rps_sock_flow_table);
3568 if (flow_table && sock_flow_table) {
3569 struct rps_dev_flow *rflow;
3570 u32 next_cpu;
3571 u32 ident;
3572
		/* First check into global flow table if there is a match */
3574 ident = sock_flow_table->ents[hash & sock_flow_table->mask];
3575 if ((ident ^ hash) & ~rps_cpu_mask)
3576 goto try_rps;
3577
3578 next_cpu = ident & rps_cpu_mask;
3579
		/* OK, now we know there is a match,
		 * we can look at the local (per receive queue) flow table
		 */
3583 rflow = &flow_table->flows[hash & flow_table->mask];
3584 tcpu = rflow->cpu;
3585
		/*
		 * If the desired CPU (where last recvmsg was done) is
		 * different from current CPU (one in the rx-queue flow
		 * table entry), switch if one of the following holds:
		 *   - Current CPU is unset (>= nr_cpu_ids).
		 *   - Current CPU is offline.
		 *   - The current CPU's queue tail has advanced beyond the
		 *     last packet that was enqueued using this table entry.
		 *     This guarantees that all previous packets for the flow
		 *     have been dequeued, thus preserving in order delivery.
		 */
3597 if (unlikely(tcpu != next_cpu) &&
3598 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
3599 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
3600 rflow->last_qtail)) >= 0)) {
3601 tcpu = next_cpu;
3602 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
3603 }
3604
3605 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
3606 *rflowp = rflow;
3607 cpu = tcpu;
3608 goto done;
3609 }
3610 }
3611
3612try_rps:
3613
3614 if (map) {
3615 tcpu = map->cpus[reciprocal_scale(hash, map->len)];
3616 if (cpu_online(tcpu)) {
3617 cpu = tcpu;
3618 goto done;
3619 }
3620 }
3621
3622done:
3623 return cpu;
3624}
3625
3626#ifdef CONFIG_RFS_ACCEL
3627
/**
 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
 * @dev: Device on which the filter was set
 * @rxq_index: RX queue index
 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
 *
 * Drivers that implement ndo_rx_flow_steer() should periodically call
 * this function for each installed filter and remove the filters for
 * which it returns %true.
 */
3639bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
3640 u32 flow_id, u16 filter_id)
3641{
3642 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
3643 struct rps_dev_flow_table *flow_table;
3644 struct rps_dev_flow *rflow;
3645 bool expire = true;
3646 unsigned int cpu;
3647
3648 rcu_read_lock();
3649 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3650 if (flow_table && flow_id <= flow_table->mask) {
3651 rflow = &flow_table->flows[flow_id];
3652 cpu = ACCESS_ONCE(rflow->cpu);
3653 if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
3654 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
3655 rflow->last_qtail) <
3656 (int)(10 * flow_table->mask)))
3657 expire = false;
3658 }
3659 rcu_read_unlock();
3660 return expire;
3661}
3662EXPORT_SYMBOL(rps_may_expire_flow);
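
/* Usage sketch (illustrative only): a driver with accelerated RFS would
 * periodically scan its installed filters, for instance from a
 * workqueue, and do roughly
 *
 *	if (rps_may_expire_flow(netdev, rxq, flow_id, filter_id))
 *		remove_hw_filter(priv, filter_id);	// hypothetical helper
 *
 * for each filter previously installed via ndo_rx_flow_steer().
 */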
3663
3664#endif
3665
/* Called from hardirq (IPI) context */
3667static void rps_trigger_softirq(void *data)
3668{
3669 struct softnet_data *sd = data;
3670
3671 ____napi_schedule(sd, &sd->backlog);
3672 sd->received_rps++;
3673}
3674
3675#endif
3676
/*
 * Check if this softnet_data structure is another cpu one
 * If yes, queue it to our IPI list and return 1
 * If no, return 0
 */
3682static int rps_ipi_queued(struct softnet_data *sd)
3683{
3684#ifdef CONFIG_RPS
3685 struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
3686
3687 if (sd != mysd) {
3688 sd->rps_ipi_next = mysd->rps_ipi_list;
3689 mysd->rps_ipi_list = sd;
3690
3691 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3692 return 1;
3693 }
3694#endif
3695 return 0;
3696}
3697
3698#ifdef CONFIG_NET_FLOW_LIMIT
3699int netdev_flow_limit_table_len __read_mostly = (1 << 12);
3700#endif
3701
3702static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
3703{
3704#ifdef CONFIG_NET_FLOW_LIMIT
3705 struct sd_flow_limit *fl;
3706 struct softnet_data *sd;
3707 unsigned int old_flow, new_flow;
3708
3709 if (qlen < (netdev_max_backlog >> 1))
3710 return false;
3711
3712 sd = this_cpu_ptr(&softnet_data);
3713
3714 rcu_read_lock();
3715 fl = rcu_dereference(sd->flow_limit);
3716 if (fl) {
3717 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
3718 old_flow = fl->history[fl->history_head];
3719 fl->history[fl->history_head] = new_flow;
3720
3721 fl->history_head++;
3722 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
3723
3724 if (likely(fl->buckets[old_flow]))
3725 fl->buckets[old_flow]--;
3726
3727 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
3728 fl->count++;
3729 rcu_read_unlock();
3730 return true;
3731 }
3732 }
3733 rcu_read_unlock();
3734#endif
3735 return false;
3736}
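
/* Note (illustrative summary of the logic above): once the backlog is
 * more than half full, the last FLOW_LIMIT_HISTORY packet hashes are
 * kept in a small ring with per-bucket counts; a single flow that
 * accounts for more than half of that history is considered abusive
 * and its packets are dropped first, protecting well-behaved flows.
 */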
3737
/*
 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
 * queue (may be a remote CPU queue).
 */
3742static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3743 unsigned int *qtail)
3744{
3745 struct softnet_data *sd;
3746 unsigned long flags;
3747 unsigned int qlen;
3748
3749 sd = &per_cpu(softnet_data, cpu);
3750
3751 local_irq_save(flags);
3752
3753 rps_lock(sd);
3754 if (!netif_running(skb->dev))
3755 goto drop;
3756 qlen = skb_queue_len(&sd->input_pkt_queue);
3757 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
3758 if (qlen) {
3759enqueue:
3760 __skb_queue_tail(&sd->input_pkt_queue, skb);
3761 input_queue_tail_incr_save(sd, qtail);
3762 rps_unlock(sd);
3763 local_irq_restore(flags);
3764 return NET_RX_SUCCESS;
3765 }
3766
		/* Schedule NAPI for backlog device
		 * We can use non atomic operation since we own the queue lock
		 */
3770 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
3771 if (!rps_ipi_queued(sd))
3772 ____napi_schedule(sd, &sd->backlog);
3773 }
3774 goto enqueue;
3775 }
3776
3777drop:
3778 sd->dropped++;
3779 rps_unlock(sd);
3780
3781 local_irq_restore(flags);
3782
3783 atomic_long_inc(&skb->dev->rx_dropped);
3784 kfree_skb(skb);
3785 return NET_RX_DROP;
3786}
3787
3788static int netif_rx_internal(struct sk_buff *skb)
3789{
3790 int ret;
3791
3792 net_timestamp_check(netdev_tstamp_prequeue, skb);
3793
3794 trace_netif_rx(skb);
3795#ifdef CONFIG_RPS
3796 if (static_key_false(&rps_needed)) {
3797 struct rps_dev_flow voidflow, *rflow = &voidflow;
3798 int cpu;
3799
3800 preempt_disable();
3801 rcu_read_lock();
3802
3803 cpu = get_rps_cpu(skb->dev, skb, &rflow);
3804 if (cpu < 0)
3805 cpu = smp_processor_id();
3806
3807 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3808
3809 rcu_read_unlock();
3810 preempt_enable();
3811 } else
3812#endif
3813 {
3814 unsigned int qtail;
3815
3816 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
3817 put_cpu();
3818 }
3819 return ret;
3820}
3821
3822
/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped)
 *
 */
3837int netif_rx(struct sk_buff *skb)
3838{
3839 trace_netif_rx_entry(skb);
3840
3841 return netif_rx_internal(skb);
3842}
3843EXPORT_SYMBOL(netif_rx);
3844
3845int netif_rx_ni(struct sk_buff *skb)
3846{
3847 int err;
3848
3849 trace_netif_rx_ni_entry(skb);
3850
3851 preempt_disable();
3852 err = netif_rx_internal(skb);
3853 if (local_softirq_pending())
3854 do_softirq();
3855 preempt_enable();
3856
3857 return err;
3858}
3859EXPORT_SYMBOL(netif_rx_ni);
3860
3861static __latent_entropy void net_tx_action(struct softirq_action *h)
3862{
3863 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
3864
3865 if (sd->completion_queue) {
3866 struct sk_buff *clist;
3867
3868 local_irq_disable();
3869 clist = sd->completion_queue;
3870 sd->completion_queue = NULL;
3871 local_irq_enable();
3872
3873 while (clist) {
3874 struct sk_buff *skb = clist;
3875
3876 clist = clist->next;
3877
3878 WARN_ON(atomic_read(&skb->users));
3879 if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
3880 trace_consume_skb(skb);
3881 else
3882 trace_kfree_skb(skb, net_tx_action);
3883
3884 if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
3885 __kfree_skb(skb);
3886 else
3887 __kfree_skb_defer(skb);
3888 }
3889
3890 __kfree_skb_flush();
3891 }
3892
3893 if (sd->output_queue) {
3894 struct Qdisc *head;
3895
3896 local_irq_disable();
3897 head = sd->output_queue;
3898 sd->output_queue = NULL;
3899 sd->output_queue_tailp = &sd->output_queue;
3900 local_irq_enable();
3901
3902 while (head) {
3903 struct Qdisc *q = head;
3904 spinlock_t *root_lock;
3905
3906 head = head->next_sched;
3907
3908 root_lock = qdisc_lock(q);
3909 spin_lock(root_lock);
			/* We need to make sure head->next_sched is read
			 * before clearing __QDISC_STATE_SCHED
			 */
3913 smp_mb__before_atomic();
3914 clear_bit(__QDISC_STATE_SCHED, &q->state);
3915 qdisc_run(q);
3916 spin_unlock(root_lock);
3917 }
3918 }
3919}
3920
3921#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
/* This hook is defined here for ATM LANE */
3923int (*br_fdb_test_addr_hook)(struct net_device *dev,
3924 unsigned char *addr) __read_mostly;
3925EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
3926#endif
3927
3928static inline struct sk_buff *
3929sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
3930 struct net_device *orig_dev)
3931{
3932#ifdef CONFIG_NET_CLS_ACT
3933 struct tcf_proto *cl = rcu_dereference_bh(skb->dev->ingress_cl_list);
3934 struct tcf_result cl_res;
3935
	/* If there's at least one ingress present somewhere (so
	 * we get here via enabled static key), remaining devices
	 * that are not configured with an ingress qdisc will bail
	 * out here.
	 */
3941 if (!cl)
3942 return skb;
3943 if (*pt_prev) {
3944 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3945 *pt_prev = NULL;
3946 }
3947
3948 qdisc_skb_cb(skb)->pkt_len = skb->len;
3949 skb->tc_at_ingress = 1;
3950 qdisc_bstats_cpu_update(cl->q, skb);
3951
3952 switch (tc_classify(skb, cl, &cl_res, false)) {
3953 case TC_ACT_OK:
3954 case TC_ACT_RECLASSIFY:
3955 skb->tc_index = TC_H_MIN(cl_res.classid);
3956 break;
3957 case TC_ACT_SHOT:
3958 qdisc_qstats_cpu_drop(cl->q);
3959 kfree_skb(skb);
3960 return NULL;
3961 case TC_ACT_STOLEN:
3962 case TC_ACT_QUEUED:
3963 consume_skb(skb);
3964 return NULL;
3965 case TC_ACT_REDIRECT:
		/* skb_mac_header check was done by cls/act_bpf, so
		 * we can safely push the L2 header back before
		 * redirecting to another netdev
		 */
3970 __skb_push(skb, skb->mac_len);
3971 skb_do_redirect(skb);
3972 return NULL;
3973 default:
3974 break;
3975 }
3976#endif
3977 return skb;
3978}
3979
3980
/**
 * netdev_is_rx_handler_busy - check if receive handler is registered
 * @dev: device to check
 *
 * Check if a receive handler is already registered for a given device.
 * Return true if there is one.
 * The caller must hold the rtnl_mutex.
 */
3989bool netdev_is_rx_handler_busy(struct net_device *dev)
3990{
3991 ASSERT_RTNL();
3992 return dev && rtnl_dereference(dev->rx_handler);
3993}
3994EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
3995
/**
 * netdev_rx_handler_register - register receive handler
 * @dev: device to register a handler for
 * @rx_handler: receive handler to register
 * @rx_handler_data: data pointer that is used by rx handler
 *
 * Register a receive handler for a device. This handler will then be
 * called from __netif_receive_skb. A negative errno code is returned
 * on a failure.
 *
 * The caller must hold the rtnl_mutex.
 *
 * For a general description of rx_handler, see enum rx_handler_result.
 */
4010int netdev_rx_handler_register(struct net_device *dev,
4011 rx_handler_func_t *rx_handler,
4012 void *rx_handler_data)
4013{
4014 if (netdev_is_rx_handler_busy(dev))
4015 return -EBUSY;
4016
	/* Note: rx_handler_data must be set before rx_handler */
4018 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
4019 rcu_assign_pointer(dev->rx_handler, rx_handler);
4020
4021 return 0;
4022}
4023EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
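
/* Usage sketch (illustrative only): stacking drivers such as bridge or
 * team attach themselves to a lower device roughly like this:
 *
 *	ASSERT_RTNL();
 *	err = netdev_rx_handler_register(lower_dev, my_rx_handler, my_priv);
 *	...
 *	netdev_rx_handler_unregister(lower_dev);
 *
 * where my_rx_handler() is a hypothetical handler returning one of the
 * RX_HANDLER_* results consumed by __netif_receive_skb_core() below.
 */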
4024
/**
 * netdev_rx_handler_unregister - unregister receive handler
 * @dev: device to unregister a handler from
 *
 * Unregister a receive handler from a device.
 *
 * The caller must hold the rtnl_mutex.
 */
4033void netdev_rx_handler_unregister(struct net_device *dev)
4034{
4035
4036 ASSERT_RTNL();
4037 RCU_INIT_POINTER(dev->rx_handler, NULL);
4038
	/* a reader seeing a non NULL rx_handler in a rcu_read_lock()
	 * section has a guarantee to see a non NULL rx_handler_data
	 * as well.
	 */
4042 synchronize_net();
4043 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
4044}
4045EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
4046
/*
 * Limit the use of PFMEMALLOC reserves to those protocols that implement
 * the special case of delivering to swap pfmemalloc skbs.
 */
4051static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
4052{
4053 switch (skb->protocol) {
4054 case htons(ETH_P_ARP):
4055 case htons(ETH_P_IP):
4056 case htons(ETH_P_IPV6):
4057 case htons(ETH_P_8021Q):
4058 case htons(ETH_P_8021AD):
4059 return true;
4060 default:
4061 return false;
4062 }
4063}
4064
4065static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
4066 int *ret, struct net_device *orig_dev)
4067{
4068#ifdef CONFIG_NETFILTER_INGRESS
4069 if (nf_hook_ingress_active(skb)) {
4070 int ingress_retval;
4071
4072 if (*pt_prev) {
4073 *ret = deliver_skb(skb, *pt_prev, orig_dev);
4074 *pt_prev = NULL;
4075 }
4076
4077 rcu_read_lock();
4078 ingress_retval = nf_hook_ingress(skb);
4079 rcu_read_unlock();
4080 return ingress_retval;
4081 }
4082#endif
4083 return 0;
4084}
4085
4086static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
4087{
4088 struct packet_type *ptype, *pt_prev;
4089 rx_handler_func_t *rx_handler;
4090 struct net_device *orig_dev;
4091 bool deliver_exact = false;
4092 int ret = NET_RX_DROP;
4093 __be16 type;
4094
4095 net_timestamp_check(!netdev_tstamp_prequeue, skb);
4096
4097 trace_netif_receive_skb(skb);
4098
4099 orig_dev = skb->dev;
4100
4101 skb_reset_network_header(skb);
4102 if (!skb_transport_header_was_set(skb))
4103 skb_reset_transport_header(skb);
4104 skb_reset_mac_len(skb);
4105
4106 pt_prev = NULL;
4107
4108another_round:
4109 skb->skb_iif = skb->dev->ifindex;
4110
4111 __this_cpu_inc(softnet_data.processed);
4112
4113 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
4114 skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
4115 skb = skb_vlan_untag(skb);
4116 if (unlikely(!skb))
4117 goto out;
4118 }
4119
4120 if (skb_skip_tc_classify(skb))
4121 goto skip_classify;
4122
4123 if (pfmemalloc)
4124 goto skip_taps;
4125
4126 list_for_each_entry_rcu(ptype, &ptype_all, list) {
4127 if (pt_prev)
4128 ret = deliver_skb(skb, pt_prev, orig_dev);
4129 pt_prev = ptype;
4130 }
4131
4132 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
4133 if (pt_prev)
4134 ret = deliver_skb(skb, pt_prev, orig_dev);
4135 pt_prev = ptype;
4136 }
4137
4138skip_taps:
4139#ifdef CONFIG_NET_INGRESS
4140 if (static_key_false(&ingress_needed)) {
4141 skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev);
4142 if (!skb)
4143 goto out;
4144
4145 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
4146 goto out;
4147 }
4148#endif
4149 skb_reset_tc(skb);
4150skip_classify:
4151 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
4152 goto drop;
4153
4154 if (skb_vlan_tag_present(skb)) {
4155 if (pt_prev) {
4156 ret = deliver_skb(skb, pt_prev, orig_dev);
4157 pt_prev = NULL;
4158 }
4159 if (vlan_do_receive(&skb))
4160 goto another_round;
4161 else if (unlikely(!skb))
4162 goto out;
4163 }
4164
4165 rx_handler = rcu_dereference(skb->dev->rx_handler);
4166 if (rx_handler) {
4167 if (pt_prev) {
4168 ret = deliver_skb(skb, pt_prev, orig_dev);
4169 pt_prev = NULL;
4170 }
4171 switch (rx_handler(&skb)) {
4172 case RX_HANDLER_CONSUMED:
4173 ret = NET_RX_SUCCESS;
4174 goto out;
4175 case RX_HANDLER_ANOTHER:
4176 goto another_round;
4177 case RX_HANDLER_EXACT:
4178 deliver_exact = true;
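			/* fall through */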
4179 case RX_HANDLER_PASS:
4180 break;
4181 default:
4182 BUG();
4183 }
4184 }
4185
4186 if (unlikely(skb_vlan_tag_present(skb))) {
4187 if (skb_vlan_tag_get_id(skb))
4188 skb->pkt_type = PACKET_OTHERHOST;
4189
		/* Note: we might in the future use prio bits
		 * and set skb->priority like in vlan_do_receive()
		 * For the time being, just ignore Priority Code Point
		 */
4193 skb->vlan_tci = 0;
4194 }
4195
4196 type = skb->protocol;
4197
	/* deliver only exact match when indicated */
4199 if (likely(!deliver_exact)) {
4200 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4201 &ptype_base[ntohs(type) &
4202 PTYPE_HASH_MASK]);
4203 }
4204
4205 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4206 &orig_dev->ptype_specific);
4207
4208 if (unlikely(skb->dev != orig_dev)) {
4209 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4210 &skb->dev->ptype_specific);
4211 }
4212
4213 if (pt_prev) {
4214 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
4215 goto drop;
4216 else
4217 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
4218 } else {
4219drop:
4220 if (!deliver_exact)
4221 atomic_long_inc(&skb->dev->rx_dropped);
4222 else
4223 atomic_long_inc(&skb->dev->rx_nohandler);
4224 kfree_skb(skb);
4225
		/* Jamal, now you will not able to escape explaining
		 * me how you were going to use this. :-)
		 */
4228 ret = NET_RX_DROP;
4229 }
4230
4231out:
4232 return ret;
4233}
4234
4235static int __netif_receive_skb(struct sk_buff *skb)
4236{
4237 int ret;
4238
4239 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
4240 unsigned int noreclaim_flag;
4241
		/*
		 * PFMEMALLOC skbs are special, they should
		 * - be delivered to SOCK_MEMALLOC sockets only
		 * - stay away from userspace
		 * - have bounded memory usage
		 *
		 * Use PF_MEMALLOC as this saves us from propagating the allocation
		 * context down to all allocation sites.
		 */
4251 noreclaim_flag = memalloc_noreclaim_save();
4252 ret = __netif_receive_skb_core(skb, true);
4253 memalloc_noreclaim_restore(noreclaim_flag);
4254 } else
4255 ret = __netif_receive_skb_core(skb, false);
4256
4257 return ret;
4258}
4259
4260static struct static_key generic_xdp_needed __read_mostly;
4261
4262static int generic_xdp_install(struct net_device *dev, struct netdev_xdp *xdp)
4263{
4264 struct bpf_prog *new = xdp->prog;
4265 int ret = 0;
4266
4267 switch (xdp->command) {
4268 case XDP_SETUP_PROG: {
4269 struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
4270
4271 rcu_assign_pointer(dev->xdp_prog, new);
4272 if (old)
4273 bpf_prog_put(old);
4274
4275 if (old && !new) {
4276 static_key_slow_dec(&generic_xdp_needed);
4277 } else if (new && !old) {
4278 static_key_slow_inc(&generic_xdp_needed);
4279 dev_disable_lro(dev);
4280 }
4281 break;
4282 }
4283
4284 case XDP_QUERY_PROG:
4285 xdp->prog_attached = !!rcu_access_pointer(dev->xdp_prog);
4286 break;
4287
4288 default:
4289 ret = -EINVAL;
4290 break;
4291 }
4292
4293 return ret;
4294}
4295
4296static u32 netif_receive_generic_xdp(struct sk_buff *skb,
4297 struct bpf_prog *xdp_prog)
4298{
4299 struct xdp_buff xdp;
4300 u32 act = XDP_DROP;
4301 void *orig_data;
4302 int hlen, off;
4303 u32 mac_len;
4304
	/* Reinjected packets coming from act_mirred or similar should
	 * not get XDP generic processing.
	 */
4308 if (skb_cloned(skb))
4309 return XDP_PASS;
4310
4311 if (skb_linearize(skb))
4312 goto do_drop;
4313
	/* The XDP program wants to see the packet starting at the MAC
	 * header.
	 */
4317 mac_len = skb->data - skb_mac_header(skb);
4318 hlen = skb_headlen(skb) + mac_len;
4319 xdp.data = skb->data - mac_len;
4320 xdp.data_end = xdp.data + hlen;
4321 xdp.data_hard_start = skb->data - skb_headroom(skb);
4322 orig_data = xdp.data;
4323
4324 act = bpf_prog_run_xdp(xdp_prog, &xdp);
4325
4326 off = xdp.data - orig_data;
4327 if (off > 0)
4328 __skb_pull(skb, off);
4329 else if (off < 0)
4330 __skb_push(skb, -off);
4331
4332 switch (act) {
4333 case XDP_TX:
4334 __skb_push(skb, mac_len);
		/* fall through */
4336 case XDP_PASS:
4337 break;
4338
4339 default:
4340 bpf_warn_invalid_xdp_action(act);
		/* fall through */
4342 case XDP_ABORTED:
4343 trace_xdp_exception(skb->dev, xdp_prog, act);
		/* fall through */
4345 case XDP_DROP:
4346 do_drop:
4347 kfree_skb(skb);
4348 break;
4349 }
4350
4351 return act;
4352}
4353
/* When doing generic XDP we have to bypass the qdisc layer and the
 * network taps in order to match in-driver-XDP behavior.
 */
4357static void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
4358{
4359 struct net_device *dev = skb->dev;
4360 struct netdev_queue *txq;
4361 bool free_skb = true;
4362 int cpu, rc;
4363
4364 txq = netdev_pick_tx(dev, skb, NULL);
4365 cpu = smp_processor_id();
4366 HARD_TX_LOCK(dev, txq, cpu);
4367 if (!netif_xmit_stopped(txq)) {
		rc = netdev_start_xmit(skb, dev, txq, false);
4369 if (dev_xmit_complete(rc))
4370 free_skb = false;
4371 }
4372 HARD_TX_UNLOCK(dev, txq);
4373 if (free_skb) {
4374 trace_xdp_exception(dev, xdp_prog, XDP_TX);
4375 kfree_skb(skb);
4376 }
4377}
4378
4379static int netif_receive_skb_internal(struct sk_buff *skb)
4380{
4381 int ret;
4382
4383 net_timestamp_check(netdev_tstamp_prequeue, skb);
4384
4385 if (skb_defer_rx_timestamp(skb))
4386 return NET_RX_SUCCESS;
4387
4388 rcu_read_lock();
4389
4390 if (static_key_false(&generic_xdp_needed)) {
4391 struct bpf_prog *xdp_prog = rcu_dereference(skb->dev->xdp_prog);
4392
4393 if (xdp_prog) {
4394 u32 act = netif_receive_generic_xdp(skb, xdp_prog);
4395
4396 if (act != XDP_PASS) {
4397 rcu_read_unlock();
4398 if (act == XDP_TX)
4399 generic_xdp_tx(skb, xdp_prog);
4400 return NET_RX_DROP;
4401 }
4402 }
4403 }
4404
4405#ifdef CONFIG_RPS
4406 if (static_key_false(&rps_needed)) {
4407 struct rps_dev_flow voidflow, *rflow = &voidflow;
4408 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
4409
4410 if (cpu >= 0) {
4411 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
4412 rcu_read_unlock();
4413 return ret;
4414 }
4415 }
4416#endif
4417 ret = __netif_receive_skb(skb);
4418 rcu_read_unlock();
4419 return ret;
4420}
4421
/**
 *	netif_receive_skb - process receive buffer from network
 *	@skb: buffer to process
 *
 *	netif_receive_skb() is the main receive data processing function.
 *	It always succeeds. The buffer may be dropped during processing
 *	for congestion control or by the protocol layers.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
4437int netif_receive_skb(struct sk_buff *skb)
4438{
4439 trace_netif_receive_skb_entry(skb);
4440
4441 return netif_receive_skb_internal(skb);
4442}
4443EXPORT_SYMBOL(netif_receive_skb);
4444
4445DEFINE_PER_CPU(struct work_struct, flush_works);
4446
/* Network device is going away, flush any packets still pending */
4448static void flush_backlog(struct work_struct *work)
4449{
4450 struct sk_buff *skb, *tmp;
4451 struct softnet_data *sd;
4452
4453 local_bh_disable();
4454 sd = this_cpu_ptr(&softnet_data);
4455
4456 local_irq_disable();
4457 rps_lock(sd);
4458 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
4459 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
4460 __skb_unlink(skb, &sd->input_pkt_queue);
4461 kfree_skb(skb);
4462 input_queue_head_incr(sd);
4463 }
4464 }
4465 rps_unlock(sd);
4466 local_irq_enable();
4467
4468 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
4469 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
4470 __skb_unlink(skb, &sd->process_queue);
4471 kfree_skb(skb);
4472 input_queue_head_incr(sd);
4473 }
4474 }
4475 local_bh_enable();
4476}
4477
4478static void flush_all_backlogs(void)
4479{
4480 unsigned int cpu;
4481
4482 get_online_cpus();
4483
4484 for_each_online_cpu(cpu)
4485 queue_work_on(cpu, system_highpri_wq,
4486 per_cpu_ptr(&flush_works, cpu));
4487
4488 for_each_online_cpu(cpu)
4489 flush_work(per_cpu_ptr(&flush_works, cpu));
4490
4491 put_online_cpus();
4492}
4493
4494static int napi_gro_complete(struct sk_buff *skb)
4495{
4496 struct packet_offload *ptype;
4497 __be16 type = skb->protocol;
4498 struct list_head *head = &offload_base;
4499 int err = -ENOENT;
4500
4501 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
4502
4503 if (NAPI_GRO_CB(skb)->count == 1) {
4504 skb_shinfo(skb)->gso_size = 0;
4505 goto out;
4506 }
4507
4508 rcu_read_lock();
4509 list_for_each_entry_rcu(ptype, head, list) {
4510 if (ptype->type != type || !ptype->callbacks.gro_complete)
4511 continue;
4512
4513 err = ptype->callbacks.gro_complete(skb, 0);
4514 break;
4515 }
4516 rcu_read_unlock();
4517
4518 if (err) {
4519 WARN_ON(&ptype->list == head);
4520 kfree_skb(skb);
4521 return NET_RX_SUCCESS;
4522 }
4523
4524out:
4525 return netif_receive_skb_internal(skb);
4526}
4527
/* napi->gro_list contains packets ordered by age.
 * youngest packets at the head of the list.
 * Complete skbs in reverse order to reduce latencies.
 */
4532void napi_gro_flush(struct napi_struct *napi, bool flush_old)
4533{
4534 struct sk_buff *skb, *prev = NULL;
4535
	/* scan list and build reverse chain */
4537 for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
4538 skb->prev = prev;
4539 prev = skb;
4540 }
4541
4542 for (skb = prev; skb; skb = prev) {
4543 skb->next = NULL;
4544
4545 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
4546 return;
4547
4548 prev = skb->prev;
4549 napi_gro_complete(skb);
4550 napi->gro_count--;
4551 }
4552
4553 napi->gro_list = NULL;
4554}
4555EXPORT_SYMBOL(napi_gro_flush);
4556
4557static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
4558{
4559 struct sk_buff *p;
4560 unsigned int maclen = skb->dev->hard_header_len;
4561 u32 hash = skb_get_hash_raw(skb);
4562
4563 for (p = napi->gro_list; p; p = p->next) {
4564 unsigned long diffs;
4565
4566 NAPI_GRO_CB(p)->flush = 0;
4567
4568 if (hash != skb_get_hash_raw(p)) {
4569 NAPI_GRO_CB(p)->same_flow = 0;
4570 continue;
4571 }
4572
4573 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
4574 diffs |= p->vlan_tci ^ skb->vlan_tci;
4575 diffs |= skb_metadata_dst_cmp(p, skb);
4576 if (maclen == ETH_HLEN)
4577 diffs |= compare_ether_header(skb_mac_header(p),
4578 skb_mac_header(skb));
4579 else if (!diffs)
4580 diffs = memcmp(skb_mac_header(p),
4581 skb_mac_header(skb),
4582 maclen);
4583 NAPI_GRO_CB(p)->same_flow = !diffs;
4584 }
4585}
4586
4587static void skb_gro_reset_offset(struct sk_buff *skb)
4588{
4589 const struct skb_shared_info *pinfo = skb_shinfo(skb);
4590 const skb_frag_t *frag0 = &pinfo->frags[0];
4591
4592 NAPI_GRO_CB(skb)->data_offset = 0;
4593 NAPI_GRO_CB(skb)->frag0 = NULL;
4594 NAPI_GRO_CB(skb)->frag0_len = 0;
4595
4596 if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
4597 pinfo->nr_frags &&
4598 !PageHighMem(skb_frag_page(frag0))) {
4599 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
4600 NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
4601 skb_frag_size(frag0),
4602 skb->end - skb->tail);
4603 }
4604}
4605
4606static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
4607{
4608 struct skb_shared_info *pinfo = skb_shinfo(skb);
4609
4610 BUG_ON(skb->end - skb->tail < grow);
4611
4612 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
4613
4614 skb->data_len -= grow;
4615 skb->tail += grow;
4616
4617 pinfo->frags[0].page_offset += grow;
4618 skb_frag_size_sub(&pinfo->frags[0], grow);
4619
4620 if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
4621 skb_frag_unref(skb, 0);
4622 memmove(pinfo->frags, pinfo->frags + 1,
4623 --pinfo->nr_frags * sizeof(pinfo->frags[0]));
4624 }
4625}
4626
4627static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
4628{
4629 struct sk_buff **pp = NULL;
4630 struct packet_offload *ptype;
4631 __be16 type = skb->protocol;
4632 struct list_head *head = &offload_base;
4633 int same_flow;
4634 enum gro_result ret;
4635 int grow;
4636
4637 if (netif_elide_gro(skb->dev))
4638 goto normal;
4639
4640 if (skb->csum_bad)
4641 goto normal;
4642
4643 gro_list_prepare(napi, skb);
4644
4645 rcu_read_lock();
4646 list_for_each_entry_rcu(ptype, head, list) {
4647 if (ptype->type != type || !ptype->callbacks.gro_receive)
4648 continue;
4649
4650 skb_set_network_header(skb, skb_gro_offset(skb));
4651 skb_reset_mac_len(skb);
4652 NAPI_GRO_CB(skb)->same_flow = 0;
4653 NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
4654 NAPI_GRO_CB(skb)->free = 0;
4655 NAPI_GRO_CB(skb)->encap_mark = 0;
4656 NAPI_GRO_CB(skb)->recursion_counter = 0;
4657 NAPI_GRO_CB(skb)->is_fou = 0;
4658 NAPI_GRO_CB(skb)->is_atomic = 1;
4659 NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
4660
		/* Setup for GRO checksum validation */
4662 switch (skb->ip_summed) {
4663 case CHECKSUM_COMPLETE:
4664 NAPI_GRO_CB(skb)->csum = skb->csum;
4665 NAPI_GRO_CB(skb)->csum_valid = 1;
4666 NAPI_GRO_CB(skb)->csum_cnt = 0;
4667 break;
4668 case CHECKSUM_UNNECESSARY:
4669 NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
4670 NAPI_GRO_CB(skb)->csum_valid = 0;
4671 break;
4672 default:
4673 NAPI_GRO_CB(skb)->csum_cnt = 0;
4674 NAPI_GRO_CB(skb)->csum_valid = 0;
4675 }
4676
4677 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
4678 break;
4679 }
4680 rcu_read_unlock();
4681
4682 if (&ptype->list == head)
4683 goto normal;
4684
4685 if (IS_ERR(pp) && PTR_ERR(pp) == -EINPROGRESS) {
4686 ret = GRO_CONSUMED;
4687 goto ok;
4688 }
4689
4690 same_flow = NAPI_GRO_CB(skb)->same_flow;
4691 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
4692
4693 if (pp) {
4694 struct sk_buff *nskb = *pp;
4695
4696 *pp = nskb->next;
4697 nskb->next = NULL;
4698 napi_gro_complete(nskb);
4699 napi->gro_count--;
4700 }
4701
4702 if (same_flow)
4703 goto ok;
4704
4705 if (NAPI_GRO_CB(skb)->flush)
4706 goto normal;
4707
4708 if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
4709 struct sk_buff *nskb = napi->gro_list;
4710
		/* locate the end of the list to select the 'oldest' flow */
4712 while (nskb->next) {
4713 pp = &nskb->next;
4714 nskb = *pp;
4715 }
4716 *pp = NULL;
4717 nskb->next = NULL;
4718 napi_gro_complete(nskb);
4719 } else {
4720 napi->gro_count++;
4721 }
4722 NAPI_GRO_CB(skb)->count = 1;
4723 NAPI_GRO_CB(skb)->age = jiffies;
4724 NAPI_GRO_CB(skb)->last = skb;
4725 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
4726 skb->next = napi->gro_list;
4727 napi->gro_list = skb;
4728 ret = GRO_HELD;
4729
4730pull:
4731 grow = skb_gro_offset(skb) - skb_headlen(skb);
4732 if (grow > 0)
4733 gro_pull_from_frag0(skb, grow);
4734ok:
4735 return ret;
4736
4737normal:
4738 ret = GRO_NORMAL;
4739 goto pull;
4740}
4741
4742struct packet_offload *gro_find_receive_by_type(__be16 type)
4743{
4744 struct list_head *offload_head = &offload_base;
4745 struct packet_offload *ptype;
4746
4747 list_for_each_entry_rcu(ptype, offload_head, list) {
4748 if (ptype->type != type || !ptype->callbacks.gro_receive)
4749 continue;
4750 return ptype;
4751 }
4752 return NULL;
4753}
4754EXPORT_SYMBOL(gro_find_receive_by_type);
4755
4756struct packet_offload *gro_find_complete_by_type(__be16 type)
4757{
4758 struct list_head *offload_head = &offload_base;
4759 struct packet_offload *ptype;
4760
4761 list_for_each_entry_rcu(ptype, offload_head, list) {
4762 if (ptype->type != type || !ptype->callbacks.gro_complete)
4763 continue;
4764 return ptype;
4765 }
4766 return NULL;
4767}
4768EXPORT_SYMBOL(gro_find_complete_by_type);
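
/* Usage sketch (illustrative only): encapsulation offloads use these
 * lookups from the GRO path (rcu read side) to hand off to the inner
 * protocol; a tunnel's gro_complete() might do roughly
 *
 *	struct packet_offload *po;
 *
 *	po = gro_find_complete_by_type(htons(ETH_P_IP));
 *	if (po)
 *		err = po->callbacks.gro_complete(skb, nhoff + hdrlen);
 *
 * where nhoff and hdrlen are the caller's offsets into the packet.
 */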
4769
4770static void napi_skb_free_stolen_head(struct sk_buff *skb)
4771{
4772 skb_dst_drop(skb);
4773 secpath_reset(skb);
4774 kmem_cache_free(skbuff_head_cache, skb);
4775}
4776
4777static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
4778{
4779 switch (ret) {
4780 case GRO_NORMAL:
4781 if (netif_receive_skb_internal(skb))
4782 ret = GRO_DROP;
4783 break;
4784
4785 case GRO_DROP:
4786 kfree_skb(skb);
4787 break;
4788
4789 case GRO_MERGED_FREE:
4790 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
4791 napi_skb_free_stolen_head(skb);
4792 else
4793 __kfree_skb(skb);
4794 break;
4795
4796 case GRO_HELD:
4797 case GRO_MERGED:
4798 case GRO_CONSUMED:
4799 break;
4800 }
4801
4802 return ret;
4803}
4804
4805gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
4806{
4807 skb_mark_napi_id(skb, napi);
4808 trace_napi_gro_receive_entry(skb);
4809
4810 skb_gro_reset_offset(skb);
4811
4812 return napi_skb_finish(dev_gro_receive(napi, skb), skb);
4813}
4814EXPORT_SYMBOL(napi_gro_receive);
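
/* Usage sketch (illustrative only): a NAPI driver's poll routine
 * typically feeds received frames to GRO like this, with
 * fetch_rx_frame() standing in for hypothetical ring-cleaning code:
 *
 *	while (budget_left && (skb = fetch_rx_frame(ring))) {
 *		skb->protocol = eth_type_trans(skb, netdev);
 *		napi_gro_receive(&priv->napi, skb);
 *		budget_left--;
 *	}
 */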
4815
4816static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
4817{
4818 if (unlikely(skb->pfmemalloc)) {
4819 consume_skb(skb);
4820 return;
4821 }
4822 __skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
4824 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
4825 skb->vlan_tci = 0;
4826 skb->dev = napi->dev;
4827 skb->skb_iif = 0;
4828 skb->encapsulation = 0;
4829 skb_shinfo(skb)->gso_type = 0;
4830 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
4831 secpath_reset(skb);
4832
4833 napi->skb = skb;
4834}
4835
4836struct sk_buff *napi_get_frags(struct napi_struct *napi)
4837{
4838 struct sk_buff *skb = napi->skb;
4839
4840 if (!skb) {
4841 skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
4842 if (skb) {
4843 napi->skb = skb;
4844 skb_mark_napi_id(skb, napi);
4845 }
4846 }
4847 return skb;
4848}
4849EXPORT_SYMBOL(napi_get_frags);
4850
4851static gro_result_t napi_frags_finish(struct napi_struct *napi,
4852 struct sk_buff *skb,
4853 gro_result_t ret)
4854{
4855 switch (ret) {
4856 case GRO_NORMAL:
4857 case GRO_HELD:
4858 __skb_push(skb, ETH_HLEN);
4859 skb->protocol = eth_type_trans(skb, skb->dev);
4860 if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
4861 ret = GRO_DROP;
4862 break;
4863
4864 case GRO_DROP:
4865 napi_reuse_skb(napi, skb);
4866 break;
4867
4868 case GRO_MERGED_FREE:
4869 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
4870 napi_skb_free_stolen_head(skb);
4871 else
4872 napi_reuse_skb(napi, skb);
4873 break;
4874
4875 case GRO_MERGED:
4876 case GRO_CONSUMED:
4877 break;
4878 }
4879
4880 return ret;
4881}
4882
/* Upper GRO stack assumes network header starts at gro_offset=0
 * Drivers could call both napi_gro_frags() and napi_gro_receive()
 * We copy ethernet header into skb->data to have a common layout.
 */
4887static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
4888{
4889 struct sk_buff *skb = napi->skb;
4890 const struct ethhdr *eth;
4891 unsigned int hlen = sizeof(*eth);
4892
4893 napi->skb = NULL;
4894
4895 skb_reset_mac_header(skb);
4896 skb_gro_reset_offset(skb);
4897
4898 eth = skb_gro_header_fast(skb, 0);
4899 if (unlikely(skb_gro_header_hard(skb, hlen))) {
4900 eth = skb_gro_header_slow(skb, hlen, 0);
4901 if (unlikely(!eth)) {
4902 net_warn_ratelimited("%s: dropping impossible skb from %s\n",
4903 __func__, napi->dev->name);
4904 napi_reuse_skb(napi, skb);
4905 return NULL;
4906 }
4907 } else {
4908 gro_pull_from_frag0(skb, hlen);
4909 NAPI_GRO_CB(skb)->frag0 += hlen;
4910 NAPI_GRO_CB(skb)->frag0_len -= hlen;
4911 }
4912 __skb_pull(skb, hlen);
4913
	/*
	 * This works because the only protocols we care about don't require
	 * special handling.
	 * We'll fix it up properly in napi_frags_finish()
	 */
4919 skb->protocol = eth->h_proto;
4920
4921 return skb;
4922}
4923
4924gro_result_t napi_gro_frags(struct napi_struct *napi)
4925{
4926 struct sk_buff *skb = napi_frags_skb(napi);
4927
4928 if (!skb)
4929 return GRO_DROP;
4930
4931 trace_napi_gro_frags_entry(skb);
4932
4933 return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
4934}
4935EXPORT_SYMBOL(napi_gro_frags);
4936
/* Compute the packet checksum over the GRO window and fold it with the
 * pseudo header checksum the caller left in NAPI_GRO_CB(skb)->csum.
 */
4940__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
4941{
4942 __wsum wsum;
4943 __sum16 sum;
4944
4945 wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
4946
	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
4948 sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
4949 if (likely(!sum)) {
4950 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
4951 !skb->csum_complete_sw)
4952 netdev_rx_csum_fault(skb->dev);
4953 }
4954
4955 NAPI_GRO_CB(skb)->csum = wsum;
4956 NAPI_GRO_CB(skb)->csum_valid = 1;
4957
4958 return sum;
4959}
4960EXPORT_SYMBOL(__skb_gro_checksum_complete);
4961
4962static void net_rps_send_ipi(struct softnet_data *remsd)
4963{
4964#ifdef CONFIG_RPS
4965 while (remsd) {
4966 struct softnet_data *next = remsd->rps_ipi_next;
4967
4968 if (cpu_online(remsd->cpu))
4969 smp_call_function_single_async(remsd->cpu, &remsd->csd);
4970 remsd = next;
4971 }
4972#endif
4973}
4974
/*
 * net_rps_action_and_irq_enable sends any pending IPI's for rps.
 * Note: called with local irq disabled, but exits with local irq enabled.
 */
4979static void net_rps_action_and_irq_enable(struct softnet_data *sd)
4980{
4981#ifdef CONFIG_RPS
4982 struct softnet_data *remsd = sd->rps_ipi_list;
4983
4984 if (remsd) {
4985 sd->rps_ipi_list = NULL;
4986
4987 local_irq_enable();
4988
		/* Send pending IPI's to kick RPS processing on remote cpus. */
4990 net_rps_send_ipi(remsd);
4991 } else
4992#endif
4993 local_irq_enable();
4994}
4995
4996static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
4997{
4998#ifdef CONFIG_RPS
4999 return sd->rps_ipi_list != NULL;
5000#else
5001 return false;
5002#endif
5003}
5004
5005static int process_backlog(struct napi_struct *napi, int quota)
5006{
5007 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
5008 bool again = true;
5009 int work = 0;
5010
	/* Check if we have pending ipi, its better to send them now,
	 * not waiting net_rx_action() end.
	 */
5014 if (sd_has_rps_ipi_waiting(sd)) {
5015 local_irq_disable();
5016 net_rps_action_and_irq_enable(sd);
5017 }
5018
5019 napi->weight = dev_rx_weight;
5020 while (again) {
5021 struct sk_buff *skb;
5022
5023 while ((skb = __skb_dequeue(&sd->process_queue))) {
5024 rcu_read_lock();
5025 __netif_receive_skb(skb);
5026 rcu_read_unlock();
5027 input_queue_head_incr(sd);
5028 if (++work >= quota)
5029 return work;
5030
5031 }
5032
5033 local_irq_disable();
5034 rps_lock(sd);
5035 if (skb_queue_empty(&sd->input_pkt_queue)) {
			/*
			 * Inline a custom version of __napi_complete().
			 * only current cpu owns and manipulates this napi,
			 * and NAPI_STATE_SCHED is the only possible flag set
			 * on backlog.
			 * We can use a plain write instead of clear_bit(),
			 * and we dont need an smp_mb() memory barrier.
			 */
5044 napi->state = 0;
5045 again = false;
5046 } else {
5047 skb_queue_splice_tail_init(&sd->input_pkt_queue,
5048 &sd->process_queue);
5049 }
5050 rps_unlock(sd);
5051 local_irq_enable();
5052 }
5053
5054 return work;
5055}
5056
/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run.
 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
 */
5064void __napi_schedule(struct napi_struct *n)
5065{
5066 unsigned long flags;
5067
5068 local_irq_save(flags);
5069 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
5070 local_irq_restore(flags);
5071}
5072EXPORT_SYMBOL(__napi_schedule);
5073
/**
 *	napi_schedule_prep - check if napi can be scheduled
 *	@n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * insure only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
5083bool napi_schedule_prep(struct napi_struct *n)
5084{
5085 unsigned long val, new;
5086
5087 do {
5088 val = READ_ONCE(n->state);
5089 if (unlikely(val & NAPIF_STATE_DISABLE))
5090 return false;
5091 new = val | NAPIF_STATE_SCHED;
5092
		/* Sets STATE_MISSED bit if STATE_SCHED was already set
		 * This was suggested by Alexander Duyck, as compiler
		 * emits better code than :
		 * if (val & NAPIF_STATE_SCHED)
		 *     new |= NAPIF_STATE_MISSED;
		 */
5099 new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
5100 NAPIF_STATE_MISSED;
5101 } while (cmpxchg(&n->state, val, new) != val);
5102
5103 return !(val & NAPIF_STATE_SCHED);
5104}
5105EXPORT_SYMBOL(napi_schedule_prep);
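
/* Worked example (illustrative, assuming the bit layout of this era:
 * NAPIF_STATE_SCHED is bit 0 and NAPIF_STATE_MISSED bit 1): when SCHED
 * is already set, (val & 1) / 1 * 2 evaluates to 2, so MISSED is ORed
 * into new; when SCHED is clear the expression is 0 and new is
 * unchanged - a branch-free equivalent of the commented-out "if".
 */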
5106
/**
 * __napi_schedule_irqoff - schedule for receive
 * @n: entry to schedule
 *
 * Variant of __napi_schedule() assuming hard irqs are masked
 */
5113void __napi_schedule_irqoff(struct napi_struct *n)
5114{
5115 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
5116}
5117EXPORT_SYMBOL(__napi_schedule_irqoff);
5118
5119bool napi_complete_done(struct napi_struct *n, int work_done)
5120{
5121 unsigned long flags, val, new;
5122
	/*
	 * 1) Don't let napi dequeue from the cpu poll list
	 *    just in case its running on a different cpu.
	 * 2) If we are busy polling, do nothing here, we have
	 *    the guarantee we will be called later.
	 */
5129 if (unlikely(n->state & (NAPIF_STATE_NPSVC |
5130 NAPIF_STATE_IN_BUSY_POLL)))
5131 return false;
5132
5133 if (n->gro_list) {
5134 unsigned long timeout = 0;
5135
5136 if (work_done)
5137 timeout = n->dev->gro_flush_timeout;
5138
5139 if (timeout)
5140 hrtimer_start(&n->timer, ns_to_ktime(timeout),
5141 HRTIMER_MODE_REL_PINNED);
5142 else
5143 napi_gro_flush(n, false);
5144 }
5145 if (unlikely(!list_empty(&n->poll_list))) {
		/* If n->poll_list is not empty, we need to mask irqs */
5147 local_irq_save(flags);
5148 list_del_init(&n->poll_list);
5149 local_irq_restore(flags);
5150 }
5151
5152 do {
5153 val = READ_ONCE(n->state);
5154
5155 WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));
5156
5157 new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED);
5158
		/* If STATE_MISSED was set, leave STATE_SCHED set,
		 * because we will call napi->poll() one more time.
		 * This C code was suggested by Alexander Duyck to help gcc.
		 */
5163 new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
5164 NAPIF_STATE_SCHED;
5165 } while (cmpxchg(&n->state, val, new) != val);
5166
5167 if (unlikely(val & NAPIF_STATE_MISSED)) {
5168 __napi_schedule(n);
5169 return false;
5170 }
5171
5172 return true;
5173}
5174EXPORT_SYMBOL(napi_complete_done);
5175
/* must be called under rcu_read_lock(), as we dont take a reference */
5177static struct napi_struct *napi_by_id(unsigned int napi_id)
5178{
5179 unsigned int hash = napi_id % HASH_SIZE(napi_hash);
5180 struct napi_struct *napi;
5181
5182 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
5183 if (napi->napi_id == napi_id)
5184 return napi;
5185
5186 return NULL;
5187}
5188
5189#if defined(CONFIG_NET_RX_BUSY_POLL)
5190
5191#define BUSY_POLL_BUDGET 8
5192
5193static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
5194{
5195 int rc;
5196
	/* Busy polling means there is a high chance device driver hard irq
	 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
	 * set in napi_schedule_prep().
	 * Since we are about to call napi->poll() once more, we can safely
	 * clear NAPI_STATE_MISSED.
	 *
	 * Note: x86 could use a single "lock and ..." instruction
	 * to perform these two clear_bit()
	 */
5206 clear_bit(NAPI_STATE_MISSED, &napi->state);
5207 clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
5208
5209 local_bh_disable();
5210
	/* All we really want here is to re-enable device interrupts.
	 * Ideally, a new ndo_busy_poll_stop() could avoid another round.
	 */
5214 rc = napi->poll(napi, BUSY_POLL_BUDGET);
5215 netpoll_poll_unlock(have_poll_lock);
5216 if (rc == BUSY_POLL_BUDGET)
5217 __napi_schedule(napi);
5218 local_bh_enable();
5219}
5220
5221void napi_busy_loop(unsigned int napi_id,
5222 bool (*loop_end)(void *, unsigned long),
5223 void *loop_end_arg)
5224{
5225 unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
5226 int (*napi_poll)(struct napi_struct *napi, int budget);
5227 void *have_poll_lock = NULL;
5228 struct napi_struct *napi;
5229
5230restart:
5231 napi_poll = NULL;
5232
5233 rcu_read_lock();
5234
5235 napi = napi_by_id(napi_id);
5236 if (!napi)
5237 goto out;
5238
5239 preempt_disable();
5240 for (;;) {
5241 int work = 0;
5242
5243 local_bh_disable();
5244 if (!napi_poll) {
5245 unsigned long val = READ_ONCE(napi->state);
5246
			/* If multiple threads are competing for this napi,
			 * we avoid dirtying napi->state as much as we can.
			 */
5250 if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
5251 NAPIF_STATE_IN_BUSY_POLL))
5252 goto count;
5253 if (cmpxchg(&napi->state, val,
5254 val | NAPIF_STATE_IN_BUSY_POLL |
5255 NAPIF_STATE_SCHED) != val)
5256 goto count;
5257 have_poll_lock = netpoll_poll_lock(napi);
5258 napi_poll = napi->poll;
5259 }
5260 work = napi_poll(napi, BUSY_POLL_BUDGET);
5261 trace_napi_poll(napi, work, BUSY_POLL_BUDGET);
5262count:
5263 if (work > 0)
5264 __NET_ADD_STATS(dev_net(napi->dev),
5265 LINUX_MIB_BUSYPOLLRXPACKETS, work);
5266 local_bh_enable();
5267
5268 if (!loop_end || loop_end(loop_end_arg, start_time))
5269 break;
5270
5271 if (unlikely(need_resched())) {
5272 if (napi_poll)
5273 busy_poll_stop(napi, have_poll_lock);
5274 preempt_enable();
5275 rcu_read_unlock();
5276 cond_resched();
5277 if (loop_end(loop_end_arg, start_time))
5278 return;
5279 goto restart;
5280 }
5281 cpu_relax();
5282 }
5283 if (napi_poll)
5284 busy_poll_stop(napi, have_poll_lock);
5285 preempt_enable();
5286out:
5287 rcu_read_unlock();
5288}
5289EXPORT_SYMBOL(napi_busy_loop);
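
/* Usage sketch (illustrative only, all names hypothetical): a caller
 * supplies a loop_end callback matching the signature above to bound
 * the busy poll, e.g.
 *
 *	static bool my_loop_end(void *arg, unsigned long start_time)
 *	{
 *		return READ_ONCE(*(bool *)arg);	// stop flag set elsewhere
 *	}
 *
 *	napi_busy_loop(napi_id, my_loop_end, &stop_flag);
 *
 * The socket layer does the equivalent with a timeout check against
 * start_time.
 */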
5290
#endif /* CONFIG_NET_RX_BUSY_POLL */
5292
5293static void napi_hash_add(struct napi_struct *napi)
5294{
5295 if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) ||
5296 test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
5297 return;
5298
5299 spin_lock(&napi_hash_lock);
5300
5301
	/* 0..NR_CPUS+1 range is reserved for sender_cpu use */
5303 if (unlikely(++napi_gen_id < MIN_NAPI_ID))
5304 napi_gen_id = MIN_NAPI_ID;
5305 } while (napi_by_id(napi_gen_id));
5306 napi->napi_id = napi_gen_id;
5307
5308 hlist_add_head_rcu(&napi->napi_hash_node,
5309 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
5310
5311 spin_unlock(&napi_hash_lock);
5312}
5313
/* Warning : caller is responsible to make sure rcu grace period
 * is respected before freeing memory containing @napi
 */
5317bool napi_hash_del(struct napi_struct *napi)
5318{
5319 bool rcu_sync_needed = false;
5320
5321 spin_lock(&napi_hash_lock);
5322
5323 if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) {
5324 rcu_sync_needed = true;
5325 hlist_del_rcu(&napi->napi_hash_node);
5326 }
5327 spin_unlock(&napi_hash_lock);
5328 return rcu_sync_needed;
5329}
5330EXPORT_SYMBOL_GPL(napi_hash_del);
5331
5332static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
5333{
5334 struct napi_struct *napi;
5335
5336 napi = container_of(timer, struct napi_struct, timer);
5337
	/* Note : we use a relaxed variant of napi_schedule_prep() not setting
	 * NAPI_STATE_MISSED, since we do not react to a device IRQ.
	 */
5341 if (napi->gro_list && !napi_disable_pending(napi) &&
5342 !test_and_set_bit(NAPI_STATE_SCHED, &napi->state))
5343 __napi_schedule_irqoff(napi);
5344
5345 return HRTIMER_NORESTART;
5346}
5347
5348void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
5349 int (*poll)(struct napi_struct *, int), int weight)
5350{
5351 INIT_LIST_HEAD(&napi->poll_list);
5352 hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
5353 napi->timer.function = napi_watchdog;
5354 napi->gro_count = 0;
5355 napi->gro_list = NULL;
5356 napi->skb = NULL;
5357 napi->poll = poll;
5358 if (weight > NAPI_POLL_WEIGHT)
5359 pr_err_once("netif_napi_add() called with weight %d on device %s\n",
5360 weight, dev->name);
5361 napi->weight = weight;
5362 list_add(&napi->dev_list, &dev->napi_list);
5363 napi->dev = dev;
5364#ifdef CONFIG_NETPOLL
5365 napi->poll_owner = -1;
5366#endif
5367 set_bit(NAPI_STATE_SCHED, &napi->state);
5368 napi_hash_add(napi);
5369}
5370EXPORT_SYMBOL(netif_napi_add);
5371
5372void napi_disable(struct napi_struct *n)
5373{
5374 might_sleep();
5375 set_bit(NAPI_STATE_DISABLE, &n->state);
5376
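 /* Sleep-wait until we own both the NAPI schedule (SCHED) and
  * netpoll service (NPSVC) bits, so no poll is in flight.
  */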
5377 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
5378 msleep(1);
5379 while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
5380 msleep(1);
5381
5382 hrtimer_cancel(&n->timer);
5383
5384 clear_bit(NAPI_STATE_DISABLE, &n->state);
5385}
5386EXPORT_SYMBOL(napi_disable);
5387
/* Must be called in process context */
5389void netif_napi_del(struct napi_struct *napi)
5390{
5391 might_sleep();
5392 if (napi_hash_del(napi))
5393 synchronize_net();
5394 list_del_init(&napi->dev_list);
5395 napi_free_frags(napi);
5396
5397 kfree_skb_list(napi->gro_list);
5398 napi->gro_list = NULL;
5399 napi->gro_count = 0;
5400}
5401EXPORT_SYMBOL(netif_napi_del);
5402
5403static int napi_poll(struct napi_struct *n, struct list_head *repoll)
5404{
5405 void *have;
5406 int work, weight;
5407
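 /* Detach from the softirq poll list; we own this napi until we
  * either complete it or re-add it to the repoll list below.
  */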
5408 list_del_init(&n->poll_list);
5409
5410 have = netpoll_poll_lock(n);
5411
5412 weight = n->weight;
5413
 /* This NAPI_STATE_SCHED test is for avoiding a race
  * with netpoll's poll_napi(). Only the entity which
  * obtains the lock and sees NAPI_STATE_SCHED set will
  * actually make the ->poll() call. Therefore we avoid
  * accidentally calling ->poll() when NAPI is not scheduled.
  */
5420 work = 0;
5421 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
5422 work = n->poll(n, weight);
5423 trace_napi_poll(n, work, weight);
5424 }
5425
5426 WARN_ON_ONCE(work > weight);
5427
5428 if (likely(work < weight))
5429 goto out_unlock;
5430
 /* Drivers must not modify the NAPI state if they
  * consume the entire weight. In such cases this code
  * still "owns" the NAPI instance and therefore can
  * move the instance around on the list at-will.
  */
5436 if (unlikely(napi_disable_pending(n))) {
5437 napi_complete(n);
5438 goto out_unlock;
5439 }
5440
5441 if (n->gro_list) {
 /* flush too old packets
  * If HZ < 1000, flush all packets.
  */
5445 napi_gro_flush(n, HZ >= 1000);
5446 }
5447
 /* Some drivers may have called napi_schedule
  * prior to exhausting their budget.
  */
5451 if (unlikely(!list_empty(&n->poll_list))) {
5452 pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
5453 n->dev ? n->dev->name : "backlog");
5454 goto out_unlock;
5455 }
5456
5457 list_add_tail(&n->poll_list, repoll);
5458
5459out_unlock:
5460 netpoll_poll_unlock(have);
5461
5462 return work;
5463}
5464
5465static __latent_entropy void net_rx_action(struct softirq_action *h)
5466{
5467 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
5468 unsigned long time_limit = jiffies +
5469 usecs_to_jiffies(netdev_budget_usecs);
5470 int budget = netdev_budget;
5471 LIST_HEAD(list);
5472 LIST_HEAD(repoll);
5473
5474 local_irq_disable();
5475 list_splice_init(&sd->poll_list, &list);
5476 local_irq_enable();
5477
5478 for (;;) {
5479 struct napi_struct *n;
5480
5481 if (list_empty(&list)) {
5482 if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
5483 goto out;
5484 break;
5485 }
5486
5487 n = list_first_entry(&list, struct napi_struct, poll_list);
5488 budget -= napi_poll(n, &repoll);
5489
 /* If the softirq window is exhausted (packet budget spent or
  * time limit reached) then punt; remaining work is handled on
  * the next NET_RX_SOFTIRQ run.
  */
5494 if (unlikely(budget <= 0 ||
5495 time_after_eq(jiffies, time_limit))) {
5496 sd->time_squeeze++;
5497 break;
5498 }
5499 }
5500
5501 local_irq_disable();
5502
5503 list_splice_tail_init(&sd->poll_list, &list);
5504 list_splice_tail(&repoll, &list);
5505 list_splice(&list, &sd->poll_list);
5506 if (!list_empty(&sd->poll_list))
5507 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
5508
5509 net_rps_action_and_irq_enable(sd);
5510out:
5511 __kfree_skb_flush();
5512}
5513
5514struct netdev_adjacent {
5515 struct net_device *dev;
5516
 /* upper master flag, there can only be one master device per list */
5518 bool master;
5519
 /* counter for the number of times this device was added to us */
5521 u16 ref_nr;
5522
 /* private field for the users */
5524 void *private;
5525
5526 struct list_head list;
5527 struct rcu_head rcu;
5528};
5529
5530static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
5531 struct list_head *adj_list)
5532{
5533 struct netdev_adjacent *adj;
5534
5535 list_for_each_entry(adj, adj_list, list) {
5536 if (adj->dev == adj_dev)
5537 return adj;
5538 }
5539 return NULL;
5540}
5541
5542static int __netdev_has_upper_dev(struct net_device *upper_dev, void *data)
5543{
5544 struct net_device *dev = data;
5545
5546 return upper_dev == dev;
5547}
5548
/**
 * netdev_has_upper_dev - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to specified upper device and return true
 * in case it is. Note that this checks the entire upper device chain.
 * The caller must hold the RTNL lock.
 */
5558bool netdev_has_upper_dev(struct net_device *dev,
5559 struct net_device *upper_dev)
5560{
5561 ASSERT_RTNL();
5562
5563 return netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
5564 upper_dev);
5565}
5566EXPORT_SYMBOL(netdev_has_upper_dev);
5567
/**
 * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to specified upper device and return true
 * in case it is. Note that this checks the entire upper device chain.
 * The caller must hold rcu lock.
 */
5578bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
5579 struct net_device *upper_dev)
5580{
5581 return !!netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
5582 upper_dev);
5583}
5584EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
5585
/**
 * netdev_has_any_upper_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to an upper device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
5593static bool netdev_has_any_upper_dev(struct net_device *dev)
5594{
5595 ASSERT_RTNL();
5596
5597 return !list_empty(&dev->adj_list.upper);
5598}
5599
/**
 * netdev_master_upper_dev_get - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RTNL lock.
 */
5607struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
5608{
5609 struct netdev_adjacent *upper;
5610
5611 ASSERT_RTNL();
5612
5613 if (list_empty(&dev->adj_list.upper))
5614 return NULL;
5615
5616 upper = list_first_entry(&dev->adj_list.upper,
5617 struct netdev_adjacent, list);
5618 if (likely(upper->master))
5619 return upper->dev;
5620 return NULL;
5621}
5622EXPORT_SYMBOL(netdev_master_upper_dev_get);
5623
/**
 * netdev_has_any_lower_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to a lower device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
5631static bool netdev_has_any_lower_dev(struct net_device *dev)
5632{
5633 ASSERT_RTNL();
5634
5635 return !list_empty(&dev->adj_list.lower);
5636}
5637
5638void *netdev_adjacent_get_private(struct list_head *adj_list)
5639{
5640 struct netdev_adjacent *adj;
5641
5642 adj = list_entry(adj_list, struct netdev_adjacent, list);
5643
5644 return adj->private;
5645}
5646EXPORT_SYMBOL(netdev_adjacent_get_private);
5647
/**
 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next device from the dev's upper list, starting from iter
 * position. The caller must hold RCU read lock.
 */
5656struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
5657 struct list_head **iter)
5658{
5659 struct netdev_adjacent *upper;
5660
5661 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
5662
5663 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
5664
5665 if (&upper->list == &dev->adj_list.upper)
5666 return NULL;
5667
5668 *iter = &upper->list;
5669
5670 return upper->dev;
5671}
5672EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
5673
5674static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
5675 struct list_head **iter)
5676{
5677 struct netdev_adjacent *upper;
5678
5679 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
5680
5681 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
5682
5683 if (&upper->list == &dev->adj_list.upper)
5684 return NULL;
5685
5686 *iter = &upper->list;
5687
5688 return upper->dev;
5689}
5690
5691int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
5692 int (*fn)(struct net_device *dev,
5693 void *data),
5694 void *data)
5695{
5696 struct net_device *udev;
5697 struct list_head *iter;
5698 int ret;
5699
5700 for (iter = &dev->adj_list.upper,
5701 udev = netdev_next_upper_dev_rcu(dev, &iter);
5702 udev;
5703 udev = netdev_next_upper_dev_rcu(dev, &iter)) {
 /* first is the upper device itself */
5705 ret = fn(udev, data);
5706 if (ret)
5707 return ret;
5708
 /* then look at all of its upper devices */
5710 ret = netdev_walk_all_upper_dev_rcu(udev, fn, data);
5711 if (ret)
5712 return ret;
5713 }
5714
5715 return 0;
5716}
5717EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
5718
/**
 * netdev_lower_get_next_private - Get the next ->private from the
 *				   lower neighbour list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must hold either the RTNL
 * lock or its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
 */
5730void *netdev_lower_get_next_private(struct net_device *dev,
5731 struct list_head **iter)
5732{
5733 struct netdev_adjacent *lower;
5734
5735 lower = list_entry(*iter, struct netdev_adjacent, list);
5736
5737 if (&lower->list == &dev->adj_list.lower)
5738 return NULL;
5739
5740 *iter = lower->list.next;
5741
5742 return lower->private;
5743}
5744EXPORT_SYMBOL(netdev_lower_get_next_private);
5745
/**
 * netdev_lower_get_next_private_rcu - Get the next ->private from the
 *				       lower neighbour list, RCU variant
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must hold RCU read lock.
 */
5756void *netdev_lower_get_next_private_rcu(struct net_device *dev,
5757 struct list_head **iter)
5758{
5759 struct netdev_adjacent *lower;
5760
5761 WARN_ON_ONCE(!rcu_read_lock_held());
5762
5763 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
5764
5765 if (&lower->list == &dev->adj_list.lower)
5766 return NULL;
5767
5768 *iter = &lower->list;
5769
5770 return lower->private;
5771}
5772EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
5773
/**
 * netdev_lower_get_next - Get the next device from the lower neighbour
 *			   list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent from the dev's lower neighbour
 * list, starting from iter position. The caller must hold RTNL lock or
 * its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
 */
5785void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
5786{
5787 struct netdev_adjacent *lower;
5788
5789 lower = list_entry(*iter, struct netdev_adjacent, list);
5790
5791 if (&lower->list == &dev->adj_list.lower)
5792 return NULL;
5793
5794 *iter = lower->list.next;
5795
5796 return lower->dev;
5797}
5798EXPORT_SYMBOL(netdev_lower_get_next);
5799
5800static struct net_device *netdev_next_lower_dev(struct net_device *dev,
5801 struct list_head **iter)
5802{
5803 struct netdev_adjacent *lower;
5804
5805 lower = list_entry((*iter)->next, struct netdev_adjacent, list);
5806
5807 if (&lower->list == &dev->adj_list.lower)
5808 return NULL;
5809
5810 *iter = &lower->list;
5811
5812 return lower->dev;
5813}
5814
5815int netdev_walk_all_lower_dev(struct net_device *dev,
5816 int (*fn)(struct net_device *dev,
5817 void *data),
5818 void *data)
5819{
5820 struct net_device *ldev;
5821 struct list_head *iter;
5822 int ret;
5823
5824 for (iter = &dev->adj_list.lower,
5825 ldev = netdev_next_lower_dev(dev, &iter);
5826 ldev;
5827 ldev = netdev_next_lower_dev(dev, &iter)) {
 /* first is the lower device itself */
5829 ret = fn(ldev, data);
5830 if (ret)
5831 return ret;
5832
 /* then look at all of its lower devices */
5834 ret = netdev_walk_all_lower_dev(ldev, fn, data);
5835 if (ret)
5836 return ret;
5837 }
5838
5839 return 0;
5840}
5841EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);
5842
5843static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
5844 struct list_head **iter)
5845{
5846 struct netdev_adjacent *lower;
5847
5848 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
5849 if (&lower->list == &dev->adj_list.lower)
5850 return NULL;
5851
5852 *iter = &lower->list;
5853
5854 return lower->dev;
5855}
5856
5857int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
5858 int (*fn)(struct net_device *dev,
5859 void *data),
5860 void *data)
5861{
5862 struct net_device *ldev;
5863 struct list_head *iter;
5864 int ret;
5865
5866 for (iter = &dev->adj_list.lower,
5867 ldev = netdev_next_lower_dev_rcu(dev, &iter);
5868 ldev;
5869 ldev = netdev_next_lower_dev_rcu(dev, &iter)) {
 /* first is the lower device itself */
5871 ret = fn(ldev, data);
5872 if (ret)
5873 return ret;
5874
 /* then look at all of its lower devices */
5876 ret = netdev_walk_all_lower_dev_rcu(ldev, fn, data);
5877 if (ret)
5878 return ret;
5879 }
5880
5881 return 0;
5882}
5883EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);
5884
/**
 * netdev_lower_get_first_private_rcu - Get the first ->private from the
 *					lower neighbour list, RCU variant
 * @dev: device
 *
 * Gets the first netdev_adjacent->private from the dev's lower neighbour
 * list. The caller must hold RCU read lock.
 */
5894void *netdev_lower_get_first_private_rcu(struct net_device *dev)
5895{
5896 struct netdev_adjacent *lower;
5897
5898 lower = list_first_or_null_rcu(&dev->adj_list.lower,
5899 struct netdev_adjacent, list);
5900 if (lower)
5901 return lower->private;
5902 return NULL;
5903}
5904EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
5905
/**
 * netdev_master_upper_dev_get_rcu - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RCU read lock.
 */
5913struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
5914{
5915 struct netdev_adjacent *upper;
5916
5917 upper = list_first_or_null_rcu(&dev->adj_list.upper,
5918 struct netdev_adjacent, list);
5919 if (upper && likely(upper->master))
5920 return upper->dev;
5921 return NULL;
5922}
5923EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
5924
5925static int netdev_adjacent_sysfs_add(struct net_device *dev,
5926 struct net_device *adj_dev,
5927 struct list_head *dev_list)
5928{
5929 char linkname[IFNAMSIZ+7];
5930
5931 sprintf(linkname, dev_list == &dev->adj_list.upper ?
5932 "upper_%s" : "lower_%s", adj_dev->name);
5933 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
5934 linkname);
5935}
5936static void netdev_adjacent_sysfs_del(struct net_device *dev,
5937 char *name,
5938 struct list_head *dev_list)
5939{
5940 char linkname[IFNAMSIZ+7];
5941
5942 sprintf(linkname, dev_list == &dev->adj_list.upper ?
5943 "upper_%s" : "lower_%s", name);
5944 sysfs_remove_link(&(dev->dev.kobj), linkname);
5945}
5946
5947static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
5948 struct net_device *adj_dev,
5949 struct list_head *dev_list)
5950{
5951 return (dev_list == &dev->adj_list.upper ||
5952 dev_list == &dev->adj_list.lower) &&
5953 net_eq(dev_net(dev), dev_net(adj_dev));
5954}
5955
5956static int __netdev_adjacent_dev_insert(struct net_device *dev,
5957 struct net_device *adj_dev,
5958 struct list_head *dev_list,
5959 void *private, bool master)
5960{
5961 struct netdev_adjacent *adj;
5962 int ret;
5963
5964 adj = __netdev_find_adj(adj_dev, dev_list);
5965
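 /* Adjacency already exists; just take another reference. */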
5966 if (adj) {
5967 adj->ref_nr += 1;
5968 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
5969 dev->name, adj_dev->name, adj->ref_nr);
5970
5971 return 0;
5972 }
5973
5974 adj = kmalloc(sizeof(*adj), GFP_KERNEL);
5975 if (!adj)
5976 return -ENOMEM;
5977
5978 adj->dev = adj_dev;
5979 adj->master = master;
5980 adj->ref_nr = 1;
5981 adj->private = private;
5982 dev_hold(adj_dev);
5983
5984 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
5985 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);
5986
5987 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
5988 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
5989 if (ret)
5990 goto free_adj;
5991 }
5992
 /* Ensure that master link is always the first item in list. */
5994 if (master) {
5995 ret = sysfs_create_link(&(dev->dev.kobj),
5996 &(adj_dev->dev.kobj), "master");
5997 if (ret)
5998 goto remove_symlinks;
5999
6000 list_add_rcu(&adj->list, dev_list);
6001 } else {
6002 list_add_tail_rcu(&adj->list, dev_list);
6003 }
6004
6005 return 0;
6006
6007remove_symlinks:
6008 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
6009 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
6010free_adj:
6011 kfree(adj);
6012 dev_put(adj_dev);
6013
6014 return ret;
6015}
6016
6017static void __netdev_adjacent_dev_remove(struct net_device *dev,
6018 struct net_device *adj_dev,
6019 u16 ref_nr,
6020 struct list_head *dev_list)
6021{
6022 struct netdev_adjacent *adj;
6023
6024 pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
6025 dev->name, adj_dev->name, ref_nr);
6026
6027 adj = __netdev_find_adj(adj_dev, dev_list);
6028
6029 if (!adj) {
6030 pr_err("Adjacency does not exist for device %s from %s\n",
6031 dev->name, adj_dev->name);
6032 WARN_ON(1);
6033 return;
6034 }
6035
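 /* Dropping fewer references than are held: only decrement. */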
6036 if (adj->ref_nr > ref_nr) {
6037 pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
6038 dev->name, adj_dev->name, ref_nr,
6039 adj->ref_nr - ref_nr);
6040 adj->ref_nr -= ref_nr;
6041 return;
6042 }
6043
6044 if (adj->master)
6045 sysfs_remove_link(&(dev->dev.kobj), "master");
6046
6047 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
6048 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
6049
6050 list_del_rcu(&adj->list);
6051 pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
6052 adj_dev->name, dev->name, adj_dev->name);
6053 dev_put(adj_dev);
6054 kfree_rcu(adj, rcu);
6055}
6056
6057static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
6058 struct net_device *upper_dev,
6059 struct list_head *up_list,
6060 struct list_head *down_list,
6061 void *private, bool master)
6062{
6063 int ret;
6064
6065 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
6066 private, master);
6067 if (ret)
6068 return ret;
6069
6070 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
6071 private, false);
6072 if (ret) {
6073 __netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
6074 return ret;
6075 }
6076
6077 return 0;
6078}
6079
6080static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
6081 struct net_device *upper_dev,
6082 u16 ref_nr,
6083 struct list_head *up_list,
6084 struct list_head *down_list)
6085{
6086 __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
6087 __netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
6088}
6089
6090static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
6091 struct net_device *upper_dev,
6092 void *private, bool master)
6093{
6094 return __netdev_adjacent_dev_link_lists(dev, upper_dev,
6095 &dev->adj_list.upper,
6096 &upper_dev->adj_list.lower,
6097 private, master);
6098}
6099
6100static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
6101 struct net_device *upper_dev)
6102{
6103 __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
6104 &dev->adj_list.upper,
6105 &upper_dev->adj_list.lower);
6106}
6107
6108static int __netdev_upper_dev_link(struct net_device *dev,
6109 struct net_device *upper_dev, bool master,
6110 void *upper_priv, void *upper_info)
6111{
6112 struct netdev_notifier_changeupper_info changeupper_info;
6113 int ret = 0;
6114
6115 ASSERT_RTNL();
6116
6117 if (dev == upper_dev)
6118 return -EBUSY;
6119
 /* To prevent loops, check if dev is not upper device to upper_dev. */
6121 if (netdev_has_upper_dev(upper_dev, dev))
6122 return -EBUSY;
6123
6124 if (netdev_has_upper_dev(dev, upper_dev))
6125 return -EEXIST;
6126
6127 if (master && netdev_master_upper_dev_get(dev))
6128 return -EBUSY;
6129
6130 changeupper_info.upper_dev = upper_dev;
6131 changeupper_info.master = master;
6132 changeupper_info.linking = true;
6133 changeupper_info.upper_info = upper_info;
6134
6135 ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev,
6136 &changeupper_info.info);
6137 ret = notifier_to_errno(ret);
6138 if (ret)
6139 return ret;
6140
6141 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
6142 master);
6143 if (ret)
6144 return ret;
6145
6146 ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
6147 &changeupper_info.info);
6148 ret = notifier_to_errno(ret);
6149 if (ret)
6150 goto rollback;
6151
6152 return 0;
6153
6154rollback:
6155 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
6156
6157 return ret;
6158}
6159
/**
 * netdev_upper_dev_link - Add a link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 *
 * Adds a link to device which is upper to this one. The caller must hold
 * the RTNL lock. On a failure a negative errno code is returned.
 * On success the reference counts are adjusted and the function
 * returns zero.
 */
6170int netdev_upper_dev_link(struct net_device *dev,
6171 struct net_device *upper_dev)
6172{
6173 return __netdev_upper_dev_link(dev, upper_dev, false, NULL, NULL);
6174}
6175EXPORT_SYMBOL(netdev_upper_dev_link);
6176
/**
 * netdev_master_upper_dev_link - Add a master link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 * @upper_priv: upper device private
 * @upper_info: upper info to be passed down via notifier
 *
 * Adds a link to device which is upper to this one. In this case, only
 * one master upper device can be linked, although other non-master devices
 * might be linked as well. The caller must hold the RTNL lock.
 * On a failure a negative errno code is returned. On success the reference
 * counts are adjusted and the function returns zero.
 */
6190int netdev_master_upper_dev_link(struct net_device *dev,
6191 struct net_device *upper_dev,
6192 void *upper_priv, void *upper_info)
6193{
6194 return __netdev_upper_dev_link(dev, upper_dev, true,
6195 upper_priv, upper_info);
6196}
6197EXPORT_SYMBOL(netdev_master_upper_dev_link);
6198
/**
 * netdev_upper_dev_unlink - Removes a link to upper device
 * @dev: device
 * @upper_dev: upper device to unlink
 *
 * Removes a link to device which is upper to this one. The caller must hold
 * the RTNL lock.
 */
6207void netdev_upper_dev_unlink(struct net_device *dev,
6208 struct net_device *upper_dev)
6209{
6210 struct netdev_notifier_changeupper_info changeupper_info;
6211
6212 ASSERT_RTNL();
6213
6214 changeupper_info.upper_dev = upper_dev;
6215 changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
6216 changeupper_info.linking = false;
6217
6218 call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev,
6219 &changeupper_info.info);
6220
6221 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
6222
6223 call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
6224 &changeupper_info.info);
6225}
6226EXPORT_SYMBOL(netdev_upper_dev_unlink);
6227
/**
 * netdev_bonding_info_change - Dispatch event about slave change
 * @dev: device
 * @bonding_info: info to dispatch
 *
 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
 * The caller must hold the RTNL lock.
 */
6236void netdev_bonding_info_change(struct net_device *dev,
6237 struct netdev_bonding_info *bonding_info)
6238{
6239 struct netdev_notifier_bonding_info info;
6240
6241 memcpy(&info.bonding_info, bonding_info,
6242 sizeof(struct netdev_bonding_info));
6243 call_netdevice_notifiers_info(NETDEV_BONDING_INFO, dev,
6244 &info.info);
6245}
6246EXPORT_SYMBOL(netdev_bonding_info_change);
6247
6248static void netdev_adjacent_add_links(struct net_device *dev)
6249{
6250 struct netdev_adjacent *iter;
6251
6252 struct net *net = dev_net(dev);
6253
6254 list_for_each_entry(iter, &dev->adj_list.upper, list) {
6255 if (!net_eq(net, dev_net(iter->dev)))
6256 continue;
6257 netdev_adjacent_sysfs_add(iter->dev, dev,
6258 &iter->dev->adj_list.lower);
6259 netdev_adjacent_sysfs_add(dev, iter->dev,
6260 &dev->adj_list.upper);
6261 }
6262
6263 list_for_each_entry(iter, &dev->adj_list.lower, list) {
6264 if (!net_eq(net, dev_net(iter->dev)))
6265 continue;
6266 netdev_adjacent_sysfs_add(iter->dev, dev,
6267 &iter->dev->adj_list.upper);
6268 netdev_adjacent_sysfs_add(dev, iter->dev,
6269 &dev->adj_list.lower);
6270 }
6271}
6272
6273static void netdev_adjacent_del_links(struct net_device *dev)
6274{
6275 struct netdev_adjacent *iter;
6276
6277 struct net *net = dev_net(dev);
6278
6279 list_for_each_entry(iter, &dev->adj_list.upper, list) {
6280 if (!net_eq(net, dev_net(iter->dev)))
6281 continue;
6282 netdev_adjacent_sysfs_del(iter->dev, dev->name,
6283 &iter->dev->adj_list.lower);
6284 netdev_adjacent_sysfs_del(dev, iter->dev->name,
6285 &dev->adj_list.upper);
6286 }
6287
6288 list_for_each_entry(iter, &dev->adj_list.lower, list) {
6289 if (!net_eq(net, dev_net(iter->dev)))
6290 continue;
6291 netdev_adjacent_sysfs_del(iter->dev, dev->name,
6292 &iter->dev->adj_list.upper);
6293 netdev_adjacent_sysfs_del(dev, iter->dev->name,
6294 &dev->adj_list.lower);
6295 }
6296}
6297
6298void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
6299{
6300 struct netdev_adjacent *iter;
6301
6302 struct net *net = dev_net(dev);
6303
6304 list_for_each_entry(iter, &dev->adj_list.upper, list) {
6305 if (!net_eq(net, dev_net(iter->dev)))
6306 continue;
6307 netdev_adjacent_sysfs_del(iter->dev, oldname,
6308 &iter->dev->adj_list.lower);
6309 netdev_adjacent_sysfs_add(iter->dev, dev,
6310 &iter->dev->adj_list.lower);
6311 }
6312
6313 list_for_each_entry(iter, &dev->adj_list.lower, list) {
6314 if (!net_eq(net, dev_net(iter->dev)))
6315 continue;
6316 netdev_adjacent_sysfs_del(iter->dev, oldname,
6317 &iter->dev->adj_list.upper);
6318 netdev_adjacent_sysfs_add(iter->dev, dev,
6319 &iter->dev->adj_list.upper);
6320 }
6321}
6322
6323void *netdev_lower_dev_get_private(struct net_device *dev,
6324 struct net_device *lower_dev)
6325{
6326 struct netdev_adjacent *lower;
6327
6328 if (!lower_dev)
6329 return NULL;
6330 lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
6331 if (!lower)
6332 return NULL;
6333
6334 return lower->private;
6335}
6336EXPORT_SYMBOL(netdev_lower_dev_get_private);
6337
/* Return the depth of the chain of lower devices below @dev: a device
 * with no lower devices has nest level 0. Used for lockdep nesting.
 */
6339int dev_get_nest_level(struct net_device *dev)
6340{
6341 struct net_device *lower = NULL;
6342 struct list_head *iter;
6343 int max_nest = -1;
6344 int nest;
6345
6346 ASSERT_RTNL();
6347
6348 netdev_for_each_lower_dev(dev, lower, iter) {
6349 nest = dev_get_nest_level(lower);
6350 if (max_nest < nest)
6351 max_nest = nest;
6352 }
6353
6354 return max_nest + 1;
6355}
6356EXPORT_SYMBOL(dev_get_nest_level);
6357
/**
 * netdev_lower_state_changed - Dispatch event about lower device state change
 * @lower_dev: device
 * @lower_state_info: state to dispatch
 *
 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
 * The caller must hold the RTNL lock.
 */
6366void netdev_lower_state_changed(struct net_device *lower_dev,
6367 void *lower_state_info)
6368{
6369 struct netdev_notifier_changelowerstate_info changelowerstate_info;
6370
6371 ASSERT_RTNL();
6372 changelowerstate_info.lower_state_info = lower_state_info;
6373 call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE, lower_dev,
6374 &changelowerstate_info.info);
6375}
6376EXPORT_SYMBOL(netdev_lower_state_changed);
6377
6378static void dev_change_rx_flags(struct net_device *dev, int flags)
6379{
6380 const struct net_device_ops *ops = dev->netdev_ops;
6381
6382 if (ops->ndo_change_rx_flags)
6383 ops->ndo_change_rx_flags(dev, flags);
6384}
6385
6386static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
6387{
6388 unsigned int old_flags = dev->flags;
6389 kuid_t uid;
6390 kgid_t gid;
6391
6392 ASSERT_RTNL();
6393
6394 dev->flags |= IFF_PROMISC;
6395 dev->promiscuity += inc;
6396 if (dev->promiscuity == 0) {
 /*
  * Avoid overflow.
  * If inc causes overflow, untouch promisc and return error.
  */
6401 if (inc < 0)
6402 dev->flags &= ~IFF_PROMISC;
6403 else {
6404 dev->promiscuity -= inc;
6405 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
6406 dev->name);
6407 return -EOVERFLOW;
6408 }
6409 }
6410 if (dev->flags != old_flags) {
6411 pr_info("device %s %s promiscuous mode\n",
6412 dev->name,
6413 dev->flags & IFF_PROMISC ? "entered" : "left");
6414 if (audit_enabled) {
6415 current_uid_gid(&uid, &gid);
6416 audit_log(current->audit_context, GFP_ATOMIC,
6417 AUDIT_ANOM_PROMISCUOUS,
6418 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
6419 dev->name, (dev->flags & IFF_PROMISC),
6420 (old_flags & IFF_PROMISC),
6421 from_kuid(&init_user_ns, audit_get_loginuid(current)),
6422 from_kuid(&init_user_ns, uid),
6423 from_kgid(&init_user_ns, gid),
6424 audit_get_sessionid(current));
6425 }
6426
6427 dev_change_rx_flags(dev, IFF_PROMISC);
6428 }
6429 if (notify)
6430 __dev_notify_flags(dev, old_flags, IFF_PROMISC);
6431 return 0;
6432}
6433
/**
 *	dev_set_promiscuity	- update promiscuity count on a device
 *	@dev: device
 *	@inc: modifier
 *
 *	Add or remove promiscuity from a device. While the count in the device
 *	remains above zero the interface remains promiscuous. Once it hits zero
 *	the device reverts back to normal filtering operation. A negative inc
 *	value is used to drop promiscuity on the device.
 *	Return 0 if successful or a negative errno code on error.
 */
6445int dev_set_promiscuity(struct net_device *dev, int inc)
6446{
6447 unsigned int old_flags = dev->flags;
6448 int err;
6449
6450 err = __dev_set_promiscuity(dev, inc, true);
6451 if (err < 0)
6452 return err;
6453 if (dev->flags != old_flags)
6454 dev_set_rx_mode(dev);
6455 return err;
6456}
6457EXPORT_SYMBOL(dev_set_promiscuity);
6458
6459static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
6460{
6461 unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
6462
6463 ASSERT_RTNL();
6464
6465 dev->flags |= IFF_ALLMULTI;
6466 dev->allmulti += inc;
6467 if (dev->allmulti == 0) {
 /*
  * Avoid overflow.
  * If inc causes overflow, untouch allmulti and return error.
  */
6472 if (inc < 0)
6473 dev->flags &= ~IFF_ALLMULTI;
6474 else {
6475 dev->allmulti -= inc;
6476 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
6477 dev->name);
6478 return -EOVERFLOW;
6479 }
6480 }
6481 if (dev->flags ^ old_flags) {
6482 dev_change_rx_flags(dev, IFF_ALLMULTI);
6483 dev_set_rx_mode(dev);
6484 if (notify)
6485 __dev_notify_flags(dev, old_flags,
6486 dev->gflags ^ old_gflags);
6487 }
6488 return 0;
6489}
6490
/**
 *	dev_set_allmulti	- update allmulti count on a device
 *	@dev: device
 *	@inc: modifier
 *
 *	Add or remove reception of all multicast frames to a device. While the
 *	count in the device remains above zero the interface remains listening
 *	to all interfaces. Once it hits zero the device reverts back to normal
 *	filtering operation. A negative @inc value is used to drop the counter
 *	when releasing a resource needing all multicasts.
 *	Return 0 if successful or a negative errno code on error.
 */
6504int dev_set_allmulti(struct net_device *dev, int inc)
6505{
6506 return __dev_set_allmulti(dev, inc, true);
6507}
6508EXPORT_SYMBOL(dev_set_allmulti);
6509
/*
 *	Upload unicast and multicast address lists to device and
 *	configure RX filtering. When the device doesn't support unicast
 *	filtering it is put in promiscuous mode while unicast addresses
 *	are enabled.
 */
6516void __dev_set_rx_mode(struct net_device *dev)
6517{
6518 const struct net_device_ops *ops = dev->netdev_ops;
6519
 /* dev_open will call this function so the list will stay sane. */
6521 if (!(dev->flags&IFF_UP))
6522 return;
6523
6524 if (!netif_device_present(dev))
6525 return;
6526
6527 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
 /* Unicast addresses changes may only happen under the rtnl,
  * therefore calling __dev_set_promiscuity here is safe.
  */
6531 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
6532 __dev_set_promiscuity(dev, 1, false);
6533 dev->uc_promisc = true;
6534 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
6535 __dev_set_promiscuity(dev, -1, false);
6536 dev->uc_promisc = false;
6537 }
6538 }
6539
6540 if (ops->ndo_set_rx_mode)
6541 ops->ndo_set_rx_mode(dev);
6542}
6543
6544void dev_set_rx_mode(struct net_device *dev)
6545{
6546 netif_addr_lock_bh(dev);
6547 __dev_set_rx_mode(dev);
6548 netif_addr_unlock_bh(dev);
6549}
6550
/**
 *	dev_get_flags - get flags reported to userspace
 *	@dev: device
 *
 *	Get the combination of flag bits exported through APIs to userspace.
 */
6557unsigned int dev_get_flags(const struct net_device *dev)
6558{
6559 unsigned int flags;
6560
6561 flags = (dev->flags & ~(IFF_PROMISC |
6562 IFF_ALLMULTI |
6563 IFF_RUNNING |
6564 IFF_LOWER_UP |
6565 IFF_DORMANT)) |
6566 (dev->gflags & (IFF_PROMISC |
6567 IFF_ALLMULTI));
6568
6569 if (netif_running(dev)) {
6570 if (netif_oper_up(dev))
6571 flags |= IFF_RUNNING;
6572 if (netif_carrier_ok(dev))
6573 flags |= IFF_LOWER_UP;
6574 if (netif_dormant(dev))
6575 flags |= IFF_DORMANT;
6576 }
6577
6578 return flags;
6579}
6580EXPORT_SYMBOL(dev_get_flags);
6581
6582int __dev_change_flags(struct net_device *dev, unsigned int flags)
6583{
6584 unsigned int old_flags = dev->flags;
6585 int ret;
6586
6587 ASSERT_RTNL();
6588
 /*
  * Set the flags on our device.
  */
6593 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
6594 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
6595 IFF_AUTOMEDIA)) |
6596 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
6597 IFF_ALLMULTI));
6598
 /*
  * Load in the correct multicast list now the flags have changed.
  */
6603 if ((old_flags ^ flags) & IFF_MULTICAST)
6604 dev_change_rx_flags(dev, IFF_MULTICAST);
6605
6606 dev_set_rx_mode(dev);
6607
 /*
  * Have we downed the interface. We handle IFF_UP ourselves
  * according to user attempts to set it, rather than blindly
  * setting it.
  */
6614 ret = 0;
6615 if ((old_flags ^ flags) & IFF_UP)
6616 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
6617
6618 if ((flags ^ dev->gflags) & IFF_PROMISC) {
6619 int inc = (flags & IFF_PROMISC) ? 1 : -1;
6620 unsigned int old_flags = dev->flags;
6621
6622 dev->gflags ^= IFF_PROMISC;
6623
6624 if (__dev_set_promiscuity(dev, inc, false) >= 0)
6625 if (dev->flags != old_flags)
6626 dev_set_rx_mode(dev);
6627 }
6628
 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
  * is important. Some (broken) drivers set IFF_PROMISC, when
  * IFF_ALLMULTI is requested not asking us and not reporting.
  */
6633 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
6634 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
6635
6636 dev->gflags ^= IFF_ALLMULTI;
6637 __dev_set_allmulti(dev, inc, false);
6638 }
6639
6640 return ret;
6641}
6642
6643void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
6644 unsigned int gchanges)
6645{
6646 unsigned int changes = dev->flags ^ old_flags;
6647
6648 if (gchanges)
6649 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
6650
6651 if (changes & IFF_UP) {
6652 if (dev->flags & IFF_UP)
6653 call_netdevice_notifiers(NETDEV_UP, dev);
6654 else
6655 call_netdevice_notifiers(NETDEV_DOWN, dev);
6656 }
6657
6658 if (dev->flags & IFF_UP &&
6659 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
6660 struct netdev_notifier_change_info change_info;
6661
6662 change_info.flags_changed = changes;
6663 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
6664 &change_info.info);
6665 }
6666}
6667
/**
 *	dev_change_flags - change device settings
 *	@dev: device
 *	@flags: device state flags
 *
 *	Change settings on device based state flags. The flags are
 *	in the userspace exported format.
 */
6676int dev_change_flags(struct net_device *dev, unsigned int flags)
6677{
6678 int ret;
6679 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
6680
6681 ret = __dev_change_flags(dev, flags);
6682 if (ret < 0)
6683 return ret;
6684
6685 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
6686 __dev_notify_flags(dev, old_flags, changes);
6687 return ret;
6688}
6689EXPORT_SYMBOL(dev_change_flags);
6690
6691static int __dev_set_mtu(struct net_device *dev, int new_mtu)
6692{
6693 const struct net_device_ops *ops = dev->netdev_ops;
6694
6695 if (ops->ndo_change_mtu)
6696 return ops->ndo_change_mtu(dev, new_mtu);
6697
6698 dev->mtu = new_mtu;
6699 return 0;
6700}
6701
/**
 *	dev_set_mtu - Change maximum transfer unit
 *	@dev: device
 *	@new_mtu: new transfer unit
 *
 *	Change the maximum transfer size of the network device.
 */
6709int dev_set_mtu(struct net_device *dev, int new_mtu)
6710{
6711 int err, orig_mtu;
6712
6713 if (new_mtu == dev->mtu)
6714 return 0;
6715
 /* MTU must be positive, and in range */
6717 if (new_mtu < 0 || new_mtu < dev->min_mtu) {
6718 net_err_ratelimited("%s: Invalid MTU %d requested, hw min %d\n",
6719 dev->name, new_mtu, dev->min_mtu);
6720 return -EINVAL;
6721 }
6722
6723 if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
6724 net_err_ratelimited("%s: Invalid MTU %d requested, hw max %d\n",
6725 dev->name, new_mtu, dev->max_mtu);
6726 return -EINVAL;
6727 }
6728
6729 if (!netif_device_present(dev))
6730 return -ENODEV;
6731
6732 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
6733 err = notifier_to_errno(err);
6734 if (err)
6735 return err;
6736
6737 orig_mtu = dev->mtu;
6738 err = __dev_set_mtu(dev, new_mtu);
6739
6740 if (!err) {
6741 err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
6742 err = notifier_to_errno(err);
6743 if (err) {
 /* setting mtu back and notifying everyone again,
  * so that they have a chance to revert changes.
  */
6747 __dev_set_mtu(dev, orig_mtu);
6748 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
6749 }
6750 }
6751 return err;
6752}
6753EXPORT_SYMBOL(dev_set_mtu);
6754
/**
 *	dev_set_group - Change group this device belongs to
 *	@dev: device
 *	@new_group: group this device should belong to
 */
6760void dev_set_group(struct net_device *dev, int new_group)
6761{
6762 dev->group = new_group;
6763}
6764EXPORT_SYMBOL(dev_set_group);
6765
/**
 *	dev_set_mac_address - Change Media Access Control Address
 *	@dev: device
 *	@sa: new address
 *
 *	Change the hardware (MAC) address of the device
 */
6773int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
6774{
6775 const struct net_device_ops *ops = dev->netdev_ops;
6776 int err;
6777
6778 if (!ops->ndo_set_mac_address)
6779 return -EOPNOTSUPP;
6780 if (sa->sa_family != dev->type)
6781 return -EINVAL;
6782 if (!netif_device_present(dev))
6783 return -ENODEV;
6784 err = ops->ndo_set_mac_address(dev, sa);
6785 if (err)
6786 return err;
6787 dev->addr_assign_type = NET_ADDR_SET;
6788 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
6789 add_device_randomness(dev->dev_addr, dev->addr_len);
6790 return 0;
6791}
6792EXPORT_SYMBOL(dev_set_mac_address);
6793
/**
 *	dev_change_carrier - Change device carrier
 *	@dev: device
 *	@new_carrier: new value
 *
 *	Change device carrier
 */
6801int dev_change_carrier(struct net_device *dev, bool new_carrier)
6802{
6803 const struct net_device_ops *ops = dev->netdev_ops;
6804
6805 if (!ops->ndo_change_carrier)
6806 return -EOPNOTSUPP;
6807 if (!netif_device_present(dev))
6808 return -ENODEV;
6809 return ops->ndo_change_carrier(dev, new_carrier);
6810}
6811EXPORT_SYMBOL(dev_change_carrier);
6812
/**
 *	dev_get_phys_port_id - Get device physical port ID
 *	@dev: device
 *	@ppid: port ID
 *
 *	Get device physical port ID
 */
6820int dev_get_phys_port_id(struct net_device *dev,
6821 struct netdev_phys_item_id *ppid)
6822{
6823 const struct net_device_ops *ops = dev->netdev_ops;
6824
6825 if (!ops->ndo_get_phys_port_id)
6826 return -EOPNOTSUPP;
6827 return ops->ndo_get_phys_port_id(dev, ppid);
6828}
6829EXPORT_SYMBOL(dev_get_phys_port_id);
6830
/**
 *	dev_get_phys_port_name - Get device physical port name
 *	@dev: device
 *	@name: port name
 *	@len: limit of bytes to copy to name
 *
 *	Get device physical port name
 */
6839int dev_get_phys_port_name(struct net_device *dev,
6840 char *name, size_t len)
6841{
6842 const struct net_device_ops *ops = dev->netdev_ops;
6843
6844 if (!ops->ndo_get_phys_port_name)
6845 return -EOPNOTSUPP;
6846 return ops->ndo_get_phys_port_name(dev, name, len);
6847}
6848EXPORT_SYMBOL(dev_get_phys_port_name);
6849
/**
 *	dev_change_proto_down - update protocol port state information
 *	@dev: device
 *	@proto_down: new value
 *
 *	This info can be used by switch drivers to set the phys state of the
 *	port.
 */
6858int dev_change_proto_down(struct net_device *dev, bool proto_down)
6859{
6860 const struct net_device_ops *ops = dev->netdev_ops;
6861
6862 if (!ops->ndo_change_proto_down)
6863 return -EOPNOTSUPP;
6864 if (!netif_device_present(dev))
6865 return -ENODEV;
6866 return ops->ndo_change_proto_down(dev, proto_down);
6867}
6868EXPORT_SYMBOL(dev_change_proto_down);
6869
6870bool __dev_xdp_attached(struct net_device *dev, xdp_op_t xdp_op)
6871{
6872 struct netdev_xdp xdp;
6873
6874 memset(&xdp, 0, sizeof(xdp));
6875 xdp.command = XDP_QUERY_PROG;
6876
 /* Query must always succeed. */
6878 WARN_ON(xdp_op(dev, &xdp) < 0);
6879 return xdp.prog_attached;
6880}
6881
6882static int dev_xdp_install(struct net_device *dev, xdp_op_t xdp_op,
6883 struct netlink_ext_ack *extack,
6884 struct bpf_prog *prog)
6885{
6886 struct netdev_xdp xdp;
6887
6888 memset(&xdp, 0, sizeof(xdp));
6889 xdp.command = XDP_SETUP_PROG;
6890 xdp.extack = extack;
6891 xdp.prog = prog;
6892
6893 return xdp_op(dev, &xdp);
6894}
6895
/**
 *	dev_change_xdp_fd - set or clear a bpf program for a device rx path
 *	@dev: device
 *	@extack: netlink extended ack
 *	@fd: new program fd or negative value to clear
 *	@flags: xdp-related flags
 *
 *	Set or clear a bpf program for a device
 */
6905int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
6906 int fd, u32 flags)
6907{
6908 const struct net_device_ops *ops = dev->netdev_ops;
6909 struct bpf_prog *prog = NULL;
6910 xdp_op_t xdp_op, xdp_chk;
6911 int err;
6912
6913 ASSERT_RTNL();
6914
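 /* Prefer the driver's native XDP hook; fall back to the generic
  * SKB-mode implementation when the driver has none, or when the
  * caller explicitly asked for SKB mode.
  */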
6915 xdp_op = xdp_chk = ops->ndo_xdp;
6916 if (!xdp_op && (flags & XDP_FLAGS_DRV_MODE))
6917 return -EOPNOTSUPP;
6918 if (!xdp_op || (flags & XDP_FLAGS_SKB_MODE))
6919 xdp_op = generic_xdp_install;
6920 if (xdp_op == xdp_chk)
6921 xdp_chk = generic_xdp_install;
6922
6923 if (fd >= 0) {
6924 if (xdp_chk && __dev_xdp_attached(dev, xdp_chk))
6925 return -EEXIST;
6926 if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) &&
6927 __dev_xdp_attached(dev, xdp_op))
6928 return -EBUSY;
6929
6930 prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP);
6931 if (IS_ERR(prog))
6932 return PTR_ERR(prog);
6933 }
6934
6935 err = dev_xdp_install(dev, xdp_op, extack, prog);
6936 if (err < 0 && prog)
6937 bpf_prog_put(prog);
6938
6939 return err;
6940}
6941
/**
 *	dev_new_index - allocate an ifindex
 *	@net: the applicable net namespace
 *
 *	Returns a suitable unique value for a new device interface
 *	number. The caller must hold the rtnl semaphore or the
 *	dev_base_lock to be sure it remains unique.
 */
6950static int dev_new_index(struct net *net)
6951{
6952 int ifindex = net->ifindex;
6953
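 /* Scan for a free ifindex, wrapping around and skipping zero. */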
6954 for (;;) {
6955 if (++ifindex <= 0)
6956 ifindex = 1;
6957 if (!__dev_get_by_index(net, ifindex))
6958 return net->ifindex = ifindex;
6959 }
6960}
6961
/* Delayed registration/unregistration */
6963static LIST_HEAD(net_todo_list);
6964DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
6965
6966static void net_set_todo(struct net_device *dev)
6967{
6968 list_add_tail(&dev->todo_list, &net_todo_list);
6969 dev_net(dev)->dev_unreg_count++;
6970}
6971
6972static void rollback_registered_many(struct list_head *head)
6973{
6974 struct net_device *dev, *tmp;
6975 LIST_HEAD(close_head);
6976
6977 BUG_ON(dev_boot_phase);
6978 ASSERT_RTNL();
6979
6980 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
 /* Some devices call without registering
  * for initialization unwind. Remove those
  * devices and proceed with the remaining.
  */
6985 if (dev->reg_state == NETREG_UNINITIALIZED) {
6986 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
6987 dev->name, dev);
6988
6989 WARN_ON(1);
6990 list_del(&dev->unreg_list);
6991 continue;
6992 }
6993 dev->dismantle = true;
6994 BUG_ON(dev->reg_state != NETREG_REGISTERED);
6995 }
6996
 /* If device is running, close it first. */
6998 list_for_each_entry(dev, head, unreg_list)
6999 list_add_tail(&dev->close_list, &close_head);
7000 dev_close_many(&close_head, true);
7001
7002 list_for_each_entry(dev, head, unreg_list) {
 /* And unlink it from device chain. */
7004 unlist_netdevice(dev);
7005
7006 dev->reg_state = NETREG_UNREGISTERING;
7007 }
7008 flush_all_backlogs();
7009
7010 synchronize_net();
7011
7012 list_for_each_entry(dev, head, unreg_list) {
7013 struct sk_buff *skb = NULL;
7014
 /* Shutdown queueing discipline. */
7016 dev_shutdown(dev);
7017
 /* Notify protocols, that we are about to destroy
  * this device. They should clean all the things.
  */
7022 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
7023
7024 if (!dev->rtnl_link_ops ||
7025 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
7026 skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U,
7027 GFP_KERNEL);
7028
 /*
  * Flush the unicast and multicast chains
  */
7032 dev_uc_flush(dev);
7033 dev_mc_flush(dev);
7034
7035 if (dev->netdev_ops->ndo_uninit)
7036 dev->netdev_ops->ndo_uninit(dev);
7037
7038 if (skb)
7039 rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
7040
 /* Notifier chain MUST detach us all upper devices. */
7042 WARN_ON(netdev_has_any_upper_dev(dev));
7043 WARN_ON(netdev_has_any_lower_dev(dev));
7044
 /* Remove entries from kobject tree */
7046 netdev_unregister_kobject(dev);
7047#ifdef CONFIG_XPS
 /* Remove XPS queueing entries */
7049 netif_reset_xps_queues_gt(dev, 0);
7050#endif
7051 }
7052
7053 synchronize_net();
7054
7055 list_for_each_entry(dev, head, unreg_list)
7056 dev_put(dev);
7057}
7058
7059static void rollback_registered(struct net_device *dev)
7060{
7061 LIST_HEAD(single);
7062
7063 list_add(&dev->unreg_list, &single);
7064 rollback_registered_many(&single);
7065 list_del(&single);
7066}
7067
7068static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
7069 struct net_device *upper, netdev_features_t features)
7070{
7071 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
7072 netdev_features_t feature;
7073 int feature_bit;
7074
7075 for_each_netdev_feature(&upper_disables, feature_bit) {
7076 feature = __NETIF_F_BIT(feature_bit);
 if (!(upper->wanted_features & feature) &&
 (features & feature)) {
7079 netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
7080 &feature, upper->name);
7081 features &= ~feature;
7082 }
7083 }
7084
7085 return features;
7086}
7087
7088static void netdev_sync_lower_features(struct net_device *upper,
7089 struct net_device *lower, netdev_features_t features)
7090{
7091 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
7092 netdev_features_t feature;
7093 int feature_bit;
7094
7095 for_each_netdev_feature(&upper_disables, feature_bit) {
7096 feature = __NETIF_F_BIT(feature_bit);
7097 if (!(features & feature) && (lower->features & feature)) {
7098 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
7099 &feature, lower->name);
7100 lower->wanted_features &= ~feature;
7101 netdev_update_features(lower);
7102
7103 if (unlikely(lower->features & feature))
7104 netdev_WARN(upper, "failed to disable %pNF on %s!\n",
7105 &feature, lower->name);
7106 }
7107 }
7108}
7109
7110static netdev_features_t netdev_fix_features(struct net_device *dev,
7111 netdev_features_t features)
7112{
 /* Fix illegal checksum combinations */
7114 if ((features & NETIF_F_HW_CSUM) &&
7115 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
7116 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
7117 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
7118 }
7119
 /* TSO requires that SG is present as well. */
7121 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
7122 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
7123 features &= ~NETIF_F_ALL_TSO;
7124 }
7125
7126 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
7127 !(features & NETIF_F_IP_CSUM)) {
7128 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
7129 features &= ~NETIF_F_TSO;
7130 features &= ~NETIF_F_TSO_ECN;
7131 }
7132
7133 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
7134 !(features & NETIF_F_IPV6_CSUM)) {
7135 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
7136 features &= ~NETIF_F_TSO6;
7137 }
7138
 /* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
7140 if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
7141 features &= ~NETIF_F_TSO_MANGLEID;
7142
 /* TSO ECN requires that TSO is present as well. */
7144 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
7145 features &= ~NETIF_F_TSO_ECN;
7146
 /* Software GSO depends on SG. */
7148 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
7149 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
7150 features &= ~NETIF_F_GSO;
7151 }
7152
 /* UFO needs SG and checksumming */
7154 if (features & NETIF_F_UFO) {
 /* maybe split UFO into V4 and V6? */
7156 if (!(features & NETIF_F_HW_CSUM) &&
7157 ((features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) !=
7158 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))) {
7159 netdev_dbg(dev,
7160 "Dropping NETIF_F_UFO since no checksum offload features.\n");
7161 features &= ~NETIF_F_UFO;
7162 }
7163
7164 if (!(features & NETIF_F_SG)) {
7165 netdev_dbg(dev,
7166 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
7167 features &= ~NETIF_F_UFO;
7168 }
7169 }
7170
 /* GSO partial features require GSO partial be set */
7172 if ((features & dev->gso_partial_features) &&
7173 !(features & NETIF_F_GSO_PARTIAL)) {
7174 netdev_dbg(dev,
7175 "Dropping partially supported GSO features since no GSO partial.\n");
7176 features &= ~dev->gso_partial_features;
7177 }
7178
7179 return features;
7180}
7181
7182int __netdev_update_features(struct net_device *dev)
7183{
7184 struct net_device *upper, *lower;
7185 netdev_features_t features;
7186 struct list_head *iter;
7187 int err = -1;
7188
7189 ASSERT_RTNL();
7190
7191 features = netdev_get_wanted_features(dev);
7192
7193 if (dev->netdev_ops->ndo_fix_features)
7194 features = dev->netdev_ops->ndo_fix_features(dev, features);
7195
 /* driver might be less strict about feature compatibility */
7197 features = netdev_fix_features(dev, features);
7198
 /* some features can't be enabled if they're off on an upper device */
7200 netdev_for_each_upper_dev_rcu(dev, upper, iter)
7201 features = netdev_sync_upper_features(dev, upper, features);
7202
7203 if (dev->features == features)
7204 goto sync_lower;
7205
7206 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
7207 &dev->features, &features);
7208
7209 if (dev->netdev_ops->ndo_set_features)
7210 err = dev->netdev_ops->ndo_set_features(dev, features);
7211 else
7212 err = 0;
7213
7214 if (unlikely(err < 0)) {
7215 netdev_err(dev,
7216 "set_features() failed (%d); wanted %pNF, left %pNF\n",
7217 err, &features, &dev->features);
 /* return non-0 since some features might have changed and
  * it's better to fire a spurious notification than miss it
  */
7221 return -1;
7222 }
7223
7224sync_lower:
 /* some features must be disabled on lower devices when disabled
  * on an upper device (think: bonding master or bridge)
  */
7228 netdev_for_each_lower_dev(dev, lower, iter)
7229 netdev_sync_lower_features(dev, lower, features);
7230
7231 if (!err)
7232 dev->features = features;
7233
7234 return err < 0 ? 0 : 1;
7235}
7236
/**
 *	netdev_update_features - recalculate device features
 *	@dev: the device to check
 *
 *	Recalculate dev->features set and send notifications if it
 *	has changed. Should be called after driver or hardware dependent
 *	conditions might have changed that influence the features.
 */
7245void netdev_update_features(struct net_device *dev)
7246{
7247 if (__netdev_update_features(dev))
7248 netdev_features_change(dev);
7249}
7250EXPORT_SYMBOL(netdev_update_features);
7251
/**
 *	netdev_change_features - recalculate device features
 *	@dev: the device to check
 *
 *	Recalculate dev->features set and send notifications even
 *	if they have not changed. Should be called instead of
 *	netdev_update_features() if also dev->vlan_features might
 *	have changed to allow the changes to be propagated to stacked
 *	VLAN devices.
 */
7262void netdev_change_features(struct net_device *dev)
7263{
7264 __netdev_update_features(dev);
7265 netdev_features_change(dev);
7266}
7267EXPORT_SYMBOL(netdev_change_features);
7268
/**
 *	netif_stacked_transfer_operstate -	transfer operstate
 *	@rootdev: the root or lower level device to transfer state from
 *	@dev: the device to transfer operstate to
 *
 *	Transfer operational state from root to device. This is normally
 *	called when a stacking relationship exists between the root
 *	device and the device (a leaf device).
 */
7278void netif_stacked_transfer_operstate(const struct net_device *rootdev,
7279 struct net_device *dev)
7280{
7281 if (rootdev->operstate == IF_OPER_DORMANT)
7282 netif_dormant_on(dev);
7283 else
7284 netif_dormant_off(dev);
7285
7286 if (netif_carrier_ok(rootdev))
7287 netif_carrier_on(dev);
7288 else
7289 netif_carrier_off(dev);
7290}
7291EXPORT_SYMBOL(netif_stacked_transfer_operstate);
7292
7293#ifdef CONFIG_SYSFS
7294static int netif_alloc_rx_queues(struct net_device *dev)
7295{
7296 unsigned int i, count = dev->num_rx_queues;
7297 struct netdev_rx_queue *rx;
7298 size_t sz = count * sizeof(*rx);
7299
7300 BUG_ON(count < 1);
7301
7302 rx = kvzalloc(sz, GFP_KERNEL | __GFP_REPEAT);
7303 if (!rx)
7304 return -ENOMEM;
7305
7306 dev->_rx = rx;
7307
7308 for (i = 0; i < count; i++)
7309 rx[i].dev = dev;
7310 return 0;
7311}
7312#endif
7313
7314static void netdev_init_one_queue(struct net_device *dev,
7315 struct netdev_queue *queue, void *_unused)
7316{
 /* Initialize queue lock */
7318 spin_lock_init(&queue->_xmit_lock);
7319 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
7320 queue->xmit_lock_owner = -1;
7321 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
7322 queue->dev = dev;
7323#ifdef CONFIG_BQL
7324 dql_init(&queue->dql, HZ);
7325#endif
7326}
7327
7328static void netif_free_tx_queues(struct net_device *dev)
7329{
7330 kvfree(dev->_tx);
7331}
7332
7333static int netif_alloc_netdev_queues(struct net_device *dev)
7334{
7335 unsigned int count = dev->num_tx_queues;
7336 struct netdev_queue *tx;
7337 size_t sz = count * sizeof(*tx);
7338
7339 if (count < 1 || count > 0xffff)
7340 return -EINVAL;
7341
7342 tx = kvzalloc(sz, GFP_KERNEL | __GFP_REPEAT);
7343 if (!tx)
7344 return -ENOMEM;
7345
7346 dev->_tx = tx;
7347
7348 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
7349 spin_lock_init(&dev->tx_global_lock);
7350
7351 return 0;
7352}
7353
7354void netif_tx_stop_all_queues(struct net_device *dev)
7355{
7356 unsigned int i;
7357
7358 for (i = 0; i < dev->num_tx_queues; i++) {
7359 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
7360
7361 netif_tx_stop_queue(txq);
7362 }
7363}
7364EXPORT_SYMBOL(netif_tx_stop_all_queues);
7365
/**
 *	register_netdevice	- register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	Callers must hold the rtnl semaphore. You may want
 *	register_netdev() instead of this.
 *
 *	BUGS:
 *	The locking appears insufficient to guarantee two parallel registers
 *	will not get the same name.
 */
7383int register_netdevice(struct net_device *dev)
7384{
7385 int ret;
7386 struct net *net = dev_net(dev);
7387
7388 BUG_ON(dev_boot_phase);
7389 ASSERT_RTNL();
7390
7391 might_sleep();
7392
 /* When net_device's are persistent, this will be fatal. */
7394 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
7395 BUG_ON(!net);
7396
7397 spin_lock_init(&dev->addr_list_lock);
7398 netdev_set_addr_lockdep_class(dev);
7399
7400 ret = dev_get_valid_name(net, dev, dev->name);
7401 if (ret < 0)
7402 goto out;
7403
 /* Init, if this function is available */
7405 if (dev->netdev_ops->ndo_init) {
7406 ret = dev->netdev_ops->ndo_init(dev);
7407 if (ret) {
7408 if (ret > 0)
7409 ret = -EIO;
7410 goto out;
7411 }
7412 }
7413
7414 if (((dev->hw_features | dev->features) &
7415 NETIF_F_HW_VLAN_CTAG_FILTER) &&
7416 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
7417 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
7418 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
7419 ret = -EINVAL;
7420 goto err_uninit;
7421 }
7422
7423 ret = -EBUSY;
7424 if (!dev->ifindex)
7425 dev->ifindex = dev_new_index(net);
7426 else if (__dev_get_by_index(net, dev->ifindex))
7427 goto err_uninit;
7428
 /* Transfer changeable features to wanted_features and enable
  * software offloads (GSO and GRO)
  */
7432 dev->hw_features |= NETIF_F_SOFT_FEATURES;
7433 dev->features |= NETIF_F_SOFT_FEATURES;
7434 dev->wanted_features = dev->features & dev->hw_features;
7435
7436 if (!(dev->flags & IFF_LOOPBACK))
7437 dev->hw_features |= NETIF_F_NOCACHE_COPY;
7438
 /* If IPv4 TCP segmentation offload is supported we should also
  * allow the device to enable segmenting the frame with the option
  * of ignoring a static IP ID value. This doesn't enable the
  * feature itself but allows the user to enable it later.
  */
7444 if (dev->hw_features & NETIF_F_TSO)
7445 dev->hw_features |= NETIF_F_TSO_MANGLEID;
7446 if (dev->vlan_features & NETIF_F_TSO)
7447 dev->vlan_features |= NETIF_F_TSO_MANGLEID;
7448 if (dev->mpls_features & NETIF_F_TSO)
7449 dev->mpls_features |= NETIF_F_TSO_MANGLEID;
7450 if (dev->hw_enc_features & NETIF_F_TSO)
7451 dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
7452
 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices. */
7455 dev->vlan_features |= NETIF_F_HIGHDMA;
7456
7457
 /* Make NETIF_F_SG inheritable to tunnel devices. */
7459 dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;
7460
7461
 /* Make NETIF_F_SG inheritable to MPLS. */
7463 dev->mpls_features |= NETIF_F_SG;
7464
7465 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
7466 ret = notifier_to_errno(ret);
7467 if (ret)
7468 goto err_uninit;
7469
7470 ret = netdev_register_kobject(dev);
7471 if (ret)
7472 goto err_uninit;
7473 dev->reg_state = NETREG_REGISTERED;
7474
7475 __netdev_update_features(dev);
7476
 /*
  * Default initial state at registry is that the
  * device is present.
  */
7482 set_bit(__LINK_STATE_PRESENT, &dev->state);
7483
7484 linkwatch_init_dev(dev);
7485
7486 dev_init_scheduler(dev);
7487 dev_hold(dev);
7488 list_netdevice(dev);
7489 add_device_randomness(dev->dev_addr, dev->addr_len);
7490
 /* If the device has permanent device address, driver should
  * set dev_addr and also addr_assign_type should be set to
  * NET_ADDR_PERM (default value).
  */
7495 if (dev->addr_assign_type == NET_ADDR_PERM)
7496 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
7497
 /* Notify protocols, that a new device appeared. */
7499 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
7500 ret = notifier_to_errno(ret);
7501 if (ret) {
7502 rollback_registered(dev);
7503 dev->reg_state = NETREG_UNREGISTERED;
7504 }
7505
 /*
  * Prevent userspace races by waiting until the network
  * device is fully setup before sending notifications.
  */
7509 if (!dev->rtnl_link_ops ||
7510 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
7511 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
7512
7513out:
7514 return ret;
7515
7516err_uninit:
7517 if (dev->netdev_ops->ndo_uninit)
7518 dev->netdev_ops->ndo_uninit(dev);
7519 if (dev->priv_destructor)
7520 dev->priv_destructor(dev);
7521 goto out;
7522}
7523EXPORT_SYMBOL(register_netdevice);
7524
/**
 *	init_dummy_netdev	- init a dummy network device for NAPI
 *	@dev: device to init
 *
 *	This takes a network device structure and initializes the minimum
 *	amount of fields so it can be used to schedule NAPI polls without
 *	registering a full blown interface. This is to be used by drivers
 *	that need to tie several hardware interfaces to a single NAPI
 *	poll scheduler due to HW limitations.
 */
7535int init_dummy_netdev(struct net_device *dev)
7536{
 /* Clear everything. Note we don't initialize spinlocks
  * as they are not supposed to be taken by any of the
  * NAPI code and this dummy netdev is supposed to be
  * only ever used for NAPI polls
  */
7542 memset(dev, 0, sizeof(struct net_device));
7543
 /* make sure we BUG if trying to hit standard
  * register/unregister code path
  */
7547 dev->reg_state = NETREG_DUMMY;
7548
 /* NAPI wants this */
7550 INIT_LIST_HEAD(&dev->napi_list);
7551
 /* a dummy interface is started by default */
7553 set_bit(__LINK_STATE_PRESENT, &dev->state);
7554 set_bit(__LINK_STATE_START, &dev->state);
7555
 /* Note : We dont allocate pcpu_refcnt for dummy devices,
  * because users of this 'device' dont need to change
  * its refcount.
  */
7561 return 0;
7562}
7563EXPORT_SYMBOL_GPL(init_dummy_netdev);
7564
/**
 *	register_netdev	- register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	This is a wrapper around register_netdevice that takes the rtnl semaphore
 *	and expands the device name if you passed a format string to
 *	alloc_netdev.
 */
7579int register_netdev(struct net_device *dev)
7580{
7581 int err;
7582
7583 rtnl_lock();
7584 err = register_netdevice(dev);
7585 rtnl_unlock();
7586 return err;
7587}
7588EXPORT_SYMBOL(register_netdev);
7589
7590int netdev_refcnt_read(const struct net_device *dev)
7591{
7592 int i, refcnt = 0;
7593
7594 for_each_possible_cpu(i)
7595 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
7596 return refcnt;
7597}
7598EXPORT_SYMBOL(netdev_refcnt_read);
7599
/**
 * netdev_wait_allrefs - wait until all references are gone.
 * @dev: target net_device
 *
 * This is called when unregistering network devices.
 *
 * Any protocol or device that holds a reference should register
 * for netdevice notification, and cleanup and put back the
 * reference if they receive an UNREGISTER event.
 * We can get stuck here if buggy protocols don't correctly
 * call dev_put.
 */
7612static void netdev_wait_allrefs(struct net_device *dev)
7613{
7614 unsigned long rebroadcast_time, warning_time;
7615 int refcnt;
7616
7617 linkwatch_forget_dev(dev);
7618
7619 rebroadcast_time = warning_time = jiffies;
7620 refcnt = netdev_refcnt_read(dev);
7621
7622 while (refcnt != 0) {
7623 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
7624 rtnl_lock();
7625
 /* Rebroadcast unregister notification */
7627 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
7628
7629 __rtnl_unlock();
7630 rcu_barrier();
7631 rtnl_lock();
7632
7633 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
7634 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
7635 &dev->state)) {
 /* We must not have linkwatch events
  * pending on unregister. If this
  * happens, we simply run the queue
  * unscheduled, resulting in a noop
  * for this device.
  */
7642 linkwatch_run_queue();
7643 }
7644
7645 __rtnl_unlock();
7646
7647 rebroadcast_time = jiffies;
7648 }
7649
7650 msleep(250);
7651
7652 refcnt = netdev_refcnt_read(dev);
7653
7654 if (time_after(jiffies, warning_time + 10 * HZ)) {
7655 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
7656 dev->name, refcnt);
7657 warning_time = jiffies;
7658 }
7659 }
7660}
7661
/* The sequence is:
 *
 *	rtnl_lock();
 *	...
 *	register_netdevice(x1);
 *	register_netdevice(x2);
 *	...
 *	unregister_netdevice(y1);
 *	unregister_netdevice(y2);
 *	...
 *	rtnl_unlock();
 *	free_netdev(y1);
 *	free_netdev(y2);
 *
 * We are invoked by rtnl_unlock().
 * This allows us to deal with problems:
 * 1) We can delete sysfs objects which invoke hotplug
 *    without deadlocking with linkwatch via keventd.
 * 2) Since we run with the RTNL semaphore not held, we can sleep
 *    safely in order to wait for the netdev refcnt to drop to zero.
 *
 * We must not return until all unregister events added during
 * the interval the lock was held have been completed.
 */
7686void netdev_run_todo(void)
7687{
7688 struct list_head list;
7689
 /* Snapshot list, allow later requests */
7691 list_replace_init(&net_todo_list, &list);
7692
7693 __rtnl_unlock();
7694
7695
 /* Wait for rcu callbacks to finish before next phase */
7697 if (!list_empty(&list))
7698 rcu_barrier();
7699
7700 while (!list_empty(&list)) {
7701 struct net_device *dev
7702 = list_first_entry(&list, struct net_device, todo_list);
7703 list_del(&dev->todo_list);
7704
7705 rtnl_lock();
7706 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
7707 __rtnl_unlock();
7708
7709 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
7710 pr_err("network todo '%s' but state %d\n",
7711 dev->name, dev->reg_state);
7712 dump_stack();
7713 continue;
7714 }
7715
7716 dev->reg_state = NETREG_UNREGISTERED;
7717
7718 netdev_wait_allrefs(dev);
7719
 /* paranoia */
7721 BUG_ON(netdev_refcnt_read(dev));
7722 BUG_ON(!list_empty(&dev->ptype_all));
7723 BUG_ON(!list_empty(&dev->ptype_specific));
7724 WARN_ON(rcu_access_pointer(dev->ip_ptr));
7725 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
7726 WARN_ON(dev->dn_ptr);
7727
7728 if (dev->priv_destructor)
7729 dev->priv_destructor(dev);
7730 if (dev->needs_free_netdev)
7731 free_netdev(dev);
7732
 /* Report a network device has been unregistered */
7734 rtnl_lock();
7735 dev_net(dev)->dev_unreg_count--;
7736 __rtnl_unlock();
7737 wake_up(&netdev_unregistering_wq);
7738
 /* Free network device */
7740 kobject_put(&dev->dev.kobj);
7741 }
7742}
7743
/* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
 * all the same fields in the same order as net_device_stats, with only
 * the type differing, but rtnl_link_stats64 may have additional fields
 * at the end for newer counters.
 */
7749void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
7750 const struct net_device_stats *netdev_stats)
7751{
7752#if BITS_PER_LONG == 64
7753 BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
7754 memcpy(stats64, netdev_stats, sizeof(*stats64));
 /* zero out counters that only exist in rtnl_link_stats64 */
7756 memset((char *)stats64 + sizeof(*netdev_stats), 0,
7757 sizeof(*stats64) - sizeof(*netdev_stats));
7758#else
7759 size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
7760 const unsigned long *src = (const unsigned long *)netdev_stats;
7761 u64 *dst = (u64 *)stats64;
7762
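 /* On 32-bit, widen each unsigned long counter into its u64 slot. */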
7763 BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
7764 for (i = 0; i < n; i++)
7765 dst[i] = src[i];
7766
7767 memset((char *)stats64 + n * sizeof(u64), 0,
7768 sizeof(*stats64) - n * sizeof(u64));
7769#endif
7770}
7771EXPORT_SYMBOL(netdev_stats_to_stats64);
7772
/**
 *	dev_get_stats	- get network device statistics
 *	@dev: device to get statistics from
 *	@storage: place to store stats
 *
 *	Get network statistics from device. Return @storage.
 *	The device driver may provide its own method by setting
 *	dev->netdev_ops->ndo_get_stats64 or dev->netdev_ops->ndo_get_stats;
 *	otherwise the internal statistics structure is used.
 */
7783struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
7784 struct rtnl_link_stats64 *storage)
7785{
7786 const struct net_device_ops *ops = dev->netdev_ops;
7787
7788 if (ops->ndo_get_stats64) {
7789 memset(storage, 0, sizeof(*storage));
7790 ops->ndo_get_stats64(dev, storage);
7791 } else if (ops->ndo_get_stats) {
7792 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
7793 } else {
7794 netdev_stats_to_stats64(storage, &dev->stats);
7795 }
7796 storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
7797 storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
7798 storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
7799 return storage;
7800}
7801EXPORT_SYMBOL(dev_get_stats);
7802
7803struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
7804{
7805 struct netdev_queue *queue = dev_ingress_queue(dev);
7806
7807#ifdef CONFIG_NET_CLS_ACT
7808 if (queue)
7809 return queue;
7810 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
7811 if (!queue)
7812 return NULL;
7813 netdev_init_one_queue(dev, queue, NULL);
7814 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
7815 queue->qdisc_sleeping = &noop_qdisc;
7816 rcu_assign_pointer(dev->ingress_queue, queue);
7817#endif
7818 return queue;
7819}
7820
7821static const struct ethtool_ops default_ethtool_ops;
7822
7823void netdev_set_default_ethtool_ops(struct net_device *dev,
7824 const struct ethtool_ops *ops)
7825{
7826 if (dev->ethtool_ops == &default_ethtool_ops)
7827 dev->ethtool_ops = ops;
7828}
7829EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
7830
7831void netdev_freemem(struct net_device *dev)
7832{
7833 char *addr = (char *)dev - dev->padded;
7834
7835 kvfree(addr);
7836}
7837
/**
 * alloc_netdev_mqs - allocate network device
 * @sizeof_priv: size of private data to allocate space for
 * @name: device name format string
 * @name_assign_type: origin of device name
 * @setup: callback to initialize device
 * @txqs: the number of TX subqueues to allocate
 * @rxqs: the number of RX subqueues to allocate
 *
 * Allocates a struct net_device with private data area for driver use
 * and performs basic initialization. Also allocates subqueue structs
 * for each queue on the device.
 */
7851struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
7852 unsigned char name_assign_type,
7853 void (*setup)(struct net_device *),
7854 unsigned int txqs, unsigned int rxqs)
7855{
7856 struct net_device *dev;
7857 size_t alloc_size;
7858 struct net_device *p;
7859
7860 BUG_ON(strlen(name) >= sizeof(dev->name));
7861
7862 if (txqs < 1) {
7863 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
7864 return NULL;
7865 }
7866
7867#ifdef CONFIG_SYSFS
7868 if (rxqs < 1) {
7869 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
7870 return NULL;
7871 }
7872#endif
7873
7874 alloc_size = sizeof(struct net_device);
7875 if (sizeof_priv) {
 /* ensure 32-byte alignment of private area */
7877 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
7878 alloc_size += sizeof_priv;
7879 }

 /* ensure 32-byte alignment of whole construct */
7881 alloc_size += NETDEV_ALIGN - 1;
7882
7883 p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_REPEAT);
7884 if (!p)
7885 return NULL;
7886
7887 dev = PTR_ALIGN(p, NETDEV_ALIGN);
7888 dev->padded = (char *)dev - (char *)p;
7889
7890 dev->pcpu_refcnt = alloc_percpu(int);
7891 if (!dev->pcpu_refcnt)
7892 goto free_dev;
7893
7894 if (dev_addr_init(dev))
7895 goto free_pcpu;
7896
7897 dev_mc_init(dev);
7898 dev_uc_init(dev);
7899
7900 dev_net_set(dev, &init_net);
7901
7902 dev->gso_max_size = GSO_MAX_SIZE;
7903 dev->gso_max_segs = GSO_MAX_SEGS;
7904
7905 INIT_LIST_HEAD(&dev->napi_list);
7906 INIT_LIST_HEAD(&dev->unreg_list);
7907 INIT_LIST_HEAD(&dev->close_list);
7908 INIT_LIST_HEAD(&dev->link_watch_list);
7909 INIT_LIST_HEAD(&dev->adj_list.upper);
7910 INIT_LIST_HEAD(&dev->adj_list.lower);
7911 INIT_LIST_HEAD(&dev->ptype_all);
7912 INIT_LIST_HEAD(&dev->ptype_specific);
7913#ifdef CONFIG_NET_SCHED
7914 hash_init(dev->qdisc_hash);
7915#endif
7916 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
7917 setup(dev);
7918
7919 if (!dev->tx_queue_len) {
7920 dev->priv_flags |= IFF_NO_QUEUE;
7921 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
7922 }
7923
7924 dev->num_tx_queues = txqs;
7925 dev->real_num_tx_queues = txqs;
7926 if (netif_alloc_netdev_queues(dev))
7927 goto free_all;
7928
7929#ifdef CONFIG_SYSFS
7930 dev->num_rx_queues = rxqs;
7931 dev->real_num_rx_queues = rxqs;
7932 if (netif_alloc_rx_queues(dev))
7933 goto free_all;
7934#endif
7935
7936 strcpy(dev->name, name);
7937 dev->name_assign_type = name_assign_type;
7938 dev->group = INIT_NETDEV_GROUP;
7939 if (!dev->ethtool_ops)
7940 dev->ethtool_ops = &default_ethtool_ops;
7941
7942 nf_hook_ingress_init(dev);
7943
7944 return dev;
7945
7946free_all:
7947 free_netdev(dev);
7948 return NULL;
7949
7950free_pcpu:
7951 free_percpu(dev->pcpu_refcnt);
7952free_dev:
7953 netdev_freemem(dev);
7954 return NULL;
7955}
7956EXPORT_SYMBOL(alloc_netdev_mqs);
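
/* Usage sketch (illustrative only): most drivers reach this through the
 * alloc_netdev()/alloc_etherdev() wrapper macros.  "struct my_priv" and
 * "my_setup" are hypothetical names.
 *
 *	dev = alloc_netdev_mqs(sizeof(struct my_priv), "myeth%d",
 *			       NET_NAME_UNKNOWN, my_setup, 4, 4);
 *	if (!dev)
 *		return -ENOMEM;
 */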

/**
 *	free_netdev - free network device
 *	@dev: device
 *
 *	This function does the last stage of destroying an allocated device
 *	interface. The reference to the device object is released.  If this
 *	is the last reference then it will be freed.
 *	Must be called in process context.
 */
void free_netdev(struct net_device *dev)
{
	struct napi_struct *p, *n;
	struct bpf_prog *prog;

	might_sleep();
	netif_free_tx_queues(dev);
#ifdef CONFIG_SYSFS
	kvfree(dev->_rx);
#endif

	kfree(rcu_dereference_protected(dev->ingress_queue, 1));

	/* Flush device addresses */
	dev_addr_flush(dev);

	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
		netif_napi_del(p);

	free_percpu(dev->pcpu_refcnt);
	dev->pcpu_refcnt = NULL;

	prog = rcu_dereference_protected(dev->xdp_prog, 1);
	if (prog) {
		bpf_prog_put(prog);
		static_key_slow_dec(&generic_xdp_needed);
	}

	/* Compatibility with error handling in drivers */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		netdev_freemem(dev);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

	/* will free via device release */
	put_device(&dev->dev);
}
EXPORT_SYMBOL(free_netdev);
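
/* Usage sketch (illustrative only): free_netdev() is the error-path
 * counterpart of alloc_netdev*() before registration has succeeded:
 *
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 */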

/**
 *	synchronize_net - Synchronize with packet receive processing
 *
 *	Wait for packets currently being received to be done.
 *	Does not block later packets from starting.
 */
void synchronize_net(void)
{
	might_sleep();
	if (rtnl_is_locked())
		synchronize_rcu_expedited();
	else
		synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);
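
/* Usage sketch (illustrative only): the usual publish-then-wait pattern.
 * "some_ptr" is a hypothetical RCU-protected field; after the grace period
 * no reader can still hold the old value.
 *
 *	old = rtnl_dereference(dev->some_ptr);
 *	rcu_assign_pointer(dev->some_ptr, new);
 *	synchronize_net();
 *	kfree(old);
 */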

/**
 *	unregister_netdevice_queue - remove device from the kernel
 *	@dev: device
 *	@head: list
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *	If head not NULL, device is queued to be unregistered later.
 *
 *	Callers must hold the rtnl semaphore.  You may want
 *	unregister_netdev() instead of this.
 */
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{
	ASSERT_RTNL();

	if (head) {
		list_move_tail(&dev->unreg_list, head);
	} else {
		rollback_registered(dev);
		/* Finish processing unregister after unlock */
		net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_queue);

/**
 *	unregister_netdevice_many - unregister many devices
 *	@head: list of devices
 *
 *	Note: As most callers use a stack allocated list_head,
 *	we force a list_del() to make sure stack won't be corrupted later.
 */
void unregister_netdevice_many(struct list_head *head)
{
	struct net_device *dev;

	if (!list_empty(head)) {
		rollback_registered_many(head);
		list_for_each_entry(dev, head, unreg_list)
			net_set_todo(dev);
		list_del(head);
	}
}
EXPORT_SYMBOL(unregister_netdevice_many);
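
/* Usage sketch (illustrative only): batching several unregistrations under
 * a single RTNL hold amortizes the RCU grace periods:
 *
 *	LIST_HEAD(list);
 *
 *	rtnl_lock();
 *	unregister_netdevice_queue(dev1, &list);
 *	unregister_netdevice_queue(dev2, &list);
 *	unregister_netdevice_many(&list);
 *	rtnl_unlock();
 */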

/**
 *	unregister_netdev - remove device from the kernel
 *	@dev: device
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *
 *	This is just a wrapper for unregister_netdevice that takes
 *	the rtnl semaphore.  In general you want to use this and not
 *	unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);

/**
 *	dev_change_net_namespace - move device to different network namespace
 *	@dev: device
 *	@net: network namespace
 *	@pat: If not NULL name pattern to try if the current device name
 *	      is already taken in the destination network namespace.
 *
 *	This function shuts down a device interface and moves it
 *	to a new network namespace. On success 0 is returned, on
 *	a failure a negative errno code is returned.
 *
 *	Callers must hold the rtnl semaphore.
 */
int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
	int err;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->features & NETIF_F_NETNS_LOCAL)
		goto out;

	/* Ensure the device has been registered */
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing to do */
	err = 0;
	if (net_eq(dev_net(dev), net))
		goto out;

	/* Pick the destination device name, and make sure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	if (__dev_get_by_name(net, dev->name)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		if (dev_get_valid_name(net, dev, pat) < 0)
			goto out;
	}

	/*
	 * And now a mini version of register_netdevice/unregister_netdevice.
	 */

	/* If device is running close it first. */
	dev_close(dev);

	/* And unlink it from device chain */
	err = -ENODEV;
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols, that we are about to destroy
	 * this device. They should clean all the things.
	 *
	 * Note that dev->reg_state stays at NETREG_REGISTERED.
	 * This is wanted because this way 8021q and macvlan know
	 * the device is just moving and can keep their slaves up.
	 */
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
	rcu_barrier();
	call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
	rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);

	/*
	 *	Flush the unicast and multicast chains
	 */
	dev_uc_flush(dev);
	dev_mc_flush(dev);

	/* Send a netdev-removed uevent to the old namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
	netdev_adjacent_del_links(dev);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);

	/* If there is an ifindex conflict assign a new one */
	if (__dev_get_by_index(net, dev->ifindex))
		dev->ifindex = dev_new_index(net);

	/* Send a netdev-add uevent to the new namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
	netdev_adjacent_add_links(dev);

	/* Fixup kobjects */
	err = device_rename(&dev->dev, dev->name);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols, that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	/*
	 *	Prevent userspace races by waiting until the network
	 *	device is fully setup before sending notifications.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);

	synchronize_net();
	err = 0;
out:
	return err;
}
EXPORT_SYMBOL_GPL(dev_change_net_namespace);
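
/* Usage sketch (illustrative only): moving a device under RTNL, with an
 * "eth%d" fallback pattern if the current name is taken in the target
 * namespace ("target_net" is a hypothetical struct net pointer):
 *
 *	rtnl_lock();
 *	err = dev_change_net_namespace(dev, target_net, "eth%d");
 *	rtnl_unlock();
 */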

static int dev_cpu_dead(unsigned int oldcpu)
{
	struct sk_buff **list_skb;
	struct sk_buff *skb;
	unsigned int cpu;
	struct softnet_data *sd, *oldsd, *remsd = NULL;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Append output queue from offline CPU. */
	if (oldsd->output_queue) {
		*sd->output_queue_tailp = oldsd->output_queue;
		sd->output_queue_tailp = oldsd->output_queue_tailp;
		oldsd->output_queue = NULL;
		oldsd->output_queue_tailp = &oldsd->output_queue;
	}

	/* Append NAPI poll list from offline CPU, with one exception :
	 * process_backlog() must be called by cpu owning percpu backlog.
	 * We properly handle process_queue & input_pkt_queue later.
	 */
	while (!list_empty(&oldsd->poll_list)) {
		struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
							    struct napi_struct,
							    poll_list);

		list_del_init(&napi->poll_list);
		if (napi->poll == process_backlog)
			napi->state = 0;
		else
			____napi_schedule(sd, napi);
	}

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

#ifdef CONFIG_RPS
	remsd = oldsd->rps_ipi_list;
	oldsd->rps_ipi_list = NULL;
#endif
	/* send out pending IPI's on offline CPU */
	net_rps_send_ipi(remsd);

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
		netif_rx_ni(skb);
		input_queue_head_incr(oldsd);
	}
	while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
		netif_rx_ni(skb);
		input_queue_head_incr(oldsd);
	}

	return 0;
}

/**
 *	netdev_increment_features - increment feature set by one
 *	@all: current feature set
 *	@one: new feature set
 *	@mask: mask feature set
 *
 *	Computes a new feature set after adding a device with feature set
 *	@one to the master device with current feature set @all.  Will not
 *	enable anything that is off in @mask. Returns the new feature set.
 */
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask)
{
	if (mask & NETIF_F_HW_CSUM)
		mask |= NETIF_F_CSUM_MASK;
	mask |= NETIF_F_VLAN_CHALLENGED;

	all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
	all &= one | ~NETIF_F_ALL_FOR_ALL;

	/* If one device supports hw checksumming, set for all. */
	if (all & NETIF_F_HW_CSUM)
		all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);
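
/* Usage sketch (illustrative only): a master driver folding a new slave's
 * features into its own, in the style of bonding/team.  "master", "slave"
 * and "mask" are hypothetical; @mask would be the master's feature mask.
 *
 *	features = netdev_increment_features(master->vlan_features,
 *					     slave->features, mask);
 */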

static struct hlist_head * __net_init netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	if (net != &init_net)
		INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}

/**
 *	netdev_drivername - network driver for the device
 *	@dev: network device
 *
 *	Determine network driver for device.
 */
const char *netdev_drivername(const struct net_device *dev)
{
	const struct device_driver *driver;
	const struct device *parent;
	const char *empty = "";

	parent = dev->dev.parent;
	if (!parent)
		return empty;

	driver = parent->driver;
	if (driver && driver->name)
		return driver->name;
	return empty;
}

static void __netdev_printk(const char *level, const struct net_device *dev,
			    struct va_format *vaf)
{
	if (dev && dev->dev.parent) {
		dev_printk_emit(level[1] - '0',
				dev->dev.parent,
				"%s %s %s%s: %pV",
				dev_driver_string(dev->dev.parent),
				dev_name(dev->dev.parent),
				netdev_name(dev), netdev_reg_state(dev),
				vaf);
	} else if (dev) {
		printk("%s%s%s: %pV",
		       level, netdev_name(dev), netdev_reg_state(dev), vaf);
	} else {
		printk("%s(NULL net_device): %pV", level, vaf);
	}
}

void netdev_printk(const char *level, const struct net_device *dev,
		   const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	__netdev_printk(level, dev, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(netdev_printk);

#define define_netdev_printk_level(func, level)			\
void func(const struct net_device *dev, const char *fmt, ...)		\
{									\
	struct va_format vaf;						\
	va_list args;							\
									\
	va_start(args, fmt);						\
									\
	vaf.fmt = fmt;							\
	vaf.va = &args;							\
									\
	__netdev_printk(level, dev, &vaf);				\
									\
	va_end(args);							\
}									\
EXPORT_SYMBOL(func);

define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);
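
/* Usage sketch (illustrative only): the generated helpers behave like
 * printk() but prefix the driver, bus and interface names ("speed" is a
 * hypothetical variable):
 *
 *	netdev_info(dev, "link up, %u Mbps\n", speed);
 *	netdev_err(dev, "DMA mapping failed\n");
 */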

static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};

static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *aux;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmoveable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			pr_emerg("%s: failed to move %s to init_net: %d\n",
				 __func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}

static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
{
	/* Return with the rtnl_lock held when there are no network
	 * devices unregistering in any network namespace in net_list.
	 */
	struct net *net;
	bool unregistering;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&netdev_unregistering_wq, &wait);
	for (;;) {
		unregistering = false;
		rtnl_lock();
		list_for_each_entry(net, net_list, exit_list) {
			if (net->dev_unreg_count > 0) {
				unregistering = true;
				break;
			}
		}
		if (!unregistering)
			break;
		__rtnl_unlock();

		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&netdev_unregistering_wq, &wait);
}

static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace.  Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	/* To prevent network device cleanup code from dereferencing
	 * loopback devices or network devices that have been freed,
	 * wait here for all pending unregistrations to complete
	 * before unregistering the loopback device and allowing the
	 * network namespace to be freed.
	 *
	 * The netdev todo list containing all network devices
	 * unregistrations that happen in default_device_exit_batch
	 * will run in the rtnl_unlock() at the end of
	 * default_device_exit_batch.
	 */
	rtnl_lock_unregistering(net_list);
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
	.exit_batch = default_device_exit_batch,
};

/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 */

/*
 *	This is called single threaded during boot, so no need
 *	to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	INIT_LIST_HEAD(&offload_base);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 *	Initialise the packet receive queues.
	 */
	for_each_possible_cpu(i) {
		struct work_struct *flush = per_cpu_ptr(&flush_works, i);
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		INIT_WORK(flush, flush_backlog);

		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->cpu = i;
#endif

		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
	}

	dev_boot_phase = 0;

	/* The loopback device is special if any other network devices
	 * is present in a network namespace the loopback device must
	 * be present. Since we now dynamically allocate and free the
	 * loopback device ensure this invariant is maintained by
	 * keeping the loopback device as the first device on the
	 * list of network devices.  Additionally the loopback must
	 * be the first device that appears and the last network device
	 * that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
				       NULL, dev_cpu_dead);
	WARN_ON(rc < 0);
	dst_subsys_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);