// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	NET3	Protocol independent device support routines.
 */

#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_ingress.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
#include <net/udp_tunnel.h>
#include <linux/net_namespace.h>
#include <linux/indirect_call_wrapper.h>
#include <net/devlink.h>

#include "net-sysfs.h"

#define MAX_GRO_SKBS 8

#define GRO_MAX_HEAD (MAX_HEADER + 128)
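
/*
 * Protocol handler lists: ptype_base is a small hash of handlers keyed
 * by packet type, while ptype_all holds taps that want every packet
 * (e.g. AF_PACKET sockets). Writers take ptype_lock; readers walk the
 * lists under rcu_read_lock(), which is why updates below use the _rcu
 * list primitives and removal waits for a grace period before a
 * handler may be freed.
 */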
static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
					 struct netdev_notifier_info *info);
static int call_netdevice_notifiers_extack(unsigned long val,
					   struct net_device *dev,
					   struct netlink_ext_ack *extack);
static struct napi_struct *napi_by_id(unsigned int napi_id);

DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

static DEFINE_MUTEX(ifalias_mutex);

static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);

static seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0)
		;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

static RAW_NOTIFIER_HEAD(netdev_chain);

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
static const unsigned short netdev_lock_type[] = {
	ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] = {
	"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	"_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	"_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	"_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;

	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
	else
		return pt->dev ? &pt->dev->ptype_specific :
				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}
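
/**
 * dev_add_pack - add a packet handler
 * @pt: packet type declaration
 *
 * Link a protocol handler into the receive path. The &packet_type must
 * stay allocated until it is removed again with dev_remove_pack(). The
 * insertion is RCU-safe and does not sleep, so CPUs already in the
 * middle of receive processing may not see the new handler until the
 * next packet.
 *
 * Illustrative registration, with my_ptype and my_rcv as placeholder
 * names (they are not defined in this file):
 *
 *	static struct packet_type my_ptype __read_mostly = {
 *		.type	= cpu_to_be16(ETH_P_IP),
 *		.func	= my_rcv,
 *	};
 *	dev_add_pack(&my_ptype);
 */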
void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
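
/**
 * __dev_remove_pack - remove a packet handler
 * @pt: packet type declaration
 *
 * Unlink a handler added with dev_add_pack(). RCU readers may still be
 * traversing the entry when this returns, so the caller must let a
 * grace period elapse (e.g. synchronize_net(), as dev_remove_pack()
 * does) before freeing or reusing @pt.
 */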
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);
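
/**
 * dev_remove_pack - remove a packet handler and wait for readers
 * @pt: packet type declaration
 *
 * Like __dev_remove_pack(), but also waits out an RCU grace period via
 * synchronize_net(), so @pt may be freed safely on return. May sleep.
 */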
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
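
/**
 * dev_add_offload - register a GRO/GSO offload handler
 * @po: protocol offload declaration
 *
 * Insert @po into the global offload list, which is kept sorted by
 * ascending priority so lower-priority entries are tried first. As
 * with dev_add_pack(), the entry must not be freed until it has been
 * removed and a grace period has passed.
 */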
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);

static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq = s[i].map.irq;
			dev->base_addr = s[i].map.base_addr;
			dev->mem_start = s[i].map.mem_start;
			dev->mem_end = s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);

unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/* If the device is already registered, return a base of 1 so
	 * that legacy probe code does not probe it again.
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/* Parse and record a "netdev=irq,base,mem_start,mem_end,name" option. */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

int dev_get_iflink(const struct net_device *dev)
{
	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
		return dev->netdev_ops->ndo_get_iflink(dev);

	return dev->ifindex;
}
EXPORT_SYMBOL(dev_get_iflink);

int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info;

	if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
		return -EINVAL;

	info = skb_tunnel_info_unclone(skb);
	if (!info)
		return -ENOMEM;
	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
		return -EINVAL;

	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
}
EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
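
/**
 * __dev_get_by_name - find a device by its name
 * @net: the applicable net namespace
 * @name: name to find
 *
 * Walks the per-namespace name hash. Returns the device or NULL if
 * none matches. No reference is taken; the caller must hold RTNL or
 * dev_base_lock to keep the result stable.
 */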
struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);
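
/**
 * dev_get_by_name - find a device by name and take a reference
 * @net: the applicable net namespace
 * @name: name to find
 *
 * The lookup runs under rcu_read_lock() and dev_hold() is taken on a
 * match, so no locking is required of the caller, who must drop the
 * reference with dev_put() when done. Illustrative use (placeholder
 * device name, error handling elided):
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *	if (dev) {
 *		...
 *		dev_put(dev);
 *	}
 */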
struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

struct net_device *dev_get_by_napi_id(unsigned int napi_id)
{
	struct napi_struct *napi;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (napi_id < MIN_NAPI_ID)
		return NULL;

	napi = napi_by_id(napi_id);

	return napi ? napi->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_napi_id);
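
/**
 * netdev_get_name - copy a device's name out by ifindex
 * @net: network namespace
 * @name: destination buffer, at least IFNAMSIZ bytes
 * @ifindex: index of the device
 *
 * The devnet_rename_seq seqcount guards against a concurrent
 * dev_change_name(): if a rename races with the copy, the read is
 * retried until a consistent name is observed.
 */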
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	unsigned int seq;

retry:
	seq = raw_seqcount_begin(&devnet_rename_seq);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	strcpy(name, dev->name);
	rcu_read_unlock();
	if (read_seqcount_retry(&devnet_rename_seq, seq)) {
		cond_resched();
		goto retry;
	}

	return 0;
}

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
				      unsigned short mask)
{
	struct net_device *dev, *ret;

	ASSERT_RTNL();

	ret = NULL;
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);
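
/**
 * dev_valid_name - check that a string is a usable interface name
 * @name: name string
 *
 * A valid name is non-empty, shorter than IFNAMSIZ, not "." or "..",
 * and free of '/', ':' and whitespace, any of which would break
 * procfs/sysfs paths or textual interfaces that use ':' as a
 * separator.
 */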
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || *name == ':' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);
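
/*
 * __dev_alloc_name - pick a free unit for an "eth%d"-style format
 * string. Exactly one "%d" is permitted in @name. Units already in
 * use in @net are marked in a one-page bitmap (so up to 8 * PAGE_SIZE
 * units are tracked) and the first free one is chosen. The resulting
 * name is written to @buf (IFNAMSIZ bytes) and the unit number is
 * returned, or a negative errno on failure.
 */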
static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	if (!dev_valid_name(name))
		return -EINVAL;

	p = strchr(name, '%');
	if (p) {
		/* Verify the string: it may have come from userspace.
		 * There must be exactly one "%d" and no other '%'.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not the exact
			 * inverse of printf
			 */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* All units are taken, or the name is too long to leave room
	 * for the digits.
	 */
	return -ENFILE;
}

static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	BUG_ON(!net);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

int dev_alloc_name(struct net_device *dev, const char *name)
{
	return dev_alloc_name_ns(dev_net(dev), dev, name);
}
EXPORT_SYMBOL(dev_alloc_name);

int dev_get_valid_name(struct net *net, struct net_device *dev,
		       const char *name)
{
	BUG_ON(!net);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}
EXPORT_SYMBOL(dev_get_valid_name);
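
/**
 * dev_change_name - change the name of a device
 * @dev: device
 * @newname: new name, or a pattern containing "%d"
 *
 * Caller holds RTNL. Renaming a running device is refused unless the
 * device opted in with IFF_LIVE_RENAME_OK. Readers see the rename
 * atomically via devnet_rename_seq; the name-hash entry is re-added
 * only after an RCU grace period, and a failing NETDEV_CHANGENAME
 * notifier causes the rename to be rolled back.
 */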
int dev_change_name(struct net_device *dev, const char *newname)
{
	unsigned char old_assign_type;
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);

	/* Devices that are up may only be renamed if they set
	 * IFF_LIVE_RENAME_OK (e.g. auto-enslaved failover slaves).
	 */
	if (dev->flags & IFF_UP &&
	    likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK)))
		return -EBUSY;

	write_seqcount_begin(&devnet_rename_seq);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		write_seqcount_end(&devnet_rename_seq);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		write_seqcount_end(&devnet_rename_seq);
		return err;
	}

	if (oldname[0] && !strchr(oldname, '%'))
		netdev_info(dev, "renamed from %s\n", oldname);

	old_assign_type = dev->name_assign_type;
	dev->name_assign_type = NET_NAME_RENAMED;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		dev->name_assign_type = old_assign_type;
		write_seqcount_end(&devnet_rename_seq);
		return ret;
	}

	write_seqcount_end(&devnet_rename_seq);

	netdev_adjacent_rename_links(dev, oldname);

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* Roll the name back once; give up if that fails too. */
		if (err >= 0) {
			err = ret;
			write_seqcount_begin(&devnet_rename_seq);
			memcpy(dev->name, oldname, IFNAMSIZ);
			memcpy(oldname, newname, IFNAMSIZ);
			dev->name_assign_type = old_assign_type;
			old_assign_type = NET_NAME_RENAMED;
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}

int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	struct dev_ifalias *new_alias = NULL;

	if (len >= IFALIASZ)
		return -EINVAL;

	if (len) {
		new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
		if (!new_alias)
			return -ENOMEM;

		memcpy(new_alias->ifalias, alias, len);
		new_alias->ifalias[len] = 0;
	}

	mutex_lock(&ifalias_mutex);
	rcu_swap_protected(dev->ifalias, new_alias,
			   mutex_is_locked(&ifalias_mutex));
	mutex_unlock(&ifalias_mutex);

	if (new_alias)
		kfree_rcu(new_alias, rcuhead);

	return len;
}
EXPORT_SYMBOL(dev_set_alias);

int dev_get_alias(const struct net_device *dev, char *name, size_t len)
{
	const struct dev_ifalias *alias;
	int ret = 0;

	rcu_read_lock();
	alias = rcu_dereference(dev->ifalias);
	if (alias)
		ret = snprintf(name, len, "%s", alias->ifalias);
	rcu_read_unlock();

	return ret;
}

void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		struct netdev_notifier_change_info change_info = {
			.info.dev = dev,
		};

		call_netdevice_notifiers_info(NETDEV_CHANGE,
					      &change_info.info);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
	}
}
EXPORT_SYMBOL(netdev_state_change);

void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);

static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	if (!netif_device_present(dev))
		return -ENODEV;

	/* Keep netpoll from servicing the RX path while the device
	 * state is changing.
	 */
	netpoll_poll_disable(dev);

	ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_poll_enable(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}
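
/**
 * dev_open - prepare an interface for use
 * @dev: device to open
 * @extack: netlink extended ack, passed to the NETDEV_PRE_UP notifiers
 *
 * Takes the device from down to up: runs the driver's ndo_open(), sets
 * IFF_UP and fires the NETDEV_UP notifier chain. Opening a device that
 * is already up is a no-op returning 0. Caller holds RTNL.
 */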
int dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev, extack);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);

static void __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, close_list) {
		/* Temporarily disable netpoll until the interface is down */
		netpoll_poll_disable(dev);

		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Make the cleared __LINK_STATE_START visible to any
		 * napi poll scheduled after this point.
		 */
		smp_mb__after_atomic();
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, close_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/* Call the device-specific close. This cannot fail and
		 * is only done for a device that is UP; it may be
		 * called even after a DETACH hot-plug event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		netpoll_poll_enable(dev);
	}
}

static void __dev_close(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	__dev_close_many(&single);
	list_del(&single);
}

void dev_close_many(struct list_head *head, bool unlink)
{
	struct net_device *dev, *tmp;

	/* Remove the devices that don't need to be closed */
	list_for_each_entry_safe(dev, tmp, head, close_list)
		if (!(dev->flags & IFF_UP))
			list_del_init(&dev->close_list);

	__dev_close_many(head);

	list_for_each_entry_safe(dev, tmp, head, close_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
		if (unlink)
			list_del_init(&dev->close_list);
	}
}
EXPORT_SYMBOL(dev_close_many);

void dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->close_list, &single);
		dev_close_many(&single, true);
		list_del(&single);
	}
}
EXPORT_SYMBOL(dev_close);

/* Disable Large Receive Offload on @dev and all lower devices. */
void dev_disable_lro(struct net_device *dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		dev_disable_lro(lower_dev);
}
EXPORT_SYMBOL(dev_disable_lro);

static void dev_disable_gro_hw(struct net_device *dev)
{
	dev->wanted_features &= ~NETIF_F_GRO_HW;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_GRO_HW))
		netdev_WARN(dev, "failed to disable GRO_HW!\n");
}

const char *netdev_cmd_to_name(enum netdev_cmd cmd)
{
#define N(val)						\
	case NETDEV_##val:				\
		return "NETDEV_" __stringify(val);
	switch (cmd) {
	N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
	N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
	N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
	N(POST_INIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN) N(CHANGEUPPER)
	N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA) N(BONDING_INFO)
	N(PRECHANGEUPPER) N(CHANGELOWERSTATE) N(UDP_TUNNEL_PUSH_INFO)
	N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
	N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
	N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
	N(PRE_CHANGEADDR)
	}
#undef N
	return "UNKNOWN_NETDEV_EVENT";
}
EXPORT_SYMBOL_GPL(netdev_cmd_to_name);

static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
				   struct net_device *dev)
{
	struct netdev_notifier_info info = {
		.dev = dev,
	};

	return nb->notifier_call(nb, val, &info);
}

static int dev_boot_phase = 1;
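
/**
 * register_netdevice_notifier - register a network notifier block
 * @nb: notifier
 *
 * Besides hooking @nb into netdev_chain, this replays NETDEV_REGISTER
 * (and NETDEV_UP for running devices) for every device that already
 * exists in any namespace, so late registrants see a consistent view.
 * If a replayed NETDEV_REGISTER fails, already-notified devices are
 * unwound with matching DOWN/UNREGISTER events and the notifier is
 * removed again. Illustrative registration, with my_notifier and
 * my_event_cb as placeholder names (not defined in this file):
 *
 *	static struct notifier_block my_notifier = {
 *		.notifier_call = my_event_cb,
 *	};
 *	register_netdevice_notifier(&my_notifier);
 */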
int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	/* Close race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			call_netdevice_notifier(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				goto outroll;

			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}

outroll:
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net *net;
	int err;

	/* Close race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}
unlock:
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

static int call_netdevice_notifiers_info(unsigned long val,
					 struct netdev_notifier_info *info)
{
	ASSERT_RTNL();
	return raw_notifier_call_chain(&netdev_chain, val, info);
}

static int call_netdevice_notifiers_extack(unsigned long val,
					   struct net_device *dev,
					   struct netlink_ext_ack *extack)
{
	struct netdev_notifier_info info = {
		.dev = dev,
		.extack = extack,
	};

	return call_netdevice_notifiers_info(val, &info);
}

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	return call_netdevice_notifiers_extack(val, dev, NULL);
}
EXPORT_SYMBOL(call_netdevice_notifiers);

static int call_netdevice_notifiers_mtu(unsigned long val,
					struct net_device *dev, u32 arg)
{
	struct netdev_notifier_info_ext info = {
		.info.dev = dev,
		.ext.mtu = arg,
	};

	BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);

	return call_netdevice_notifiers_info(val, &info.info);
}

#ifdef CONFIG_NET_INGRESS
static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);

void net_inc_ingress_queue(void)
{
	static_branch_inc(&ingress_needed_key);
}
EXPORT_SYMBOL_GPL(net_inc_ingress_queue);

void net_dec_ingress_queue(void)
{
	static_branch_dec(&ingress_needed_key);
}
EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
#endif

#ifdef CONFIG_NET_EGRESS
static DEFINE_STATIC_KEY_FALSE(egress_needed_key);

void net_inc_egress_queue(void)
{
	static_branch_inc(&egress_needed_key);
}
EXPORT_SYMBOL_GPL(net_inc_egress_queue);

void net_dec_egress_queue(void)
{
	static_branch_dec(&egress_needed_key);
}
EXPORT_SYMBOL_GPL(net_dec_egress_queue);
#endif

static DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
#ifdef CONFIG_JUMP_LABEL
static atomic_t netstamp_needed_deferred;
static atomic_t netstamp_wanted;
static void netstamp_clear(struct work_struct *work)
{
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
	int wanted;

	wanted = atomic_add_return(deferred, &netstamp_wanted);
	if (wanted > 0)
		static_branch_enable(&netstamp_needed_key);
	else
		static_branch_disable(&netstamp_needed_key);
}
static DECLARE_WORK(netstamp_work, netstamp_clear);
#endif

void net_enable_timestamp(void)
{
#ifdef CONFIG_JUMP_LABEL
	int wanted;

	while (1) {
		wanted = atomic_read(&netstamp_wanted);
		if (wanted <= 0)
			break;
		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
			return;
	}
	/* Static key toggles may sleep, so defer them to a workqueue. */
	atomic_inc(&netstamp_needed_deferred);
	schedule_work(&netstamp_work);
#else
	static_branch_inc(&netstamp_needed_key);
#endif
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef CONFIG_JUMP_LABEL
	int wanted;

	while (1) {
		wanted = atomic_read(&netstamp_wanted);
		if (wanted <= 1)
			break;
		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
			return;
	}
	atomic_dec(&netstamp_needed_deferred);
	schedule_work(&netstamp_work);
#else
	static_branch_dec(&netstamp_needed_key);
#endif
}
EXPORT_SYMBOL(net_disable_timestamp);

static inline void net_timestamp_set(struct sk_buff *skb)
{
	skb->tstamp = 0;
	if (static_branch_unlikely(&netstamp_needed_key))
		__net_timestamp(skb);
}

#define net_timestamp_check(COND, SKB)				\
	if (static_branch_unlikely(&netstamp_needed_key)) {	\
		if ((COND) && !(SKB)->tstamp)			\
			__net_timestamp(SKB);			\
	}							\
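
/**
 * is_skb_forwardable - can this skb be transmitted on this device?
 * @dev: target device
 * @skb: buffer to check
 *
 * True if @dev is up and @skb fits within the device MTU; one VLAN
 * header of slack is allowed since many drivers insert or strip tags
 * themselves. GSO packets pass unconditionally because they are
 * resegmented to the right size later on the transmit path.
 */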
bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
{
	unsigned int len;

	if (!(dev->flags & IFF_UP))
		return false;

	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
	if (skb->len <= len)
		return true;

	/* GSO packets are segmented to the device MTU later */
	if (skb_is_gso(skb))
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(is_skb_forwardable);

int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	int ret = ____dev_forward_skb(dev, skb);

	if (likely(!ret)) {
		skb->protocol = eth_type_trans(skb, dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(__dev_forward_skb);

int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);

static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
		return -ENOMEM;
	refcount_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

static inline void deliver_ptype_list_skb(struct sk_buff *skb,
					  struct packet_type **pt,
					  struct net_device *orig_dev,
					  __be16 type,
					  struct list_head *ptype_list)
{
	struct packet_type *ptype, *pt_prev = *pt;

	list_for_each_entry_rcu(ptype, ptype_list, list) {
		if (ptype->type != type)
			continue;
		if (pt_prev)
			deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}
	*pt = pt_prev;
}

static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{
	if (!ptype->af_packet_priv || !skb->sk)
		return false;

	if (ptype->id_match)
		return ptype->id_match(ptype, skb->sk);
	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
		return true;

	return false;
}

/**
 * dev_nit_active - return true if any network interface taps are in use
 * @dev: device to check for taps
 */
bool dev_nit_active(struct net_device *dev)
{
	return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all);
}
EXPORT_SYMBOL_GPL(dev_nit_active);
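
/*
 * dev_queue_xmit_nit - feed an outgoing packet to the taps. Every
 * registered tap (global ptype_all first, then the device's own
 * ptype_all list) receives a clone marked PACKET_OUTGOING. Each tap
 * but the last is serviced via deliver_skb(), and the final tap
 * consumes the clone outside the loop, so only one clone is
 * allocated however many taps are present.
 */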
void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;
	struct list_head *ptype_list = &ptype_all;

	rcu_read_lock();
again:
	list_for_each_entry_rcu(ptype, ptype_list, list) {
		if (ptype->ignore_outgoing)
			continue;

		/* Never send packets back to the socket
		 * they originated from.
		 */
		if (skb_loop_sk(ptype, skb))
			continue;

		if (pt_prev) {
			deliver_skb(skb2, pt_prev, skb->dev);
			pt_prev = ptype;
			continue;
		}

		/* need to clone skb, done only once */
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2)
			goto out_unlock;

		net_timestamp_set(skb2);

		/* The network header should already be set by the
		 * sender; the reset below is just protection against
		 * buggy protocols.
		 */
		skb_reset_mac_header(skb2);

		if (skb_network_header(skb2) < skb2->data ||
		    skb_network_header(skb2) > skb_tail_pointer(skb2)) {
			net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
					     ntohs(skb2->protocol),
					     dev->name);
			skb_reset_network_header(skb2);
		}

		skb2->transport_header = skb2->network_header;
		skb2->pkt_type = PACKET_OUTGOING;
		pt_prev = ptype;
	}

	if (ptype_list == &ptype_all) {
		ptype_list = &dev->ptype_all;
		goto again;
	}
out_unlock:
	if (pt_prev) {
		if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
			pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
		else
			kfree_skb(skb2);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);

/* Validate the traffic-class to tx-queue mappings after a change in
 * real_num_tx_queues; invalid mappings are reset.
 */
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
	int i;
	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

	/* If TC0 is invalidated disable TC mapping */
	if (tc->offset + tc->count > txq) {
		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
		dev->num_tc = 0;
		return;
	}

	/* Invalidated prio to tc mappings set to TC0 */
	for (i = 1; i < TC_BITMASK + 1; i++) {
		int q = netdev_get_prio_tc_map(dev, i);

		tc = &dev->tc_to_txq[q];
		if (tc->offset + tc->count > txq) {
			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
				i, q);
			netdev_set_prio_tc_map(dev, i, 0);
		}
	}
}

int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
{
	if (dev->num_tc) {
		struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
		int i;

		/* walk through the TCs and see if txq falls into one */
		for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
			if ((txq - tc->offset) < tc->count)
				return i;
		}

		/* didn't find it, just return -1 to indicate no match */
		return -1;
	}

	return 0;
}
EXPORT_SYMBOL(netdev_txq_to_tc);

#ifdef CONFIG_XPS
struct static_key xps_needed __read_mostly;
EXPORT_SYMBOL(xps_needed);
struct static_key xps_rxqs_needed __read_mostly;
EXPORT_SYMBOL(xps_rxqs_needed);
static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P)		\
	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))

static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
			     int tci, u16 index)
{
	struct xps_map *map = NULL;
	int pos;

	if (dev_maps)
		map = xmap_dereference(dev_maps->attr_map[tci]);
	if (!map)
		return false;

	for (pos = map->len; pos--;) {
		if (map->queues[pos] != index)
			continue;

		if (map->len > 1) {
			map->queues[pos] = map->queues[--map->len];
			break;
		}

		RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
		kfree_rcu(map, rcu);
		return false;
	}

	return true;
}

static bool remove_xps_queue_cpu(struct net_device *dev,
				 struct xps_dev_maps *dev_maps,
				 int cpu, u16 offset, u16 count)
{
	int num_tc = dev->num_tc ? : 1;
	bool active = false;
	int tci;

	for (tci = cpu * num_tc; num_tc--; tci++) {
		int i, j;

		for (i = count, j = offset; i--; j++) {
			if (!remove_xps_queue(dev_maps, tci, j))
				break;
		}

		active |= i < 0;
	}

	return active;
}

static void reset_xps_maps(struct net_device *dev,
			   struct xps_dev_maps *dev_maps,
			   bool is_rxqs_map)
{
	if (is_rxqs_map) {
		static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
		RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
	} else {
		RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
	}
	static_key_slow_dec_cpuslocked(&xps_needed);
	kfree_rcu(dev_maps, rcu);
}

static void clean_xps_maps(struct net_device *dev, const unsigned long *mask,
			   struct xps_dev_maps *dev_maps, unsigned int nr_ids,
			   u16 offset, u16 count, bool is_rxqs_map)
{
	bool active = false;
	int i, j;

	for (j = -1; j = netif_attrmask_next(j, mask, nr_ids),
	     j < nr_ids;)
		active |= remove_xps_queue_cpu(dev, dev_maps, j, offset,
					       count);
	if (!active)
		reset_xps_maps(dev, dev_maps, is_rxqs_map);

	if (!is_rxqs_map) {
		for (i = offset + (count - 1); count--; i--) {
			netdev_queue_numa_node_write(
				netdev_get_tx_queue(dev, i),
				NUMA_NO_NODE);
		}
	}
}

static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
				   u16 count)
{
	const unsigned long *possible_mask = NULL;
	struct xps_dev_maps *dev_maps;
	unsigned int nr_ids;

	if (!static_key_false(&xps_needed))
		return;

	cpus_read_lock();
	mutex_lock(&xps_map_mutex);

	if (static_key_false(&xps_rxqs_needed)) {
		dev_maps = xmap_dereference(dev->xps_rxqs_map);
		if (dev_maps) {
			nr_ids = dev->num_rx_queues;
			clean_xps_maps(dev, possible_mask, dev_maps, nr_ids,
				       offset, count, true);
		}
	}

	dev_maps = xmap_dereference(dev->xps_cpus_map);
	if (!dev_maps)
		goto out_no_maps;

	if (num_possible_cpus() > 1)
		possible_mask = cpumask_bits(cpu_possible_mask);
	nr_ids = nr_cpu_ids;
	clean_xps_maps(dev, possible_mask, dev_maps, nr_ids, offset, count,
		       false);

out_no_maps:
	mutex_unlock(&xps_map_mutex);
	cpus_read_unlock();
}

static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
{
	netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
}

static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index,
				      u16 index, bool is_rxqs_map)
{
	struct xps_map *new_map;
	int alloc_len = XPS_MIN_MAP_ALLOC;
	int i, pos;

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] != index)
			continue;
		return map;
	}

	/* Need to add tx-queue to this CPU's/rx-queue's existing map */
	if (map) {
		if (pos < map->alloc_len)
			return map;

		alloc_len = map->alloc_len * 2;
	}

	/* Need to allocate new map to store tx-queue on this
	 * CPU's/rx-queue's map
	 */
	if (is_rxqs_map)
		new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL);
	else
		new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
				       cpu_to_node(attr_index));
	if (!new_map)
		return NULL;

	for (i = 0; i < pos; i++)
		new_map->queues[i] = map->queues[i];
	new_map->alloc_len = alloc_len;
	new_map->len = pos;

	return new_map;
}
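
/*
 * __netif_set_xps_queue - rebuild the XPS map so that transmits for tx
 * queue @index are steered from the CPUs (or, when is_rxqs_map, the
 * receive queues) set in @mask. Takes xps_map_mutex internally; the
 * caller must hold the cpus read lock (netif_set_xps_queue() wraps
 * this). New maps are published with rcu_assign_pointer() and old ones
 * freed only after a grace period, so the hot transmit path can read
 * them locklessly.
 */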
int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
			  u16 index, bool is_rxqs_map)
{
	const unsigned long *online_mask = NULL, *possible_mask = NULL;
	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
	int i, j, tci, numa_node_id = -2;
	int maps_sz, num_tc = 1, tc = 0;
	struct xps_map *map, *new_map;
	bool active = false;
	unsigned int nr_ids;

	if (dev->num_tc) {
		/* Do not allow XPS on subordinate device directly */
		num_tc = dev->num_tc;
		if (num_tc < 0)
			return -EINVAL;

		/* If queue belongs to subordinate dev use its map */
		dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;

		tc = netdev_txq_to_tc(dev, index);
		if (tc < 0)
			return -EINVAL;
	}

	mutex_lock(&xps_map_mutex);
	if (is_rxqs_map) {
		maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues);
		dev_maps = xmap_dereference(dev->xps_rxqs_map);
		nr_ids = dev->num_rx_queues;
	} else {
		maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc);
		if (num_possible_cpus() > 1) {
			online_mask = cpumask_bits(cpu_online_mask);
			possible_mask = cpumask_bits(cpu_possible_mask);
		}
		dev_maps = xmap_dereference(dev->xps_cpus_map);
		nr_ids = nr_cpu_ids;
	}

	if (maps_sz < L1_CACHE_BYTES)
		maps_sz = L1_CACHE_BYTES;

	/* allocate memory for queue storage */
	for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids),
	     j < nr_ids;) {
		if (!new_dev_maps)
			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
		if (!new_dev_maps) {
			mutex_unlock(&xps_map_mutex);
			return -ENOMEM;
		}

		tci = j * num_tc + tc;
		map = dev_maps ? xmap_dereference(dev_maps->attr_map[tci]) :
				 NULL;

		map = expand_xps_map(map, j, index, is_rxqs_map);
		if (!map)
			goto error;

		RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
	}

	if (!new_dev_maps)
		goto out_no_new_maps;

	if (!dev_maps) {
		/* Increment static keys at most once per type */
		static_key_slow_inc_cpuslocked(&xps_needed);
		if (is_rxqs_map)
			static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
	}

	for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
	     j < nr_ids;) {
		/* copy maps belonging to foreign traffic classes */
		for (i = tc, tci = j * num_tc; dev_maps && i--; tci++) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->attr_map[tci]);
			RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
		}

		/* We need to explicitly update tci as the previous loop
		 * can break out early if dev_maps is NULL.
		 */
		tci = j * num_tc + tc;

		if (netif_attr_test_mask(j, mask, nr_ids) &&
		    netif_attr_test_online(j, online_mask, nr_ids)) {
			/* add tx-queue to CPU/rx-queue maps */
			int pos = 0;

			map = xmap_dereference(new_dev_maps->attr_map[tci]);
			while ((pos < map->len) && (map->queues[pos] != index))
				pos++;

			if (pos == map->len)
				map->queues[map->len++] = index;
#ifdef CONFIG_NUMA
			if (!is_rxqs_map) {
				if (numa_node_id == -2)
					numa_node_id = cpu_to_node(j);
				else if (numa_node_id != cpu_to_node(j))
					numa_node_id = -1;
			}
#endif
		} else if (dev_maps) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->attr_map[tci]);
			RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
		}

		/* copy maps belonging to foreign traffic classes */
		for (i = num_tc - tc, tci++; dev_maps && --i; tci++) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->attr_map[tci]);
			RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
		}
	}

	if (is_rxqs_map)
		rcu_assign_pointer(dev->xps_rxqs_map, new_dev_maps);
	else
		rcu_assign_pointer(dev->xps_cpus_map, new_dev_maps);

	/* Cleanup old maps */
	if (!dev_maps)
		goto out_no_old_maps;

	for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
	     j < nr_ids;) {
		for (i = num_tc, tci = j * num_tc; i--; tci++) {
			new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
			map = xmap_dereference(dev_maps->attr_map[tci]);
			if (map && map != new_map)
				kfree_rcu(map, rcu);
		}
	}

	kfree_rcu(dev_maps, rcu);

out_no_old_maps:
	dev_maps = new_dev_maps;
	active = true;

out_no_new_maps:
	if (!is_rxqs_map) {
		/* update Tx queue numa node */
		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
					     (numa_node_id >= 0) ?
					     numa_node_id : NUMA_NO_NODE);
	}

	if (!dev_maps)
		goto out_no_maps;

	/* removes tx-queue from unused CPUs/rx-queues */
	for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
	     j < nr_ids;) {
		for (i = tc, tci = j * num_tc; i--; tci++)
			active |= remove_xps_queue(dev_maps, tci, index);
		if (!netif_attr_test_mask(j, mask, nr_ids) ||
		    !netif_attr_test_online(j, online_mask, nr_ids))
			active |= remove_xps_queue(dev_maps, tci, index);
		for (i = num_tc - tc, tci++; --i; tci++)
			active |= remove_xps_queue(dev_maps, tci, index);
	}

	/* free map if not active */
	if (!active)
		reset_xps_maps(dev, dev_maps, is_rxqs_map);

out_no_maps:
	mutex_unlock(&xps_map_mutex);

	return 0;
error:
	/* remove any maps that we added */
	for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
	     j < nr_ids;) {
		for (i = num_tc, tci = j * num_tc; i--; tci++) {
			new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
			map = dev_maps ?
			      xmap_dereference(dev_maps->attr_map[tci]) :
			      NULL;
			if (new_map && new_map != map)
				kfree(new_map);
		}
	}

	mutex_unlock(&xps_map_mutex);

	kfree(new_dev_maps);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(__netif_set_xps_queue);

int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
			u16 index)
{
	int ret;

	cpus_read_lock();
	ret = __netif_set_xps_queue(dev, cpumask_bits(mask), index, false);
	cpus_read_unlock();

	return ret;
}
EXPORT_SYMBOL(netif_set_xps_queue);

#endif
static void netdev_unbind_all_sb_channels(struct net_device *dev)
{
	struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];

	/* Unbind any subordinate channels */
	while (txq-- != &dev->_tx[0]) {
		if (txq->sb_dev)
			netdev_unbind_sb_channel(dev, txq->sb_dev);
	}
}

void netdev_reset_tc(struct net_device *dev)
{
#ifdef CONFIG_XPS
	netif_reset_xps_queues_gt(dev, 0);
#endif
	netdev_unbind_all_sb_channels(dev);

	/* Reset TC configuration of device */
	dev->num_tc = 0;
	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
}
EXPORT_SYMBOL(netdev_reset_tc);

int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

#ifdef CONFIG_XPS
	netif_reset_xps_queues(dev, offset, count);
#endif
	dev->tc_to_txq[tc].count = count;
	dev->tc_to_txq[tc].offset = offset;
	return 0;
}
EXPORT_SYMBOL(netdev_set_tc_queue);

int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
{
	if (num_tc > TC_MAX_QUEUE)
		return -EINVAL;

#ifdef CONFIG_XPS
	netif_reset_xps_queues_gt(dev, 0);
#endif
	netdev_unbind_all_sb_channels(dev);

	dev->num_tc = num_tc;
	return 0;
}
EXPORT_SYMBOL(netdev_set_num_tc);

void netdev_unbind_sb_channel(struct net_device *dev,
			      struct net_device *sb_dev)
{
	struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];

#ifdef CONFIG_XPS
	netif_reset_xps_queues_gt(sb_dev, 0);
#endif
	memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq));
	memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map));

	while (txq-- != &dev->_tx[0]) {
		if (txq->sb_dev == sb_dev)
			txq->sb_dev = NULL;
	}
}
EXPORT_SYMBOL(netdev_unbind_sb_channel);

int netdev_bind_sb_channel_queue(struct net_device *dev,
				 struct net_device *sb_dev,
				 u8 tc, u16 count, u16 offset)
{
	/* Make certain the sb_dev and dev are already configured */
	if (sb_dev->num_tc >= 0 || tc >= dev->num_tc)
		return -EINVAL;

	/* We cannot hand out queues we don't have */
	if ((offset + count) > dev->real_num_tx_queues)
		return -EINVAL;

	/* Record the mapping */
	sb_dev->tc_to_txq[tc].count = count;
	sb_dev->tc_to_txq[tc].offset = offset;

	/* Provide a way for Tx queue to find the tc_to_txq map or
	 * XPS map for itself.
	 */
	while (count--)
		netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev;

	return 0;
}
EXPORT_SYMBOL(netdev_bind_sb_channel_queue);

int netdev_set_sb_channel(struct net_device *dev, u16 channel)
{
	/* Do not use a multiqueue device to represent a subordinate channel */
	if (netif_is_multiqueue(dev))
		return -ENODEV;

	/* Channels 1 - 32767 may be used for subordinate channels.
	 * Channel 0 is "native" mode, reserved for the main root
	 * device; writing 0 resets the device back to normal mode
	 * after use as a subordinate channel.
	 */
	if (channel > S16_MAX)
		return -EINVAL;

	dev->num_tc = -channel;

	return 0;
}
EXPORT_SYMBOL(netdev_set_sb_channel);
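
/**
 * netif_set_real_num_tx_queues - set the number of TX queues in use
 * @dev: network device
 * @txq: requested count, 1 <= txq <= dev->num_tx_queues
 *
 * Drivers call this when reconfiguring channel counts. When shrinking
 * the queue set of a registered device, the sysfs kobjects, traffic
 * class mappings, qdisc state and XPS maps of the disappearing queues
 * are cleaned up after a synchronize_net().
 */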
2656int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
2657{
2658 bool disabling;
2659 int rc;
2660
2661 disabling = txq < dev->real_num_tx_queues;
2662
2663 if (txq < 1 || txq > dev->num_tx_queues)
2664 return -EINVAL;
2665
2666 if (dev->reg_state == NETREG_REGISTERED ||
2667 dev->reg_state == NETREG_UNREGISTERING) {
2668 ASSERT_RTNL();
2669
2670 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2671 txq);
2672 if (rc)
2673 return rc;
2674
2675 if (dev->num_tc)
2676 netif_setup_tc(dev, txq);
2677
2678 dev->real_num_tx_queues = txq;
2679
2680 if (disabling) {
2681 synchronize_net();
2682 qdisc_reset_all_tx_gt(dev, txq);
2683#ifdef CONFIG_XPS
2684 netif_reset_xps_queues_gt(dev, txq);
2685#endif
2686 }
2687 } else {
2688 dev->real_num_tx_queues = txq;
2689 }
2690
2691 return 0;
2692}
2693EXPORT_SYMBOL(netif_set_real_num_tx_queues);
2694
2695#ifdef CONFIG_SYSFS
2696
2697
2698
2699
2700
2701
2702
2703
2704
2705
2706int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2707{
2708 int rc;
2709
2710 if (rxq < 1 || rxq > dev->num_rx_queues)
2711 return -EINVAL;
2712
2713 if (dev->reg_state == NETREG_REGISTERED) {
2714 ASSERT_RTNL();
2715
2716 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2717 rxq);
2718 if (rc)
2719 return rc;
2720 }
2721
2722 dev->real_num_rx_queues = rxq;
2723 return 0;
2724}
2725EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2726#endif
2727
2728
2729
2730
2731
2732
2733
2734int netif_get_num_default_rss_queues(void)
2735{
2736 return is_kdump_kernel() ?
2737 1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
2738}
2739EXPORT_SYMBOL(netif_get_num_default_rss_queues);
2740
2741static void __netif_reschedule(struct Qdisc *q)
2742{
2743 struct softnet_data *sd;
2744 unsigned long flags;
2745
2746 local_irq_save(flags);
2747 sd = this_cpu_ptr(&softnet_data);
2748 q->next_sched = NULL;
2749 *sd->output_queue_tailp = q;
2750 sd->output_queue_tailp = &q->next_sched;
2751 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2752 local_irq_restore(flags);
2753}
2754
2755void __netif_schedule(struct Qdisc *q)
2756{
2757 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
2758 __netif_reschedule(q);
2759}
2760EXPORT_SYMBOL(__netif_schedule);
2761
2762struct dev_kfree_skb_cb {
2763 enum skb_free_reason reason;
2764};
2765
2766static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
2767{
2768 return (struct dev_kfree_skb_cb *)skb->cb;
2769}
2770
2771void netif_schedule_queue(struct netdev_queue *txq)
2772{
2773 rcu_read_lock();
2774 if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
2775 struct Qdisc *q = rcu_dereference(txq->qdisc);
2776
2777 __netif_schedule(q);
2778 }
2779 rcu_read_unlock();
2780}
2781EXPORT_SYMBOL(netif_schedule_queue);
2782
2783void netif_tx_wake_queue(struct netdev_queue *dev_queue)
2784{
2785 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
2786 struct Qdisc *q;
2787
2788 rcu_read_lock();
2789 q = rcu_dereference(dev_queue->qdisc);
2790 __netif_schedule(q);
2791 rcu_read_unlock();
2792 }
2793}
2794EXPORT_SYMBOL(netif_tx_wake_queue);
2795
2796void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
2797{
2798 unsigned long flags;
2799
2800 if (unlikely(!skb))
2801 return;
2802
2803 if (likely(refcount_read(&skb->users) == 1)) {
2804 smp_rmb();
2805 refcount_set(&skb->users, 0);
2806 } else if (likely(!refcount_dec_and_test(&skb->users))) {
2807 return;
2808 }
2809 get_kfree_skb_cb(skb)->reason = reason;
2810 local_irq_save(flags);
2811 skb->next = __this_cpu_read(softnet_data.completion_queue);
2812 __this_cpu_write(softnet_data.completion_queue, skb);
2813 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2814 local_irq_restore(flags);
2815}
2816EXPORT_SYMBOL(__dev_kfree_skb_irq);
2817
2818void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
2819{
2820 if (in_irq() || irqs_disabled())
2821 __dev_kfree_skb_irq(skb, reason);
2822 else
2823 dev_kfree_skb(skb);
2824}
2825EXPORT_SYMBOL(__dev_kfree_skb_any);
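
/* dev_kfree_skb_irq(skb) and dev_kfree_skb_any(skb) are the convenience
 * wrappers around the two helpers above (with SKB_REASON_DROPPED);
 * drivers use the _any flavor when the calling context is not known in
 * advance, since it takes the irq-safe path only when needed.
 */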
2826
/**
 *	netif_device_detach - mark device as removed
 *	@dev: network device
 *
 *	Mark device as removed from system and therefore no longer available.
 */
2834void netif_device_detach(struct net_device *dev)
2835{
2836 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
2837 netif_running(dev)) {
2838 netif_tx_stop_all_queues(dev);
2839 }
2840}
2841EXPORT_SYMBOL(netif_device_detach);
2842
/**
 *	netif_device_attach - mark device as attached
 *	@dev: network device
 *
 *	Mark device as attached from system and restart if needed.
 */
2849void netif_device_attach(struct net_device *dev)
2850{
2851 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
2852 netif_running(dev)) {
2853 netif_tx_wake_all_queues(dev);
2854 __netdev_watchdog_up(dev);
2855 }
2856}
2857EXPORT_SYMBOL(netif_device_attach);
2858
/*
 * Returns a Tx queue index for the given packet, using the queue count of
 * the (possibly subordinate) device as the distribution range.
 */
2863static u16 skb_tx_hash(const struct net_device *dev,
2864 const struct net_device *sb_dev,
2865 struct sk_buff *skb)
2866{
2867 u32 hash;
2868 u16 qoffset = 0;
2869 u16 qcount = dev->real_num_tx_queues;
2870
2871 if (dev->num_tc) {
2872 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
2873
2874 qoffset = sb_dev->tc_to_txq[tc].offset;
2875 qcount = sb_dev->tc_to_txq[tc].count;
2876 }
2877
2878 if (skb_rx_queue_recorded(skb)) {
2879 hash = skb_get_rx_queue(skb);
2880 while (unlikely(hash >= qcount))
2881 hash -= qcount;
2882 return hash + qoffset;
2883 }
2884
2885 return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
2886}
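
/* Note on the scaling above: reciprocal_scale(hash, qcount) maps a 32-bit
 * hash onto [0, qcount) without a division, as (u32)(((u64)hash * qcount)
 * >> 32).  For example, hash = 0x80000000 with qcount = 8 selects
 * queue 4, before qoffset is added.
 */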
2887
2888static void skb_warn_bad_offload(const struct sk_buff *skb)
2889{
2890 static const netdev_features_t null_features;
2891 struct net_device *dev = skb->dev;
2892 const char *name = "";
2893
2894 if (!net_ratelimit())
2895 return;
2896
2897 if (dev) {
2898 if (dev->dev.parent)
2899 name = dev_driver_string(dev->dev.parent);
2900 else
2901 name = netdev_name(dev);
2902 }
2903 skb_dump(KERN_WARNING, skb, false);
2904 WARN(1, "%s: caps=(%pNF, %pNF)\n",
2905 name, dev ? &dev->features : &null_features,
2906 skb->sk ? &skb->sk->sk_route_caps : &null_features);
2907}
2908
/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
2913int skb_checksum_help(struct sk_buff *skb)
2914{
2915 __wsum csum;
2916 int ret = 0, offset;
2917
2918 if (skb->ip_summed == CHECKSUM_COMPLETE)
2919 goto out_set_summed;
2920
2921 if (unlikely(skb_shinfo(skb)->gso_size)) {
2922 skb_warn_bad_offload(skb);
2923 return -EINVAL;
2924 }
2925
	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity : checksum could be wrong.
	 */
2929 if (skb_has_shared_frag(skb)) {
2930 ret = __skb_linearize(skb);
2931 if (ret)
2932 goto out;
2933 }
2934
2935 offset = skb_checksum_start_offset(skb);
2936 BUG_ON(offset >= skb_headlen(skb));
2937 csum = skb_checksum(skb, offset, skb->len - offset, 0);
2938
2939 offset += skb->csum_offset;
2940 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
2941
2942 if (skb_cloned(skb) &&
2943 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
2944 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2945 if (ret)
2946 goto out;
2947 }
2948
2949 *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
2950out_set_summed:
2951 skb->ip_summed = CHECKSUM_NONE;
2952out:
2953 return ret;
2954}
2955EXPORT_SYMBOL(skb_checksum_help);
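
/* For CHECKSUM_PARTIAL skbs the contract is: fold the checksum over
 * [csum_start, skb->len) and store the result at csum_start + csum_offset.
 * The helper above performs exactly that in software when the device
 * cannot.
 */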
2956
2957int skb_crc32c_csum_help(struct sk_buff *skb)
2958{
2959 __le32 crc32c_csum;
2960 int ret = 0, offset, start;
2961
2962 if (skb->ip_summed != CHECKSUM_PARTIAL)
2963 goto out;
2964
2965 if (unlikely(skb_is_gso(skb)))
2966 goto out;
2967
	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity : checksum could be wrong.
	 */
2971 if (unlikely(skb_has_shared_frag(skb))) {
2972 ret = __skb_linearize(skb);
2973 if (ret)
2974 goto out;
2975 }
2976 start = skb_checksum_start_offset(skb);
2977 offset = start + offsetof(struct sctphdr, checksum);
2978 if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
2979 ret = -EINVAL;
2980 goto out;
2981 }
2982 if (skb_cloned(skb) &&
2983 !skb_clone_writable(skb, offset + sizeof(__le32))) {
2984 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2985 if (ret)
2986 goto out;
2987 }
2988 crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
2989 skb->len - start, ~(__u32)0,
2990 crc32c_csum_stub));
2991 *(__le32 *)(skb->data + offset) = crc32c_csum;
2992 skb->ip_summed = CHECKSUM_NONE;
2993 skb->csum_not_inet = 0;
2994out:
2995 return ret;
2996}
2997
2998__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
2999{
3000 __be16 type = skb->protocol;
3001
	/* Tunnel gso handlers can set protocol to ethernet. */
3003 if (type == htons(ETH_P_TEB)) {
3004 struct ethhdr *eth;
3005
3006 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
3007 return 0;
3008
3009 eth = (struct ethhdr *)skb->data;
3010 type = eth->h_proto;
3011 }
3012
3013 return __vlan_get_protocol(skb, type, depth);
3014}
3015
/**
 *	skb_mac_gso_segment - mac layer segmentation handler
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 */
3021struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
3022 netdev_features_t features)
3023{
3024 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
3025 struct packet_offload *ptype;
3026 int vlan_depth = skb->mac_len;
3027 __be16 type = skb_network_protocol(skb, &vlan_depth);
3028
3029 if (unlikely(!type))
3030 return ERR_PTR(-EINVAL);
3031
3032 __skb_pull(skb, vlan_depth);
3033
3034 rcu_read_lock();
3035 list_for_each_entry_rcu(ptype, &offload_base, list) {
3036 if (ptype->type == type && ptype->callbacks.gso_segment) {
3037 segs = ptype->callbacks.gso_segment(skb, features);
3038 break;
3039 }
3040 }
3041 rcu_read_unlock();
3042
3043 __skb_push(skb, skb->data - skb_mac_header(skb));
3044
3045 return segs;
3046}
3047EXPORT_SYMBOL(skb_mac_gso_segment);
3048
/* openvswitch calls this on rx path, so we need a different check.
 */
3052static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
3053{
3054 if (tx_path)
3055 return skb->ip_summed != CHECKSUM_PARTIAL &&
3056 skb->ip_summed != CHECKSUM_UNNECESSARY;
3057
3058 return skb->ip_summed == CHECKSUM_NONE;
3059}
3060
/**
 *	__skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *	@tx_path: whether it is called in TX path
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 *
 *	Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb.
 */
3074struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
3075 netdev_features_t features, bool tx_path)
3076{
3077 struct sk_buff *segs;
3078
3079 if (unlikely(skb_needs_check(skb, tx_path))) {
3080 int err;
3081
		/* We're going to init ->check field in TCP or UDP header */
3083 err = skb_cow_head(skb, 0);
3084 if (err < 0)
3085 return ERR_PTR(err);
3086 }
3087
	/* Only report GSO partial support if it will enable us to
	 * support segmentation on this frame without needing additional
	 * work.
	 */
3092 if (features & NETIF_F_GSO_PARTIAL) {
3093 netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
3094 struct net_device *dev = skb->dev;
3095
3096 partial_features |= dev->features & dev->gso_partial_features;
3097 if (!skb_gso_ok(skb, features | partial_features))
3098 features &= ~NETIF_F_GSO_PARTIAL;
3099 }
3100
3101 BUILD_BUG_ON(SKB_SGO_CB_OFFSET +
3102 sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
3103
3104 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
3105 SKB_GSO_CB(skb)->encap_level = 0;
3106
3107 skb_reset_mac_header(skb);
3108 skb_reset_mac_len(skb);
3109
3110 segs = skb_mac_gso_segment(skb, features);
3111
3112 if (unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
3113 skb_warn_bad_offload(skb);
3114
3115 return segs;
3116}
3117EXPORT_SYMBOL(__skb_gso_segment);
3118
/* Take action when hardware reception checksum errors are detected. */
3120#ifdef CONFIG_BUG
3121void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
3122{
3123 if (net_ratelimit()) {
3124 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
3125 skb_dump(KERN_ERR, skb, true);
3126 dump_stack();
3127 }
3128}
3129EXPORT_SYMBOL(netdev_rx_csum_fault);
3130#endif
3131
/* Returns nonzero if the skb carries highmem fragments that a device
 * lacking NETIF_F_HIGHDMA cannot address.
 */
3133static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
3134{
3135#ifdef CONFIG_HIGHMEM
3136 int i;
3137
3138 if (!(dev->features & NETIF_F_HIGHDMA)) {
3139 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3140 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3141
3142 if (PageHighMem(skb_frag_page(frag)))
3143 return 1;
3144 }
3145 }
3146#endif
3147 return 0;
3148}
3149
3150
/* If MPLS offload request, verify we are testing hardware MPLS features
 * instead of standard features for the netdev.
 */
3153#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
3154static netdev_features_t net_mpls_features(struct sk_buff *skb,
3155 netdev_features_t features,
3156 __be16 type)
3157{
3158 if (eth_p_mpls(type))
3159 features &= skb->dev->mpls_features;
3160
3161 return features;
3162}
3163#else
3164static netdev_features_t net_mpls_features(struct sk_buff *skb,
3165 netdev_features_t features,
3166 __be16 type)
3167{
3168 return features;
3169}
3170#endif
3171
3172static netdev_features_t harmonize_features(struct sk_buff *skb,
3173 netdev_features_t features)
3174{
3175 int tmp;
3176 __be16 type;
3177
3178 type = skb_network_protocol(skb, &tmp);
3179 features = net_mpls_features(skb, features, type);
3180
3181 if (skb->ip_summed != CHECKSUM_NONE &&
3182 !can_checksum_protocol(features, type)) {
3183 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3184 }
3185 if (illegal_highdma(skb->dev, skb))
3186 features &= ~NETIF_F_SG;
3187
3188 return features;
3189}
3190
3191netdev_features_t passthru_features_check(struct sk_buff *skb,
3192 struct net_device *dev,
3193 netdev_features_t features)
3194{
3195 return features;
3196}
3197EXPORT_SYMBOL(passthru_features_check);
3198
3199static netdev_features_t dflt_features_check(struct sk_buff *skb,
3200 struct net_device *dev,
3201 netdev_features_t features)
3202{
3203 return vlan_features_check(skb, features);
3204}
3205
3206static netdev_features_t gso_features_check(const struct sk_buff *skb,
3207 struct net_device *dev,
3208 netdev_features_t features)
3209{
3210 u16 gso_segs = skb_shinfo(skb)->gso_segs;
3211
3212 if (gso_segs > dev->gso_max_segs)
3213 return features & ~NETIF_F_GSO_MASK;
3214
	/* Support for GSO partial features requires software
	 * intervention before we can actually process the packets
	 * so we need to strip support for any partial features now
	 * and we can pull them back in after we have partially
	 * segmented the frame.
	 */
3221 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
3222 features &= ~dev->gso_partial_features;
3223
	/* Make sure to clear the IPv4 ID mangling feature if the
	 * IPv4 header has the potential to be fragmented.
	 */
3227 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
3228 struct iphdr *iph = skb->encapsulation ?
3229 inner_ip_hdr(skb) : ip_hdr(skb);
3230
3231 if (!(iph->frag_off & htons(IP_DF)))
3232 features &= ~NETIF_F_TSO_MANGLEID;
3233 }
3234
3235 return features;
3236}
3237
3238netdev_features_t netif_skb_features(struct sk_buff *skb)
3239{
3240 struct net_device *dev = skb->dev;
3241 netdev_features_t features = dev->features;
3242
3243 if (skb_is_gso(skb))
3244 features = gso_features_check(skb, dev, features);
3245
	/* If encapsulation offload request, verify we are testing
	 * hardware encapsulation features instead of standard
	 * features for the netdev.
	 */
3250 if (skb->encapsulation)
3251 features &= dev->hw_enc_features;
3252
3253 if (skb_vlan_tagged(skb))
3254 features = netdev_intersect_features(features,
3255 dev->vlan_features |
3256 NETIF_F_HW_VLAN_CTAG_TX |
3257 NETIF_F_HW_VLAN_STAG_TX);
3258
3259 if (dev->netdev_ops->ndo_features_check)
3260 features &= dev->netdev_ops->ndo_features_check(skb, dev,
3261 features);
3262 else
3263 features &= dflt_features_check(skb, dev, features);
3264
3265 return harmonize_features(skb, features);
3266}
3267EXPORT_SYMBOL(netif_skb_features);
3268
3269static int xmit_one(struct sk_buff *skb, struct net_device *dev,
3270 struct netdev_queue *txq, bool more)
3271{
3272 unsigned int len;
3273 int rc;
3274
3275 if (dev_nit_active(dev))
3276 dev_queue_xmit_nit(skb, dev);
3277
3278 len = skb->len;
3279 trace_net_dev_start_xmit(skb, dev);
3280 rc = netdev_start_xmit(skb, dev, txq, more);
3281 trace_net_dev_xmit(skb, rc, dev, len);
3282
3283 return rc;
3284}
3285
3286struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
3287 struct netdev_queue *txq, int *ret)
3288{
3289 struct sk_buff *skb = first;
3290 int rc = NETDEV_TX_OK;
3291
3292 while (skb) {
3293 struct sk_buff *next = skb->next;
3294
3295 skb_mark_not_on_list(skb);
3296 rc = xmit_one(skb, dev, txq, next != NULL);
3297 if (unlikely(!dev_xmit_complete(rc))) {
3298 skb->next = next;
3299 goto out;
3300 }
3301
3302 skb = next;
3303 if (netif_tx_queue_stopped(txq) && skb) {
3304 rc = NETDEV_TX_BUSY;
3305 break;
3306 }
3307 }
3308
3309out:
3310 *ret = rc;
3311 return skb;
3312}
3313
3314static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
3315 netdev_features_t features)
3316{
3317 if (skb_vlan_tag_present(skb) &&
3318 !vlan_hw_offload_capable(features, skb->vlan_proto))
3319 skb = __vlan_hwaccel_push_inside(skb);
3320 return skb;
3321}
3322
3323int skb_csum_hwoffload_help(struct sk_buff *skb,
3324 const netdev_features_t features)
3325{
3326 if (unlikely(skb->csum_not_inet))
3327 return !!(features & NETIF_F_SCTP_CRC) ? 0 :
3328 skb_crc32c_csum_help(skb);
3329
3330 return !!(features & NETIF_F_CSUM_MASK) ? 0 : skb_checksum_help(skb);
3331}
3332EXPORT_SYMBOL(skb_csum_hwoffload_help);
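
/* The helper above funnels all checksum-offload fallbacks through one
 * place: CRC32c users (SCTP, marked by csum_not_inet) fall back to
 * skb_crc32c_csum_help() unless the device has NETIF_F_SCTP_CRC, and
 * everything else falls back to skb_checksum_help() unless some
 * NETIF_F_CSUM_MASK bit is present.
 */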
3333
3334static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
3335{
3336 netdev_features_t features;
3337
3338 features = netif_skb_features(skb);
3339 skb = validate_xmit_vlan(skb, features);
3340 if (unlikely(!skb))
3341 goto out_null;
3342
3343 skb = sk_validate_xmit_skb(skb, dev);
3344 if (unlikely(!skb))
3345 goto out_null;
3346
3347 if (netif_needs_gso(skb, features)) {
3348 struct sk_buff *segs;
3349
3350 segs = skb_gso_segment(skb, features);
3351 if (IS_ERR(segs)) {
3352 goto out_kfree_skb;
3353 } else if (segs) {
3354 consume_skb(skb);
3355 skb = segs;
3356 }
3357 } else {
3358 if (skb_needs_linearize(skb, features) &&
3359 __skb_linearize(skb))
3360 goto out_kfree_skb;
3361
		/* If packet is not checksummed and device does not
		 * support checksumming for this protocol, complete
		 * checksumming here.
		 */
3366 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3367 if (skb->encapsulation)
3368 skb_set_inner_transport_header(skb,
3369 skb_checksum_start_offset(skb));
3370 else
3371 skb_set_transport_header(skb,
3372 skb_checksum_start_offset(skb));
3373 if (skb_csum_hwoffload_help(skb, features))
3374 goto out_kfree_skb;
3375 }
3376 }
3377
3378 skb = validate_xmit_xfrm(skb, features, again);
3379
3380 return skb;
3381
3382out_kfree_skb:
3383 kfree_skb(skb);
3384out_null:
3385 atomic_long_inc(&dev->tx_dropped);
3386 return NULL;
3387}
3388
3389struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
3390{
3391 struct sk_buff *next, *head = NULL, *tail;
3392
3393 for (; skb != NULL; skb = next) {
3394 next = skb->next;
3395 skb_mark_not_on_list(skb);
3396
		/* in case skb won't be segmented, point to itself */
3398 skb->prev = skb;
3399
3400 skb = validate_xmit_skb(skb, dev, again);
3401 if (!skb)
3402 continue;
3403
3404 if (!head)
3405 head = skb;
3406 else
3407 tail->next = skb;
3408
		/* If skb was segmented, skb->prev points to
		 * the last segment. If not, it still contains skb.
		 */
3411 tail = skb->prev;
3412 }
3413 return head;
3414}
3415EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
3416
3417static void qdisc_pkt_len_init(struct sk_buff *skb)
3418{
3419 const struct skb_shared_info *shinfo = skb_shinfo(skb);
3420
3421 qdisc_skb_cb(skb)->pkt_len = skb->len;
3422
	/* To get more precise estimation of bytes sent on wire,
	 * we add to pkt_len the headers size of all segments
	 */
3426 if (shinfo->gso_size && skb_transport_header_was_set(skb)) {
3427 unsigned int hdr_len;
3428 u16 gso_segs = shinfo->gso_segs;
3429
		/* mac layer + network layer */
3431 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
3432
		/* + transport layer */
3434 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
3435 const struct tcphdr *th;
3436 struct tcphdr _tcphdr;
3437
3438 th = skb_header_pointer(skb, skb_transport_offset(skb),
3439 sizeof(_tcphdr), &_tcphdr);
3440 if (likely(th))
3441 hdr_len += __tcp_hdrlen(th);
3442 } else {
3443 struct udphdr _udphdr;
3444
3445 if (skb_header_pointer(skb, skb_transport_offset(skb),
3446 sizeof(_udphdr), &_udphdr))
3447 hdr_len += sizeof(struct udphdr);
3448 }
3449
3450 if (shinfo->gso_type & SKB_GSO_DODGY)
3451 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
3452 shinfo->gso_size);
3453
3454 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
3455 }
3456}
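
/* Worked example for the estimate above (illustrative numbers): a TSO skb
 * of skb->len = 4434 with hdr_len = 54 (14 Ethernet + 20 IP + 20 TCP) and
 * gso_segs = 3 is accounted as 4434 + (3 - 1) * 54 = 4542 bytes, matching
 * the bytes that will actually appear on the wire.
 */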
3457
3458static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
3459 struct net_device *dev,
3460 struct netdev_queue *txq)
3461{
3462 spinlock_t *root_lock = qdisc_lock(q);
3463 struct sk_buff *to_free = NULL;
3464 bool contended;
3465 int rc;
3466
3467 qdisc_calculate_pkt_len(skb, q);
3468
3469 if (q->flags & TCQ_F_NOLOCK) {
3470 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
3471 __qdisc_drop(skb, &to_free);
3472 rc = NET_XMIT_DROP;
3473 } else if ((q->flags & TCQ_F_CAN_BYPASS) && q->empty &&
3474 qdisc_run_begin(q)) {
3475 qdisc_bstats_cpu_update(q, skb);
3476
3477 if (sch_direct_xmit(skb, q, dev, txq, NULL, true))
3478 __qdisc_run(q);
3479
3480 qdisc_run_end(q);
3481 rc = NET_XMIT_SUCCESS;
3482 } else {
3483 rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
3484 qdisc_run(q);
3485 }
3486
3487 if (unlikely(to_free))
3488 kfree_skb_list(to_free);
3489 return rc;
3490 }
3491
	/*
	 * Heuristic to force contended enqueues to serialize on a
	 * separate lock before trying to get qdisc main lock.
	 * This permits qdisc->running owner to get the lock more
	 * often and dequeue packets faster.
	 */
3498 contended = qdisc_is_running(q);
3499 if (unlikely(contended))
3500 spin_lock(&q->busylock);
3501
3502 spin_lock(root_lock);
3503 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
3504 __qdisc_drop(skb, &to_free);
3505 rc = NET_XMIT_DROP;
3506 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
3507 qdisc_run_begin(q)) {
		/*
		 * This is a work-conserving queue; there are no old skbs
		 * waiting to be sent out; and the qdisc is not running -
		 * xmit the skb directly.
		 */
3514 qdisc_bstats_update(q, skb);
3515
3516 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
3517 if (unlikely(contended)) {
3518 spin_unlock(&q->busylock);
3519 contended = false;
3520 }
3521 __qdisc_run(q);
3522 }
3523
3524 qdisc_run_end(q);
3525 rc = NET_XMIT_SUCCESS;
3526 } else {
3527 rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
3528 if (qdisc_run_begin(q)) {
3529 if (unlikely(contended)) {
3530 spin_unlock(&q->busylock);
3531 contended = false;
3532 }
3533 __qdisc_run(q);
3534 qdisc_run_end(q);
3535 }
3536 }
3537 spin_unlock(root_lock);
3538 if (unlikely(to_free))
3539 kfree_skb_list(to_free);
3540 if (unlikely(contended))
3541 spin_unlock(&q->busylock);
3542 return rc;
3543}
3544
3545#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
3546static void skb_update_prio(struct sk_buff *skb)
3547{
3548 const struct netprio_map *map;
3549 const struct sock *sk;
3550 unsigned int prioidx;
3551
3552 if (skb->priority)
3553 return;
3554 map = rcu_dereference_bh(skb->dev->priomap);
3555 if (!map)
3556 return;
3557 sk = skb_to_full_sk(skb);
3558 if (!sk)
3559 return;
3560
3561 prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
3562
3563 if (prioidx < map->priomap_len)
3564 skb->priority = map->priomap[prioidx];
3565}
3566#else
3567#define skb_update_prio(skb)
3568#endif
3569
/**
 *	dev_loopback_xmit - loop back @skb
 *	@net: network namespace this loopback is happening in
 *	@sk:  sk needed to be a netfilter okfn
 *	@skb: buffer to transmit
 */
3576int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
3577{
3578 skb_reset_mac_header(skb);
3579 __skb_pull(skb, skb_network_offset(skb));
3580 skb->pkt_type = PACKET_LOOPBACK;
3581 skb->ip_summed = CHECKSUM_UNNECESSARY;
3582 WARN_ON(!skb_dst(skb));
3583 skb_dst_force(skb);
3584 netif_rx_ni(skb);
3585 return 0;
3586}
3587EXPORT_SYMBOL(dev_loopback_xmit);
3588
3589#ifdef CONFIG_NET_EGRESS
3590static struct sk_buff *
3591sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
3592{
3593 struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress);
3594 struct tcf_result cl_res;
3595
3596 if (!miniq)
3597 return skb;
3598
	/* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
3600 mini_qdisc_bstats_cpu_update(miniq, skb);
3601
3602 switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
3603 case TC_ACT_OK:
3604 case TC_ACT_RECLASSIFY:
3605 skb->tc_index = TC_H_MIN(cl_res.classid);
3606 break;
3607 case TC_ACT_SHOT:
3608 mini_qdisc_qstats_cpu_drop(miniq);
3609 *ret = NET_XMIT_DROP;
3610 kfree_skb(skb);
3611 return NULL;
3612 case TC_ACT_STOLEN:
3613 case TC_ACT_QUEUED:
3614 case TC_ACT_TRAP:
3615 *ret = NET_XMIT_SUCCESS;
3616 consume_skb(skb);
3617 return NULL;
3618 case TC_ACT_REDIRECT:
		/* No need to push/pop skb's mac_header here on egress! */
3620 skb_do_redirect(skb);
3621 *ret = NET_XMIT_SUCCESS;
3622 return NULL;
3623 default:
3624 break;
3625 }
3626
3627 return skb;
3628}
3629#endif
3630
3631#ifdef CONFIG_XPS
3632static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
3633 struct xps_dev_maps *dev_maps, unsigned int tci)
3634{
3635 struct xps_map *map;
3636 int queue_index = -1;
3637
3638 if (dev->num_tc) {
3639 tci *= dev->num_tc;
3640 tci += netdev_get_prio_tc_map(dev, skb->priority);
3641 }
3642
3643 map = rcu_dereference(dev_maps->attr_map[tci]);
3644 if (map) {
3645 if (map->len == 1)
3646 queue_index = map->queues[0];
3647 else
3648 queue_index = map->queues[reciprocal_scale(
3649 skb_get_hash(skb), map->len)];
3650 if (unlikely(queue_index >= dev->real_num_tx_queues))
3651 queue_index = -1;
3652 }
3653 return queue_index;
3654}
3655#endif
3656
3657static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
3658 struct sk_buff *skb)
3659{
3660#ifdef CONFIG_XPS
3661 struct xps_dev_maps *dev_maps;
3662 struct sock *sk = skb->sk;
3663 int queue_index = -1;
3664
3665 if (!static_key_false(&xps_needed))
3666 return -1;
3667
3668 rcu_read_lock();
3669 if (!static_key_false(&xps_rxqs_needed))
3670 goto get_cpus_map;
3671
3672 dev_maps = rcu_dereference(sb_dev->xps_rxqs_map);
3673 if (dev_maps) {
3674 int tci = sk_rx_queue_get(sk);
3675
3676 if (tci >= 0 && tci < dev->num_rx_queues)
3677 queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
3678 tci);
3679 }
3680
3681get_cpus_map:
3682 if (queue_index < 0) {
3683 dev_maps = rcu_dereference(sb_dev->xps_cpus_map);
3684 if (dev_maps) {
3685 unsigned int tci = skb->sender_cpu - 1;
3686
3687 queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
3688 tci);
3689 }
3690 }
3691 rcu_read_unlock();
3692
3693 return queue_index;
3694#else
3695 return -1;
3696#endif
3697}
3698
3699u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
3700 struct net_device *sb_dev)
3701{
3702 return 0;
3703}
3704EXPORT_SYMBOL(dev_pick_tx_zero);
3705
3706u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
3707 struct net_device *sb_dev)
3708{
3709 return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
3710}
3711EXPORT_SYMBOL(dev_pick_tx_cpu_id);
3712
3713u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
3714 struct net_device *sb_dev)
3715{
3716 struct sock *sk = skb->sk;
3717 int queue_index = sk_tx_queue_get(sk);
3718
3719 sb_dev = sb_dev ? : dev;
3720
3721 if (queue_index < 0 || skb->ooo_okay ||
3722 queue_index >= dev->real_num_tx_queues) {
3723 int new_index = get_xps_queue(dev, sb_dev, skb);
3724
3725 if (new_index < 0)
3726 new_index = skb_tx_hash(dev, sb_dev, skb);
3727
3728 if (queue_index != new_index && sk &&
3729 sk_fullsock(sk) &&
3730 rcu_access_pointer(sk->sk_dst_cache))
3731 sk_tx_queue_set(sk, new_index);
3732
3733 queue_index = new_index;
3734 }
3735
3736 return queue_index;
3737}
3738EXPORT_SYMBOL(netdev_pick_tx);
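
/* Selection order in netdev_pick_tx() above: a valid queue index cached on
 * the socket is reused unless ooo_okay allows rebalancing; otherwise XPS
 * (get_xps_queue) is consulted first, with skb_tx_hash() as the final
 * fallback.
 */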
3739
3740struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
3741 struct sk_buff *skb,
3742 struct net_device *sb_dev)
3743{
3744 int queue_index = 0;
3745
3746#ifdef CONFIG_XPS
3747 u32 sender_cpu = skb->sender_cpu - 1;
3748
3749 if (sender_cpu >= (u32)NR_CPUS)
3750 skb->sender_cpu = raw_smp_processor_id() + 1;
3751#endif
3752
3753 if (dev->real_num_tx_queues != 1) {
3754 const struct net_device_ops *ops = dev->netdev_ops;
3755
3756 if (ops->ndo_select_queue)
3757 queue_index = ops->ndo_select_queue(dev, skb, sb_dev);
3758 else
3759 queue_index = netdev_pick_tx(dev, skb, sb_dev);
3760
3761 queue_index = netdev_cap_txqueue(dev, queue_index);
3762 }
3763
3764 skb_set_queue_mapping(skb, queue_index);
3765 return netdev_get_tx_queue(dev, queue_index);
3766}
3767
/**
 *	__dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *	@sb_dev: subordinate device used for L2 forwarding offload
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------
 *	I notice this method can also return errors from the queue
 *	disciplines, including NET_XMIT_DROP, which is a positive value.
 *	So, errors can also be positive.
 *
 *	Regardless of the return value, the skb is consumed, so it is
 *	currently difficult to retry a send to this method.  (You can bump
 *	the ref count before sending to hold a reference for retry if you
 *	are careful.)
 *
 *	When calling this method, interrupts MUST be enabled.  This is
 *	because the BH enable code must have IRQs enabled so that it will
 *	not deadlock.  --BLG
 */
3794static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
3795{
3796 struct net_device *dev = skb->dev;
3797 struct netdev_queue *txq;
3798 struct Qdisc *q;
3799 int rc = -ENOMEM;
3800 bool again = false;
3801
3802 skb_reset_mac_header(skb);
3803
3804 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
3805 __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
3806
	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
3810 rcu_read_lock_bh();
3811
3812 skb_update_prio(skb);
3813
3814 qdisc_pkt_len_init(skb);
3815#ifdef CONFIG_NET_CLS_ACT
3816 skb->tc_at_ingress = 0;
3817# ifdef CONFIG_NET_EGRESS
3818 if (static_branch_unlikely(&egress_needed_key)) {
3819 skb = sch_handle_egress(skb, &rc, dev);
3820 if (!skb)
3821 goto out;
3822 }
3823# endif
3824#endif
3825
	/* If device/qdisc don't need skb->dst, release it right now while
	 * it's hot in this cpu cache.
	 */
3828 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
3829 skb_dst_drop(skb);
3830 else
3831 skb_dst_force(skb);
3832
3833 txq = netdev_core_pick_tx(dev, skb, sb_dev);
3834 q = rcu_dereference_bh(txq->qdisc);
3835
3836 trace_net_dev_queue(skb);
3837 if (q->enqueue) {
3838 rc = __dev_xmit_skb(skb, q, dev, txq);
3839 goto out;
3840 }
3841
	/* The device has no queue. Common case for software devices:
	 * loopback, all the sorts of tunnels...
	 *
	 * Really, it is unlikely that netif_tx_lock protection is necessary
	 * here.  (f.e. loopback and IP tunnels are clean ignoring statistics
	 * counters.)
	 * However, it is possible that they rely on the protection
	 * made by us here.
	 *
	 * Check this and shoot the lock. It is not prone to deadlocks.
	 * Either way, shoot the noqueue qdisc; it is even simpler 8)
	 */
3854 if (dev->flags & IFF_UP) {
3855 int cpu = smp_processor_id();
3856
3857 if (txq->xmit_lock_owner != cpu) {
3858 if (dev_xmit_recursion())
3859 goto recursion_alert;
3860
3861 skb = validate_xmit_skb(skb, dev, &again);
3862 if (!skb)
3863 goto out;
3864
3865 HARD_TX_LOCK(dev, txq, cpu);
3866
3867 if (!netif_xmit_stopped(txq)) {
3868 dev_xmit_recursion_inc();
3869 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
3870 dev_xmit_recursion_dec();
3871 if (dev_xmit_complete(rc)) {
3872 HARD_TX_UNLOCK(dev, txq);
3873 goto out;
3874 }
3875 }
3876 HARD_TX_UNLOCK(dev, txq);
3877 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
3878 dev->name);
3879 } else {
		/* Recursion is detected! It is possible,
		 * unfortunately
		 */
3883recursion_alert:
3884 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
3885 dev->name);
3886 }
3887 }
3888
3889 rc = -ENETDOWN;
3890 rcu_read_unlock_bh();
3891
3892 atomic_long_inc(&dev->tx_dropped);
3893 kfree_skb_list(skb);
3894 return rc;
3895out:
3896 rcu_read_unlock_bh();
3897 return rc;
3898}
3899
3900int dev_queue_xmit(struct sk_buff *skb)
3901{
3902 return __dev_queue_xmit(skb, NULL);
3903}
3904EXPORT_SYMBOL(dev_queue_xmit);
3905
3906int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev)
3907{
3908 return __dev_queue_xmit(skb, sb_dev);
3909}
3910EXPORT_SYMBOL(dev_queue_xmit_accel);
3911
3912int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
3913{
3914 struct net_device *dev = skb->dev;
3915 struct sk_buff *orig_skb = skb;
3916 struct netdev_queue *txq;
3917 int ret = NETDEV_TX_BUSY;
3918 bool again = false;
3919
3920 if (unlikely(!netif_running(dev) ||
3921 !netif_carrier_ok(dev)))
3922 goto drop;
3923
3924 skb = validate_xmit_skb_list(skb, dev, &again);
3925 if (skb != orig_skb)
3926 goto drop;
3927
3928 skb_set_queue_mapping(skb, queue_id);
3929 txq = skb_get_tx_queue(dev, skb);
3930
3931 local_bh_disable();
3932
3933 HARD_TX_LOCK(dev, txq, smp_processor_id());
3934 if (!netif_xmit_frozen_or_drv_stopped(txq))
3935 ret = netdev_start_xmit(skb, dev, txq, false);
3936 HARD_TX_UNLOCK(dev, txq);
3937
3938 local_bh_enable();
3939
3940 if (!dev_xmit_complete(ret))
3941 kfree_skb(skb);
3942
3943 return ret;
3944drop:
3945 atomic_long_inc(&dev->tx_dropped);
3946 kfree_skb_list(skb);
3947 return NET_XMIT_DROP;
3948}
3949EXPORT_SYMBOL(dev_direct_xmit);
3950
/*************************************************************************
 *			Receiver routines
 *************************************************************************/
3955int netdev_max_backlog __read_mostly = 1000;
3956EXPORT_SYMBOL(netdev_max_backlog);
3957
3958int netdev_tstamp_prequeue __read_mostly = 1;
3959int netdev_budget __read_mostly = 300;
3960unsigned int __read_mostly netdev_budget_usecs = 2000;
3961int weight_p __read_mostly = 64;
3962int dev_weight_rx_bias __read_mostly = 1;
3963int dev_weight_tx_bias __read_mostly = 1;
3964int dev_rx_weight __read_mostly = 64;
3965int dev_tx_weight __read_mostly = 64;
3966
/* Called with irq disabled */
3968static inline void ____napi_schedule(struct softnet_data *sd,
3969 struct napi_struct *napi)
3970{
3971 list_add_tail(&napi->poll_list, &sd->poll_list);
3972 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3973}
3974
3975#ifdef CONFIG_RPS
/* One global table that all flow-based protocols share. */
3978struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
3979EXPORT_SYMBOL(rps_sock_flow_table);
3980u32 rps_cpu_mask __read_mostly;
3981EXPORT_SYMBOL(rps_cpu_mask);
3982
3983struct static_key_false rps_needed __read_mostly;
3984EXPORT_SYMBOL(rps_needed);
3985struct static_key_false rfs_needed __read_mostly;
3986EXPORT_SYMBOL(rfs_needed);
3987
3988static struct rps_dev_flow *
3989set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3990 struct rps_dev_flow *rflow, u16 next_cpu)
3991{
3992 if (next_cpu < nr_cpu_ids) {
3993#ifdef CONFIG_RFS_ACCEL
3994 struct netdev_rx_queue *rxqueue;
3995 struct rps_dev_flow_table *flow_table;
3996 struct rps_dev_flow *old_rflow;
3997 u32 flow_id;
3998 u16 rxq_index;
3999 int rc;
4000
		/* Should we steer this flow to a different hardware queue? */
4002 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
4003 !(dev->features & NETIF_F_NTUPLE))
4004 goto out;
4005 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
4006 if (rxq_index == skb_get_rx_queue(skb))
4007 goto out;
4008
4009 rxqueue = dev->_rx + rxq_index;
4010 flow_table = rcu_dereference(rxqueue->rps_flow_table);
4011 if (!flow_table)
4012 goto out;
4013 flow_id = skb_get_hash(skb) & flow_table->mask;
4014 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
4015 rxq_index, flow_id);
4016 if (rc < 0)
4017 goto out;
4018 old_rflow = rflow;
4019 rflow = &flow_table->flows[flow_id];
4020 rflow->filter = rc;
4021 if (old_rflow->filter == rflow->filter)
4022 old_rflow->filter = RPS_NO_FILTER;
4023 out:
4024#endif
4025 rflow->last_qtail =
4026 per_cpu(softnet_data, next_cpu).input_queue_head;
4027 }
4028
4029 rflow->cpu = next_cpu;
4030 return rflow;
4031}
4032
/*
 * get_rps_cpu is called from netif_receive_skb and returns the target
 * CPU from the RPS map of the receiving queue for a given skb.
 * rcu_read_lock must be held on entry.
 */
4038static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4039 struct rps_dev_flow **rflowp)
4040{
4041 const struct rps_sock_flow_table *sock_flow_table;
4042 struct netdev_rx_queue *rxqueue = dev->_rx;
4043 struct rps_dev_flow_table *flow_table;
4044 struct rps_map *map;
4045 int cpu = -1;
4046 u32 tcpu;
4047 u32 hash;
4048
4049 if (skb_rx_queue_recorded(skb)) {
4050 u16 index = skb_get_rx_queue(skb);
4051
4052 if (unlikely(index >= dev->real_num_rx_queues)) {
4053 WARN_ONCE(dev->real_num_rx_queues > 1,
4054 "%s received packet on queue %u, but number "
4055 "of RX queues is %u\n",
4056 dev->name, index, dev->real_num_rx_queues);
4057 goto done;
4058 }
4059 rxqueue += index;
4060 }
4061
	/* Avoid computing hash if RFS/RPS is not active for this rxqueue */
4064 flow_table = rcu_dereference(rxqueue->rps_flow_table);
4065 map = rcu_dereference(rxqueue->rps_map);
4066 if (!flow_table && !map)
4067 goto done;
4068
4069 skb_reset_network_header(skb);
4070 hash = skb_get_hash(skb);
4071 if (!hash)
4072 goto done;
4073
4074 sock_flow_table = rcu_dereference(rps_sock_flow_table);
4075 if (flow_table && sock_flow_table) {
4076 struct rps_dev_flow *rflow;
4077 u32 next_cpu;
4078 u32 ident;
4079
		/* First check into global flow table if there is a match */
4081 ident = sock_flow_table->ents[hash & sock_flow_table->mask];
4082 if ((ident ^ hash) & ~rps_cpu_mask)
4083 goto try_rps;
4084
4085 next_cpu = ident & rps_cpu_mask;
4086
		/* OK, now we know there is a match,
		 * we can look at the local (per receive queue) flow table
		 */
4090 rflow = &flow_table->flows[hash & flow_table->mask];
4091 tcpu = rflow->cpu;
4092
		/*
		 * If the desired CPU (where last recvmsg was done) is
		 * different from current CPU (one in the rx-queue flow
		 * table entry), switch if one of the following holds:
		 *   - Current CPU is unset (>= nr_cpu_ids).
		 *   - Current CPU is offline.
		 *   - The current CPU's queue tail has advanced beyond the
		 *     last packet that was enqueued using this table entry.
		 *     This guarantees that all previous packets for the flow
		 *     have been dequeued, thus preserving in order delivery.
		 */
4104 if (unlikely(tcpu != next_cpu) &&
4105 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
4106 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
4107 rflow->last_qtail)) >= 0)) {
4108 tcpu = next_cpu;
4109 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
4110 }
4111
4112 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
4113 *rflowp = rflow;
4114 cpu = tcpu;
4115 goto done;
4116 }
4117 }
4118
4119try_rps:
4120
4121 if (map) {
4122 tcpu = map->cpus[reciprocal_scale(hash, map->len)];
4123 if (cpu_online(tcpu)) {
4124 cpu = tcpu;
4125 goto done;
4126 }
4127 }
4128
4129done:
4130 return cpu;
4131}
4132
4133#ifdef CONFIG_RFS_ACCEL
/**
 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
 * @dev: Device on which the filter was set
 * @rxq_index: RX queue index
 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
 *
 * Drivers that implement ndo_rx_flow_steer() should periodically call
 * this function for each installed filter and remove the filters for
 * which it returns %true.
 */
4146bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
4147 u32 flow_id, u16 filter_id)
4148{
4149 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
4150 struct rps_dev_flow_table *flow_table;
4151 struct rps_dev_flow *rflow;
4152 bool expire = true;
4153 unsigned int cpu;
4154
4155 rcu_read_lock();
4156 flow_table = rcu_dereference(rxqueue->rps_flow_table);
4157 if (flow_table && flow_id <= flow_table->mask) {
4158 rflow = &flow_table->flows[flow_id];
4159 cpu = READ_ONCE(rflow->cpu);
4160 if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
4161 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
4162 rflow->last_qtail) <
4163 (int)(10 * flow_table->mask)))
4164 expire = false;
4165 }
4166 rcu_read_unlock();
4167 return expire;
4168}
4169EXPORT_SYMBOL(rps_may_expire_flow);
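
/* Usage sketch (illustrative): a driver implementing ndo_rx_flow_steer()
 * scans its installed filters periodically and removes the hardware rule
 * once this returns true:
 *
 *	if (rps_may_expire_flow(netdev, rxq_index, flow_id, filter_id))
 *		my_remove_hw_filter(priv, filter_id);
 *
 * my_remove_hw_filter() is a made-up name for the driver-side removal.
 */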
4170
4171#endif
4172
/* Called from hardirq (IPI) context */
4174static void rps_trigger_softirq(void *data)
4175{
4176 struct softnet_data *sd = data;
4177
4178 ____napi_schedule(sd, &sd->backlog);
4179 sd->received_rps++;
4180}
4181
4182#endif
4183
/*
 * Check whether this softnet_data structure belongs to another CPU.
 * If so, queue it on our IPI list and return 1;
 * otherwise return 0.
 */
4189static int rps_ipi_queued(struct softnet_data *sd)
4190{
4191#ifdef CONFIG_RPS
4192 struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
4193
4194 if (sd != mysd) {
4195 sd->rps_ipi_next = mysd->rps_ipi_list;
4196 mysd->rps_ipi_list = sd;
4197
4198 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4199 return 1;
4200 }
4201#endif
4202 return 0;
4203}
4204
4205#ifdef CONFIG_NET_FLOW_LIMIT
4206int netdev_flow_limit_table_len __read_mostly = (1 << 12);
4207#endif
4208
4209static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
4210{
4211#ifdef CONFIG_NET_FLOW_LIMIT
4212 struct sd_flow_limit *fl;
4213 struct softnet_data *sd;
4214 unsigned int old_flow, new_flow;
4215
4216 if (qlen < (netdev_max_backlog >> 1))
4217 return false;
4218
4219 sd = this_cpu_ptr(&softnet_data);
4220
4221 rcu_read_lock();
4222 fl = rcu_dereference(sd->flow_limit);
4223 if (fl) {
4224 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
4225 old_flow = fl->history[fl->history_head];
4226 fl->history[fl->history_head] = new_flow;
4227
4228 fl->history_head++;
4229 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
4230
4231 if (likely(fl->buckets[old_flow]))
4232 fl->buckets[old_flow]--;
4233
4234 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
4235 fl->count++;
4236 rcu_read_unlock();
4237 return true;
4238 }
4239 }
4240 rcu_read_unlock();
4241#endif
4242 return false;
4243}
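
/* The flow limit above keeps a short history (FLOW_LIMIT_HISTORY entries)
 * of recent flow hashes once the backlog is more than half full; a single
 * flow owning more than half of those history slots is treated as a hog
 * and its packets are dropped early, so small flows keep getting through.
 */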
4244
/*
 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
 * queue (may be a remote CPU queue).
 */
4249static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
4250 unsigned int *qtail)
4251{
4252 struct softnet_data *sd;
4253 unsigned long flags;
4254 unsigned int qlen;
4255
4256 sd = &per_cpu(softnet_data, cpu);
4257
4258 local_irq_save(flags);
4259
4260 rps_lock(sd);
4261 if (!netif_running(skb->dev))
4262 goto drop;
4263 qlen = skb_queue_len(&sd->input_pkt_queue);
4264 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
4265 if (qlen) {
4266enqueue:
4267 __skb_queue_tail(&sd->input_pkt_queue, skb);
4268 input_queue_tail_incr_save(sd, qtail);
4269 rps_unlock(sd);
4270 local_irq_restore(flags);
4271 return NET_RX_SUCCESS;
4272 }
4273
		/* Schedule NAPI for backlog device
		 * We can use non atomic operation since we own the queue lock
		 */
4277 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
4278 if (!rps_ipi_queued(sd))
4279 ____napi_schedule(sd, &sd->backlog);
4280 }
4281 goto enqueue;
4282 }
4283
4284drop:
4285 sd->dropped++;
4286 rps_unlock(sd);
4287
4288 local_irq_restore(flags);
4289
4290 atomic_long_inc(&skb->dev->rx_dropped);
4291 kfree_skb(skb);
4292 return NET_RX_DROP;
4293}
4294
4295static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
4296{
4297 struct net_device *dev = skb->dev;
4298 struct netdev_rx_queue *rxqueue;
4299
4300 rxqueue = dev->_rx;
4301
4302 if (skb_rx_queue_recorded(skb)) {
4303 u16 index = skb_get_rx_queue(skb);
4304
4305 if (unlikely(index >= dev->real_num_rx_queues)) {
4306 WARN_ONCE(dev->real_num_rx_queues > 1,
4307 "%s received packet on queue %u, but number "
4308 "of RX queues is %u\n",
4309 dev->name, index, dev->real_num_rx_queues);
4310
4311 return rxqueue;
4312 }
4313 rxqueue += index;
4314 }
4315 return rxqueue;
4316}
4317
4318static u32 netif_receive_generic_xdp(struct sk_buff *skb,
4319 struct xdp_buff *xdp,
4320 struct bpf_prog *xdp_prog)
4321{
4322 struct netdev_rx_queue *rxqueue;
4323 void *orig_data, *orig_data_end;
4324 u32 metalen, act = XDP_DROP;
4325 __be16 orig_eth_type;
4326 struct ethhdr *eth;
4327 bool orig_bcast;
4328 int hlen, off;
4329 u32 mac_len;
4330
	/* Reinjected packets coming from act_mirred or similar should
	 * not get XDP generic processing.
	 */
4334 if (skb_cloned(skb) || skb_is_tc_redirected(skb))
4335 return XDP_PASS;
4336
	/* XDP packets must be linear and must have sufficient headroom
	 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
	 * native XDP provides, thus we need to do it here as well.
	 */
4341 if (skb_is_nonlinear(skb) ||
4342 skb_headroom(skb) < XDP_PACKET_HEADROOM) {
4343 int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
4344 int troom = skb->tail + skb->data_len - skb->end;
4345
		/* In case we have to go down the path and also linearize,
		 * then lets do the pskb_expand_head() work just once here.
		 */
4349 if (pskb_expand_head(skb,
4350 hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
4351 troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
4352 goto do_drop;
4353 if (skb_linearize(skb))
4354 goto do_drop;
4355 }
4356
	/* The XDP program wants to see the packet starting at the MAC
	 * header.
	 */
4360 mac_len = skb->data - skb_mac_header(skb);
4361 hlen = skb_headlen(skb) + mac_len;
4362 xdp->data = skb->data - mac_len;
4363 xdp->data_meta = xdp->data;
4364 xdp->data_end = xdp->data + hlen;
4365 xdp->data_hard_start = skb->data - skb_headroom(skb);
4366 orig_data_end = xdp->data_end;
4367 orig_data = xdp->data;
4368 eth = (struct ethhdr *)xdp->data;
4369 orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest);
4370 orig_eth_type = eth->h_proto;
4371
4372 rxqueue = netif_get_rxqueue(skb);
4373 xdp->rxq = &rxqueue->xdp_rxq;
4374
4375 act = bpf_prog_run_xdp(xdp_prog, xdp);
4376
	/* check if bpf_xdp_adjust_head was used */
4378 off = xdp->data - orig_data;
4379 if (off) {
4380 if (off > 0)
4381 __skb_pull(skb, off);
4382 else if (off < 0)
4383 __skb_push(skb, -off);
4384
4385 skb->mac_header += off;
4386 skb_reset_network_header(skb);
4387 }
4388
	/* check if bpf_xdp_adjust_tail was used. it can only "shrink"
	 * the packet.
	 */
4392 off = orig_data_end - xdp->data_end;
4393 if (off != 0) {
4394 skb_set_tail_pointer(skb, xdp->data_end - xdp->data);
4395 skb->len -= off;
4396
4397 }
4398
	/* check if XDP changed the eth hdr such that the skb needs an update */
4400 eth = (struct ethhdr *)xdp->data;
4401 if ((orig_eth_type != eth->h_proto) ||
4402 (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) {
4403 __skb_push(skb, ETH_HLEN);
4404 skb->protocol = eth_type_trans(skb, skb->dev);
4405 }
4406
4407 switch (act) {
4408 case XDP_REDIRECT:
4409 case XDP_TX:
4410 __skb_push(skb, mac_len);
4411 break;
4412 case XDP_PASS:
4413 metalen = xdp->data - xdp->data_meta;
4414 if (metalen)
4415 skb_metadata_set(skb, metalen);
4416 break;
4417 default:
4418 bpf_warn_invalid_xdp_action(act);
		/* fall through */
4420 case XDP_ABORTED:
4421 trace_xdp_exception(skb->dev, xdp_prog, act);
		/* fall through */
4423 case XDP_DROP:
4424 do_drop:
4425 kfree_skb(skb);
4426 break;
4427 }
4428
4429 return act;
4430}
4431
/* When doing generic XDP we have to bypass the qdisc layer and the
 * network taps in order to match in-driver-XDP behavior.
 */
4435void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
4436{
4437 struct net_device *dev = skb->dev;
4438 struct netdev_queue *txq;
4439 bool free_skb = true;
4440 int cpu, rc;
4441
4442 txq = netdev_core_pick_tx(dev, skb, NULL);
4443 cpu = smp_processor_id();
4444 HARD_TX_LOCK(dev, txq, cpu);
4445 if (!netif_xmit_stopped(txq)) {
4446 rc = netdev_start_xmit(skb, dev, txq, 0);
4447 if (dev_xmit_complete(rc))
4448 free_skb = false;
4449 }
4450 HARD_TX_UNLOCK(dev, txq);
4451 if (free_skb) {
4452 trace_xdp_exception(dev, xdp_prog, XDP_TX);
4453 kfree_skb(skb);
4454 }
4455}
4456EXPORT_SYMBOL_GPL(generic_xdp_tx);
4457
4458static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);
4459
4460int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
4461{
4462 if (xdp_prog) {
4463 struct xdp_buff xdp;
4464 u32 act;
4465 int err;
4466
4467 act = netif_receive_generic_xdp(skb, &xdp, xdp_prog);
4468 if (act != XDP_PASS) {
4469 switch (act) {
4470 case XDP_REDIRECT:
4471 err = xdp_do_generic_redirect(skb->dev, skb,
4472 &xdp, xdp_prog);
4473 if (err)
4474 goto out_redir;
4475 break;
4476 case XDP_TX:
4477 generic_xdp_tx(skb, xdp_prog);
4478 break;
4479 }
4480 return XDP_DROP;
4481 }
4482 }
4483 return XDP_PASS;
4484out_redir:
4485 kfree_skb(skb);
4486 return XDP_DROP;
4487}
4488EXPORT_SYMBOL_GPL(do_xdp_generic);
4489
4490static int netif_rx_internal(struct sk_buff *skb)
4491{
4492 int ret;
4493
4494 net_timestamp_check(netdev_tstamp_prequeue, skb);
4495
4496 trace_netif_rx(skb);
4497
4498#ifdef CONFIG_RPS
4499 if (static_branch_unlikely(&rps_needed)) {
4500 struct rps_dev_flow voidflow, *rflow = &voidflow;
4501 int cpu;
4502
4503 preempt_disable();
4504 rcu_read_lock();
4505
4506 cpu = get_rps_cpu(skb->dev, skb, &rflow);
4507 if (cpu < 0)
4508 cpu = smp_processor_id();
4509
4510 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
4511
4512 rcu_read_unlock();
4513 preempt_enable();
4514 } else
4515#endif
4516 {
4517 unsigned int qtail;
4518
4519 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
4520 put_cpu();
4521 }
4522 return ret;
4523}
4524
/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped)
 *
 */
4540int netif_rx(struct sk_buff *skb)
4541{
4542 int ret;
4543
4544 trace_netif_rx_entry(skb);
4545
4546 ret = netif_rx_internal(skb);
4547 trace_netif_rx_exit(ret);
4548
4549 return ret;
4550}
4551EXPORT_SYMBOL(netif_rx);
4552
4553int netif_rx_ni(struct sk_buff *skb)
4554{
4555 int err;
4556
4557 trace_netif_rx_ni_entry(skb);
4558
4559 preempt_disable();
4560 err = netif_rx_internal(skb);
4561 if (local_softirq_pending())
4562 do_softirq();
4563 preempt_enable();
4564 trace_netif_rx_ni_exit(err);
4565
4566 return err;
4567}
4568EXPORT_SYMBOL(netif_rx_ni);
4569
4570static __latent_entropy void net_tx_action(struct softirq_action *h)
4571{
4572 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
4573
4574 if (sd->completion_queue) {
4575 struct sk_buff *clist;
4576
4577 local_irq_disable();
4578 clist = sd->completion_queue;
4579 sd->completion_queue = NULL;
4580 local_irq_enable();
4581
4582 while (clist) {
4583 struct sk_buff *skb = clist;
4584
4585 clist = clist->next;
4586
4587 WARN_ON(refcount_read(&skb->users));
4588 if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
4589 trace_consume_skb(skb);
4590 else
4591 trace_kfree_skb(skb, net_tx_action);
4592
4593 if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
4594 __kfree_skb(skb);
4595 else
4596 __kfree_skb_defer(skb);
4597 }
4598
4599 __kfree_skb_flush();
4600 }
4601
4602 if (sd->output_queue) {
4603 struct Qdisc *head;
4604
4605 local_irq_disable();
4606 head = sd->output_queue;
4607 sd->output_queue = NULL;
4608 sd->output_queue_tailp = &sd->output_queue;
4609 local_irq_enable();
4610
4611 while (head) {
4612 struct Qdisc *q = head;
4613 spinlock_t *root_lock = NULL;
4614
4615 head = head->next_sched;
4616
4617 if (!(q->flags & TCQ_F_NOLOCK)) {
4618 root_lock = qdisc_lock(q);
4619 spin_lock(root_lock);
4620 }
4621
			/* We need to make sure head->next_sched is read
			 * before clearing __QDISC_STATE_SCHED
			 */
4624 smp_mb__before_atomic();
4625 clear_bit(__QDISC_STATE_SCHED, &q->state);
4626 qdisc_run(q);
4627 if (root_lock)
4628 spin_unlock(root_lock);
4629 }
4630 }
4631
4632 xfrm_dev_backlog(sd);
4633}
4634
4635#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
/* This hook is defined here for ATM LANE */
4637int (*br_fdb_test_addr_hook)(struct net_device *dev,
4638 unsigned char *addr) __read_mostly;
4639EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
4640#endif
4641
4642static inline struct sk_buff *
4643sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
4644 struct net_device *orig_dev)
4645{
4646#ifdef CONFIG_NET_CLS_ACT
4647 struct mini_Qdisc *miniq = rcu_dereference_bh(skb->dev->miniq_ingress);
4648 struct tcf_result cl_res;
4649
	/* If there's at least one ingress present somewhere (so
	 * we get here via enabled static key), remaining devices
	 * that are not configured with an ingress qdisc will bail
	 * out here.
	 */
4655 if (!miniq)
4656 return skb;
4657
4658 if (*pt_prev) {
4659 *ret = deliver_skb(skb, *pt_prev, orig_dev);
4660 *pt_prev = NULL;
4661 }
4662
4663 qdisc_skb_cb(skb)->pkt_len = skb->len;
4664 skb->tc_at_ingress = 1;
4665 mini_qdisc_bstats_cpu_update(miniq, skb);
4666
4667 switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
4668 case TC_ACT_OK:
4669 case TC_ACT_RECLASSIFY:
4670 skb->tc_index = TC_H_MIN(cl_res.classid);
4671 break;
4672 case TC_ACT_SHOT:
4673 mini_qdisc_qstats_cpu_drop(miniq);
4674 kfree_skb(skb);
4675 return NULL;
4676 case TC_ACT_STOLEN:
4677 case TC_ACT_QUEUED:
4678 case TC_ACT_TRAP:
4679 consume_skb(skb);
4680 return NULL;
4681 case TC_ACT_REDIRECT:
		/* skb_mac_header check was done by cls/act_bpf, so
		 * we can safely push the L2 header back before
		 * redirecting to another netdev
		 */
4686 __skb_push(skb, skb->mac_len);
4687 skb_do_redirect(skb);
4688 return NULL;
4689 case TC_ACT_CONSUMED:
4690 return NULL;
4691 default:
4692 break;
4693 }
4694#endif
4695 return skb;
4696}
4697
/**
 *	netdev_is_rx_handler_busy - check whether receive handler is busy
 *	@dev: device to check
 *
 *	Check if a receive handler is already registered for a given device.
 *	Return true if there is one.
 *
 *	The caller must hold the rtnl_mutex.
 */
4707bool netdev_is_rx_handler_busy(struct net_device *dev)
4708{
4709 ASSERT_RTNL();
4710 return dev && rtnl_dereference(dev->rx_handler);
4711}
4712EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
4713
/**
 *	netdev_rx_handler_register - register receive handler
 *	@dev: device to register a handler for
 *	@rx_handler: receive handler to register
 *	@rx_handler_data: data pointer that is used by rx handler
 *
 *	Register a receive handler for a device. This handler will then be
 *	called from __netif_receive_skb. A negative errno code is returned
 *	on a failure.
 *
 *	The caller must hold the rtnl_mutex.
 *
 *	For a general description of rx_handler, see enum rx_handler_result.
 */
4728int netdev_rx_handler_register(struct net_device *dev,
4729 rx_handler_func_t *rx_handler,
4730 void *rx_handler_data)
4731{
4732 if (netdev_is_rx_handler_busy(dev))
4733 return -EBUSY;
4734
4735 if (dev->priv_flags & IFF_NO_RX_HANDLER)
4736 return -EINVAL;
4737
	/* Note: rx_handler_data must be set before rx_handler */
4739 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
4740 rcu_assign_pointer(dev->rx_handler, rx_handler);
4741
4742 return 0;
4743}
4744EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
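
/* Usage sketch (illustrative): upper-layer drivers such as bridge or
 * bonding attach to a port device roughly like this, under RTNL:
 *
 *	err = netdev_rx_handler_register(port_dev, my_handle_frame, port_priv);
 *
 * my_handle_frame (an rx_handler_func_t) and port_priv are made-up names.
 */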
4745
/**
 *	netdev_rx_handler_unregister - unregister receive handler
 *	@dev: device to unregister a handler from
 *
 *	Unregister a receive handler from a device.
 *
 *	The caller must hold the rtnl_mutex.
 */
4754void netdev_rx_handler_unregister(struct net_device *dev)
4755{
4756
4757 ASSERT_RTNL();
4758 RCU_INIT_POINTER(dev->rx_handler, NULL);
4759
	/* a reader seeing a non NULL rx_handler in a rcu_read_lock()
	 * section has a guarantee to see a non NULL rx_handler_data
	 * as well.
	 */
4763 synchronize_net();
4764 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
4765}
4766EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
4767
/*
 * Limit the use of PFMEMALLOC reserves to those protocols that implement
 * the special handling of PFMEMALLOC skbs.
 */
4772static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
4773{
4774 switch (skb->protocol) {
4775 case htons(ETH_P_ARP):
4776 case htons(ETH_P_IP):
4777 case htons(ETH_P_IPV6):
4778 case htons(ETH_P_8021Q):
4779 case htons(ETH_P_8021AD):
4780 return true;
4781 default:
4782 return false;
4783 }
4784}
4785
4786static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
4787 int *ret, struct net_device *orig_dev)
4788{
4789#ifdef CONFIG_NETFILTER_INGRESS
4790 if (nf_hook_ingress_active(skb)) {
4791 int ingress_retval;
4792
4793 if (*pt_prev) {
4794 *ret = deliver_skb(skb, *pt_prev, orig_dev);
4795 *pt_prev = NULL;
4796 }
4797
4798 rcu_read_lock();
4799 ingress_retval = nf_hook_ingress(skb);
4800 rcu_read_unlock();
4801 return ingress_retval;
4802 }
4803#endif
4804 return 0;
4805}
4806
4807static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc,
4808 struct packet_type **ppt_prev)
4809{
4810 struct packet_type *ptype, *pt_prev;
4811 rx_handler_func_t *rx_handler;
4812 struct net_device *orig_dev;
4813 bool deliver_exact = false;
4814 int ret = NET_RX_DROP;
4815 __be16 type;
4816
4817 net_timestamp_check(!netdev_tstamp_prequeue, skb);
4818
4819 trace_netif_receive_skb(skb);
4820
4821 orig_dev = skb->dev;
4822
4823 skb_reset_network_header(skb);
4824 if (!skb_transport_header_was_set(skb))
4825 skb_reset_transport_header(skb);
4826 skb_reset_mac_len(skb);
4827
4828 pt_prev = NULL;
4829
4830another_round:
4831 skb->skb_iif = skb->dev->ifindex;
4832
4833 __this_cpu_inc(softnet_data.processed);
4834
4835 if (static_branch_unlikely(&generic_xdp_needed_key)) {
4836 int ret2;
4837
4838 preempt_disable();
4839 ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
4840 preempt_enable();
4841
4842 if (ret2 != XDP_PASS)
4843 return NET_RX_DROP;
4844 skb_reset_mac_len(skb);
4845 }
4846
4847 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
4848 skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
4849 skb = skb_vlan_untag(skb);
4850 if (unlikely(!skb))
4851 goto out;
4852 }
4853
4854 if (skb_skip_tc_classify(skb))
4855 goto skip_classify;
4856
4857 if (pfmemalloc)
4858 goto skip_taps;
4859
4860 list_for_each_entry_rcu(ptype, &ptype_all, list) {
4861 if (pt_prev)
4862 ret = deliver_skb(skb, pt_prev, orig_dev);
4863 pt_prev = ptype;
4864 }
4865
4866 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
4867 if (pt_prev)
4868 ret = deliver_skb(skb, pt_prev, orig_dev);
4869 pt_prev = ptype;
4870 }
4871
4872skip_taps:
4873#ifdef CONFIG_NET_INGRESS
4874 if (static_branch_unlikely(&ingress_needed_key)) {
4875 skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev);
4876 if (!skb)
4877 goto out;
4878
4879 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
4880 goto out;
4881 }
4882#endif
4883 skb_reset_tc(skb);
4884skip_classify:
4885 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
4886 goto drop;
4887
4888 if (skb_vlan_tag_present(skb)) {
4889 if (pt_prev) {
4890 ret = deliver_skb(skb, pt_prev, orig_dev);
4891 pt_prev = NULL;
4892 }
4893 if (vlan_do_receive(&skb))
4894 goto another_round;
4895 else if (unlikely(!skb))
4896 goto out;
4897 }
4898
4899 rx_handler = rcu_dereference(skb->dev->rx_handler);
4900 if (rx_handler) {
4901 if (pt_prev) {
4902 ret = deliver_skb(skb, pt_prev, orig_dev);
4903 pt_prev = NULL;
4904 }
4905 switch (rx_handler(&skb)) {
4906 case RX_HANDLER_CONSUMED:
4907 ret = NET_RX_SUCCESS;
4908 goto out;
4909 case RX_HANDLER_ANOTHER:
4910 goto another_round;
4911 case RX_HANDLER_EXACT:
4912 deliver_exact = true;
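			/* fall through */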
4913 case RX_HANDLER_PASS:
4914 break;
4915 default:
4916 BUG();
4917 }
4918 }
4919
4920 if (unlikely(skb_vlan_tag_present(skb))) {
4921check_vlan_id:
4922 if (skb_vlan_tag_get_id(skb)) {
			/* Vlan id is non 0 and vlan_do_receive() above couldn't
			 * find vlan device.
			 */
4926 skb->pkt_type = PACKET_OTHERHOST;
4927 } else if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
4928 skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
			/* Outer header is 802.1P with vlan 0, inner header is
			 * 802.1Q or 802.1AD and vlan_do_receive() above could
			 * not find vlan dev for vlan id 0.
			 */
4933 __vlan_hwaccel_clear_tag(skb);
4934 skb = skb_vlan_untag(skb);
4935 if (unlikely(!skb))
4936 goto out;
4937 if (vlan_do_receive(&skb))
				/* After stripping off vlan 0, a vlan dev was
				 * found for the inner header; take another
				 * round with it.
				 */
4941 goto another_round;
4942 else if (unlikely(!skb))
4943 goto out;
4944 else
				/* We have stripped the outer 802.1P vlan 0
				 * header, but could not find a vlan dev.
				 * Check again, depending on the original
				 * vlan id.
				 */
4949 goto check_vlan_id;
4950 }
4951
		/* Note: we might in the future use prio bits
		 * and set skb->priority like in vlan_do_receive().
		 * For the time being, just ignore Priority Tagged frames.
		 */
4955 __vlan_hwaccel_clear_tag(skb);
4956 }
4957
4958 type = skb->protocol;
4959
	/* deliver only exact match when indicated */
4961 if (likely(!deliver_exact)) {
4962 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4963 &ptype_base[ntohs(type) &
4964 PTYPE_HASH_MASK]);
4965 }
4966
4967 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4968 &orig_dev->ptype_specific);
4969
4970 if (unlikely(skb->dev != orig_dev)) {
4971 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4972 &skb->dev->ptype_specific);
4973 }
4974
4975 if (pt_prev) {
4976 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
4977 goto drop;
4978 *ppt_prev = pt_prev;
4979 } else {
4980drop:
4981 if (!deliver_exact)
4982 atomic_long_inc(&skb->dev->rx_dropped);
4983 else
4984 atomic_long_inc(&skb->dev->rx_nohandler);
4985 kfree_skb(skb);
		/* Jamal, now you will not be able to escape explaining
		 * to me how you were going to use this. :-)
		 */
4989 ret = NET_RX_DROP;
4990 }
4991
4992out:
4993 return ret;
4994}
4995
4996static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
4997{
4998 struct net_device *orig_dev = skb->dev;
4999 struct packet_type *pt_prev = NULL;
5000 int ret;
5001
5002 ret = __netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
5003 if (pt_prev)
5004 ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb,
5005 skb->dev, pt_prev, orig_dev);
5006 return ret;
5007}
5008
5009
/**
 *	netif_receive_skb_core - special purpose version of netif_receive_skb
 *	@skb: buffer to process
 *
 *	More direct receive version of netif_receive_skb().  It should
 *	only be used by callers that have a need to skip RPS and Generic XDP.
 *	Caller must also take care of handling if (page_is_)pfmemalloc.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
5024int netif_receive_skb_core(struct sk_buff *skb)
5025{
5026 int ret;
5027
5028 rcu_read_lock();
5029 ret = __netif_receive_skb_one_core(skb, false);
5030 rcu_read_unlock();
5031
5032 return ret;
5033}
5034EXPORT_SYMBOL(netif_receive_skb_core);
5035
5036static inline void __netif_receive_skb_list_ptype(struct list_head *head,
5037 struct packet_type *pt_prev,
5038 struct net_device *orig_dev)
5039{
5040 struct sk_buff *skb, *next;
5041
5042 if (!pt_prev)
5043 return;
5044 if (list_empty(head))
5045 return;
5046 if (pt_prev->list_func != NULL)
5047 INDIRECT_CALL_INET(pt_prev->list_func, ipv6_list_rcv,
5048 ip_list_rcv, head, pt_prev, orig_dev);
5049 else
5050 list_for_each_entry_safe(skb, next, head, list) {
5051 skb_list_del_init(skb);
5052 pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
5053 }
5054}
5055
5056static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
5057{
	/* Fast-path assumptions:
	 * - There is no RX handler.
	 * - Only one packet_type matches.
	 * If either of these fails, we will end up doing some per-packet
	 * processing in-line, then handling the 'last ptype' for the whole
	 * sublist.  This can't cause out-of-order delivery to any single
	 * ptype, because the 'last ptype' must be constant across the
	 * sublist, and all other ptypes are handled per-packet.
	 */
	/* Current (common) ptype of sublist */
5068 struct packet_type *pt_curr = NULL;
	/* Current (common) orig_dev of sublist */
5070 struct net_device *od_curr = NULL;
5071 struct list_head sublist;
5072 struct sk_buff *skb, *next;
5073
5074 INIT_LIST_HEAD(&sublist);
5075 list_for_each_entry_safe(skb, next, head, list) {
5076 struct net_device *orig_dev = skb->dev;
5077 struct packet_type *pt_prev = NULL;
5078
5079 skb_list_del_init(skb);
5080 __netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
5081 if (!pt_prev)
5082 continue;
5083 if (pt_curr != pt_prev || od_curr != orig_dev) {
			/* dispatch old sublist */
5085 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
			/* start new sublist */
5087 INIT_LIST_HEAD(&sublist);
5088 pt_curr = pt_prev;
5089 od_curr = orig_dev;
5090 }
5091 list_add_tail(&skb->list, &sublist);
5092 }
5093
	/* dispatch final sublist */
5095 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
5096}
5097
5098static int __netif_receive_skb(struct sk_buff *skb)
5099{
5100 int ret;
5101
5102 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
5103 unsigned int noreclaim_flag;
5104
		/*
		 * PFMEMALLOC skbs are special, they should
		 * - be delivered to SOCK_MEMALLOC sockets only
		 * - stay away from userspace
		 * - have bounded memory usage
		 *
		 * Use PF_MEMALLOC as this saves us from propagating the
		 * allocation context down to all allocation sites.
		 */
5114 noreclaim_flag = memalloc_noreclaim_save();
5115 ret = __netif_receive_skb_one_core(skb, true);
5116 memalloc_noreclaim_restore(noreclaim_flag);
5117 } else
5118 ret = __netif_receive_skb_one_core(skb, false);
5119
5120 return ret;
5121}
5122
5123static void __netif_receive_skb_list(struct list_head *head)
5124{
5125 unsigned long noreclaim_flag = 0;
5126 struct sk_buff *skb, *next;
5127 bool pfmemalloc = false;
5128
5129 list_for_each_entry_safe(skb, next, head, list) {
5130 if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) {
5131 struct list_head sublist;
5132
			/* Handle the previous sublist */
5134 list_cut_before(&sublist, head, &skb->list);
5135 if (!list_empty(&sublist))
5136 __netif_receive_skb_list_core(&sublist, pfmemalloc);
5137 pfmemalloc = !pfmemalloc;
5138
5139 if (pfmemalloc)
5140 noreclaim_flag = memalloc_noreclaim_save();
5141 else
5142 memalloc_noreclaim_restore(noreclaim_flag);
5143 }
5144 }
5145
5146 if (!list_empty(head))
5147 __netif_receive_skb_list_core(head, pfmemalloc);
5148
5149 if (pfmemalloc)
5150 memalloc_noreclaim_restore(noreclaim_flag);
5151}
5152
5153static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
5154{
5155 struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
5156 struct bpf_prog *new = xdp->prog;
5157 int ret = 0;
5158
5159 switch (xdp->command) {
5160 case XDP_SETUP_PROG:
5161 rcu_assign_pointer(dev->xdp_prog, new);
5162 if (old)
5163 bpf_prog_put(old);
5164
5165 if (old && !new) {
5166 static_branch_dec(&generic_xdp_needed_key);
5167 } else if (new && !old) {
5168 static_branch_inc(&generic_xdp_needed_key);
5169 dev_disable_lro(dev);
5170 dev_disable_gro_hw(dev);
5171 }
5172 break;
5173
5174 case XDP_QUERY_PROG:
5175 xdp->prog_id = old ? old->aux->id : 0;
5176 break;
5177
5178 default:
5179 ret = -EINVAL;
5180 break;
5181 }
5182
5183 return ret;
5184}
5185
5186static int netif_receive_skb_internal(struct sk_buff *skb)
5187{
5188 int ret;
5189
5190 net_timestamp_check(netdev_tstamp_prequeue, skb);
5191
5192 if (skb_defer_rx_timestamp(skb))
5193 return NET_RX_SUCCESS;
5194
5195 rcu_read_lock();
5196#ifdef CONFIG_RPS
5197 if (static_branch_unlikely(&rps_needed)) {
5198 struct rps_dev_flow voidflow, *rflow = &voidflow;
5199 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5200
5201 if (cpu >= 0) {
5202 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5203 rcu_read_unlock();
5204 return ret;
5205 }
5206 }
5207#endif
5208 ret = __netif_receive_skb(skb);
5209 rcu_read_unlock();
5210 return ret;
5211}
5212
5213static void netif_receive_skb_list_internal(struct list_head *head)
5214{
5215 struct sk_buff *skb, *next;
5216 struct list_head sublist;
5217
5218 INIT_LIST_HEAD(&sublist);
5219 list_for_each_entry_safe(skb, next, head, list) {
5220 net_timestamp_check(netdev_tstamp_prequeue, skb);
5221 skb_list_del_init(skb);
5222 if (!skb_defer_rx_timestamp(skb))
5223 list_add_tail(&skb->list, &sublist);
5224 }
5225 list_splice_init(&sublist, head);
5226
5227 rcu_read_lock();
5228#ifdef CONFIG_RPS
5229 if (static_branch_unlikely(&rps_needed)) {
5230 list_for_each_entry_safe(skb, next, head, list) {
5231 struct rps_dev_flow voidflow, *rflow = &voidflow;
5232 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5233
5234 if (cpu >= 0) {
				/* Will be handled, remove from list */
5236 skb_list_del_init(skb);
5237 enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5238 }
5239 }
5240 }
5241#endif
5242 __netif_receive_skb_list(head);
5243 rcu_read_unlock();
5244}
5245
5246
/**
 *	netif_receive_skb - process receive buffer from network
 *	@skb: buffer to process
 *
 *	netif_receive_skb() is the main receive data processing function.
 *	It always succeeds. The buffer may be dropped during processing
 *	for congestion control or by the protocol layers.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
5261int netif_receive_skb(struct sk_buff *skb)
5262{
5263 int ret;
5264
5265 trace_netif_receive_skb_entry(skb);
5266
5267 ret = netif_receive_skb_internal(skb);
5268 trace_netif_receive_skb_exit(ret);
5269
5270 return ret;
5271}
5272EXPORT_SYMBOL(netif_receive_skb);
5273
/**
 *	netif_receive_skb_list - process many receive buffers from network
 *	@head: list of skbs to process.
 *
 *	Since return value of netif_receive_skb() is normally ignored, and
 *	wouldn't be meaningful for a list, this function returns void.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 */
5284void netif_receive_skb_list(struct list_head *head)
5285{
5286 struct sk_buff *skb;
5287
5288 if (list_empty(head))
5289 return;
5290 if (trace_netif_receive_skb_list_entry_enabled()) {
5291 list_for_each_entry(skb, head, list)
5292 trace_netif_receive_skb_list_entry(skb);
5293 }
5294 netif_receive_skb_list_internal(head);
5295 trace_netif_receive_skb_list_exit(0);
5296}
5297EXPORT_SYMBOL(netif_receive_skb_list);
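
/* Usage sketch (illustrative only; the "foo" helpers are hypothetical):
 * a driver that has already dequeued a burst of received buffers can
 * hand them up in one batch instead of one netif_receive_skb() call
 * per packet:
 *
 *	LIST_HEAD(rx_list);
 *	struct sk_buff *skb;
 *
 *	while ((skb = foo_fetch_rx_skb(ring)) != NULL)
 *		list_add_tail(&skb->list, &rx_list);
 *	netif_receive_skb_list(&rx_list);
 *
 * Batching amortizes the per-packet cost of the receive path (RPS
 * decisions, taps, protocol demux) across the whole list.
 */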
5298
5299DEFINE_PER_CPU(struct work_struct, flush_works);

/* Network device is going away, flush any packets still pending */
5302static void flush_backlog(struct work_struct *work)
5303{
5304 struct sk_buff *skb, *tmp;
5305 struct softnet_data *sd;
5306
5307 local_bh_disable();
5308 sd = this_cpu_ptr(&softnet_data);
5309
5310 local_irq_disable();
5311 rps_lock(sd);
5312 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
5313 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
5314 __skb_unlink(skb, &sd->input_pkt_queue);
5315 kfree_skb(skb);
5316 input_queue_head_incr(sd);
5317 }
5318 }
5319 rps_unlock(sd);
5320 local_irq_enable();
5321
5322 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
5323 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
5324 __skb_unlink(skb, &sd->process_queue);
5325 kfree_skb(skb);
5326 input_queue_head_incr(sd);
5327 }
5328 }
5329 local_bh_enable();
5330}
5331
5332static void flush_all_backlogs(void)
5333{
5334 unsigned int cpu;
5335
5336 get_online_cpus();
5337
5338 for_each_online_cpu(cpu)
5339 queue_work_on(cpu, system_highpri_wq,
5340 per_cpu_ptr(&flush_works, cpu));
5341
5342 for_each_online_cpu(cpu)
5343 flush_work(per_cpu_ptr(&flush_works, cpu));
5344
5345 put_online_cpus();
5346}
5347
5348INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int));
5349INDIRECT_CALLABLE_DECLARE(int ipv6_gro_complete(struct sk_buff *, int));
5350static int napi_gro_complete(struct sk_buff *skb)
5351{
5352 struct packet_offload *ptype;
5353 __be16 type = skb->protocol;
5354 struct list_head *head = &offload_base;
5355 int err = -ENOENT;
5356
5357 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
5358
5359 if (NAPI_GRO_CB(skb)->count == 1) {
5360 skb_shinfo(skb)->gso_size = 0;
5361 goto out;
5362 }
5363
5364 rcu_read_lock();
5365 list_for_each_entry_rcu(ptype, head, list) {
5366 if (ptype->type != type || !ptype->callbacks.gro_complete)
5367 continue;
5368
5369 err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
5370 ipv6_gro_complete, inet_gro_complete,
5371 skb, 0);
5372 break;
5373 }
5374 rcu_read_unlock();
5375
5376 if (err) {
5377 WARN_ON(&ptype->list == head);
5378 kfree_skb(skb);
5379 return NET_RX_SUCCESS;
5380 }
5381
5382out:
5383 return netif_receive_skb_internal(skb);
5384}
5385
5386static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
5387 bool flush_old)
5388{
5389 struct list_head *head = &napi->gro_hash[index].list;
5390 struct sk_buff *skb, *p;
5391
5392 list_for_each_entry_safe_reverse(skb, p, head, list) {
5393 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
5394 return;
5395 skb_list_del_init(skb);
5396 napi_gro_complete(skb);
5397 napi->gro_hash[index].count--;
5398 }
5399
5400 if (!napi->gro_hash[index].count)
5401 __clear_bit(index, &napi->gro_bitmask);
5402}
5403
/* napi->gro_hash[].list contains packets ordered by age.
 * youngest packets at the head of it.
 * Complete skbs in reverse order to reduce latencies.
 */
5408void napi_gro_flush(struct napi_struct *napi, bool flush_old)
5409{
5410 unsigned long bitmask = napi->gro_bitmask;
5411 unsigned int i, base = ~0U;
5412
5413 while ((i = ffs(bitmask)) != 0) {
5414 bitmask >>= i;
5415 base += i;
5416 __napi_gro_flush_chain(napi, base, flush_old);
5417 }
5418}
5419EXPORT_SYMBOL(napi_gro_flush);
5420
5421static struct list_head *gro_list_prepare(struct napi_struct *napi,
5422 struct sk_buff *skb)
5423{
5424 unsigned int maclen = skb->dev->hard_header_len;
5425 u32 hash = skb_get_hash_raw(skb);
5426 struct list_head *head;
5427 struct sk_buff *p;
5428
5429 head = &napi->gro_hash[hash & (GRO_HASH_BUCKETS - 1)].list;
5430 list_for_each_entry(p, head, list) {
5431 unsigned long diffs;
5432
5433 NAPI_GRO_CB(p)->flush = 0;
5434
5435 if (hash != skb_get_hash_raw(p)) {
5436 NAPI_GRO_CB(p)->same_flow = 0;
5437 continue;
5438 }
5439
5440 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
5441 diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb);
5442 if (skb_vlan_tag_present(p))
5443 diffs |= p->vlan_tci ^ skb->vlan_tci;
5444 diffs |= skb_metadata_dst_cmp(p, skb);
5445 diffs |= skb_metadata_differs(p, skb);
5446 if (maclen == ETH_HLEN)
5447 diffs |= compare_ether_header(skb_mac_header(p),
5448 skb_mac_header(skb));
5449 else if (!diffs)
5450 diffs = memcmp(skb_mac_header(p),
5451 skb_mac_header(skb),
5452 maclen);
5453 NAPI_GRO_CB(p)->same_flow = !diffs;
5454 }
5455
5456 return head;
5457}
5458
5459static void skb_gro_reset_offset(struct sk_buff *skb)
5460{
5461 const struct skb_shared_info *pinfo = skb_shinfo(skb);
5462 const skb_frag_t *frag0 = &pinfo->frags[0];
5463
5464 NAPI_GRO_CB(skb)->data_offset = 0;
5465 NAPI_GRO_CB(skb)->frag0 = NULL;
5466 NAPI_GRO_CB(skb)->frag0_len = 0;
5467
5468 if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
5469 pinfo->nr_frags &&
5470 !PageHighMem(skb_frag_page(frag0))) {
5471 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
5472 NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
5473 skb_frag_size(frag0),
5474 skb->end - skb->tail);
5475 }
5476}
5477
5478static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
5479{
5480 struct skb_shared_info *pinfo = skb_shinfo(skb);
5481
5482 BUG_ON(skb->end - skb->tail < grow);
5483
5484 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
5485
5486 skb->data_len -= grow;
5487 skb->tail += grow;
5488
5489 pinfo->frags[0].page_offset += grow;
5490 skb_frag_size_sub(&pinfo->frags[0], grow);
5491
5492 if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
5493 skb_frag_unref(skb, 0);
5494 memmove(pinfo->frags, pinfo->frags + 1,
5495 --pinfo->nr_frags * sizeof(pinfo->frags[0]));
5496 }
5497}
5498
5499static void gro_flush_oldest(struct list_head *head)
5500{
5501 struct sk_buff *oldest;
5502
5503 oldest = list_last_entry(head, struct sk_buff, list);
5504
	/* We are only called when the bucket holds at least MAX_GRO_SKBS
	 * entries, so the list cannot be empty here; the WARN_ON_ONCE()
	 * below is purely defensive.
	 */
5508 if (WARN_ON_ONCE(!oldest))
5509 return;
5510
	/* Do not adjust napi->gro_hash[].count, caller is adding a new
	 * SKB to the chain.
	 */
5514 skb_list_del_init(oldest);
5515 napi_gro_complete(oldest);
5516}
5517
5518INDIRECT_CALLABLE_DECLARE(struct sk_buff *inet_gro_receive(struct list_head *,
5519 struct sk_buff *));
5520INDIRECT_CALLABLE_DECLARE(struct sk_buff *ipv6_gro_receive(struct list_head *,
5521 struct sk_buff *));
5522static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
5523{
5524 u32 hash = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
5525 struct list_head *head = &offload_base;
5526 struct packet_offload *ptype;
5527 __be16 type = skb->protocol;
5528 struct list_head *gro_head;
5529 struct sk_buff *pp = NULL;
5530 enum gro_result ret;
5531 int same_flow;
5532 int grow;
5533
5534 if (netif_elide_gro(skb->dev))
5535 goto normal;
5536
5537 gro_head = gro_list_prepare(napi, skb);
5538
5539 rcu_read_lock();
5540 list_for_each_entry_rcu(ptype, head, list) {
5541 if (ptype->type != type || !ptype->callbacks.gro_receive)
5542 continue;
5543
5544 skb_set_network_header(skb, skb_gro_offset(skb));
5545 skb_reset_mac_len(skb);
5546 NAPI_GRO_CB(skb)->same_flow = 0;
5547 NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
5548 NAPI_GRO_CB(skb)->free = 0;
5549 NAPI_GRO_CB(skb)->encap_mark = 0;
5550 NAPI_GRO_CB(skb)->recursion_counter = 0;
5551 NAPI_GRO_CB(skb)->is_fou = 0;
5552 NAPI_GRO_CB(skb)->is_atomic = 1;
5553 NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
5554
		/* Setup for GRO checksum validation */
5556 switch (skb->ip_summed) {
5557 case CHECKSUM_COMPLETE:
5558 NAPI_GRO_CB(skb)->csum = skb->csum;
5559 NAPI_GRO_CB(skb)->csum_valid = 1;
5560 NAPI_GRO_CB(skb)->csum_cnt = 0;
5561 break;
5562 case CHECKSUM_UNNECESSARY:
5563 NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
5564 NAPI_GRO_CB(skb)->csum_valid = 0;
5565 break;
5566 default:
5567 NAPI_GRO_CB(skb)->csum_cnt = 0;
5568 NAPI_GRO_CB(skb)->csum_valid = 0;
5569 }
5570
5571 pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
5572 ipv6_gro_receive, inet_gro_receive,
5573 gro_head, skb);
5574 break;
5575 }
5576 rcu_read_unlock();
5577
5578 if (&ptype->list == head)
5579 goto normal;
5580
5581 if (IS_ERR(pp) && PTR_ERR(pp) == -EINPROGRESS) {
5582 ret = GRO_CONSUMED;
5583 goto ok;
5584 }
5585
5586 same_flow = NAPI_GRO_CB(skb)->same_flow;
5587 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
5588
5589 if (pp) {
5590 skb_list_del_init(pp);
5591 napi_gro_complete(pp);
5592 napi->gro_hash[hash].count--;
5593 }
5594
5595 if (same_flow)
5596 goto ok;
5597
5598 if (NAPI_GRO_CB(skb)->flush)
5599 goto normal;
5600
5601 if (unlikely(napi->gro_hash[hash].count >= MAX_GRO_SKBS)) {
5602 gro_flush_oldest(gro_head);
5603 } else {
5604 napi->gro_hash[hash].count++;
5605 }
5606 NAPI_GRO_CB(skb)->count = 1;
5607 NAPI_GRO_CB(skb)->age = jiffies;
5608 NAPI_GRO_CB(skb)->last = skb;
5609 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
5610 list_add(&skb->list, gro_head);
5611 ret = GRO_HELD;
5612
5613pull:
5614 grow = skb_gro_offset(skb) - skb_headlen(skb);
5615 if (grow > 0)
5616 gro_pull_from_frag0(skb, grow);
5617ok:
5618 if (napi->gro_hash[hash].count) {
5619 if (!test_bit(hash, &napi->gro_bitmask))
5620 __set_bit(hash, &napi->gro_bitmask);
5621 } else if (test_bit(hash, &napi->gro_bitmask)) {
5622 __clear_bit(hash, &napi->gro_bitmask);
5623 }
5624
5625 return ret;
5626
5627normal:
5628 ret = GRO_NORMAL;
5629 goto pull;
5630}
5631
5632struct packet_offload *gro_find_receive_by_type(__be16 type)
5633{
5634 struct list_head *offload_head = &offload_base;
5635 struct packet_offload *ptype;
5636
5637 list_for_each_entry_rcu(ptype, offload_head, list) {
5638 if (ptype->type != type || !ptype->callbacks.gro_receive)
5639 continue;
5640 return ptype;
5641 }
5642 return NULL;
5643}
5644EXPORT_SYMBOL(gro_find_receive_by_type);
5645
5646struct packet_offload *gro_find_complete_by_type(__be16 type)
5647{
5648 struct list_head *offload_head = &offload_base;
5649 struct packet_offload *ptype;
5650
5651 list_for_each_entry_rcu(ptype, offload_head, list) {
5652 if (ptype->type != type || !ptype->callbacks.gro_complete)
5653 continue;
5654 return ptype;
5655 }
5656 return NULL;
5657}
5658EXPORT_SYMBOL(gro_find_complete_by_type);
5659
5660static void napi_skb_free_stolen_head(struct sk_buff *skb)
5661{
5662 skb_dst_drop(skb);
5663 secpath_reset(skb);
5664 kmem_cache_free(skbuff_head_cache, skb);
5665}
5666
5667static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
5668{
5669 switch (ret) {
5670 case GRO_NORMAL:
5671 if (netif_receive_skb_internal(skb))
5672 ret = GRO_DROP;
5673 break;
5674
5675 case GRO_DROP:
5676 kfree_skb(skb);
5677 break;
5678
5679 case GRO_MERGED_FREE:
5680 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
5681 napi_skb_free_stolen_head(skb);
5682 else
5683 __kfree_skb(skb);
5684 break;
5685
5686 case GRO_HELD:
5687 case GRO_MERGED:
5688 case GRO_CONSUMED:
5689 break;
5690 }
5691
5692 return ret;
5693}
5694
5695gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
5696{
5697 gro_result_t ret;
5698
5699 skb_mark_napi_id(skb, napi);
5700 trace_napi_gro_receive_entry(skb);
5701
5702 skb_gro_reset_offset(skb);
5703
5704 ret = napi_skb_finish(dev_gro_receive(napi, skb), skb);
5705 trace_napi_gro_receive_exit(ret);
5706
5707 return ret;
5708}
5709EXPORT_SYMBOL(napi_gro_receive);
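
/* Usage sketch (illustrative only; "foo" names are hypothetical): the
 * canonical place to call napi_gro_receive() is a driver's NAPI poll
 * callback, once per completed receive descriptor:
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_ring *ring = container_of(napi, struct foo_ring,
 *						     napi);
 *		struct sk_buff *skb;
 *		int work = 0;
 *
 *		while (work < budget && (skb = foo_fetch_rx_skb(ring))) {
 *			napi_gro_receive(napi, skb);
 *			work++;
 *		}
 *		if (work < budget && napi_complete_done(napi, work))
 *			foo_enable_rx_irq(ring);
 *		return work;
 *	}
 */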
5710
5711static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
5712{
5713 if (unlikely(skb->pfmemalloc)) {
5714 consume_skb(skb);
5715 return;
5716 }
5717 __skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
5719 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
5720 __vlan_hwaccel_clear_tag(skb);
5721 skb->dev = napi->dev;
5722 skb->skb_iif = 0;
5723
	/* eth_type_trans() assumes pkt_type is PACKET_HOST */
5725 skb->pkt_type = PACKET_HOST;
5726
5727 skb->encapsulation = 0;
5728 skb_shinfo(skb)->gso_type = 0;
5729 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
5730 secpath_reset(skb);
5731
5732 napi->skb = skb;
5733}
5734
5735struct sk_buff *napi_get_frags(struct napi_struct *napi)
5736{
5737 struct sk_buff *skb = napi->skb;
5738
5739 if (!skb) {
5740 skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
5741 if (skb) {
5742 napi->skb = skb;
5743 skb_mark_napi_id(skb, napi);
5744 }
5745 }
5746 return skb;
5747}
5748EXPORT_SYMBOL(napi_get_frags);
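
/* Usage sketch (illustrative only; "page", "offset" and "len" stand for
 * the driver's receive-buffer state): drivers that receive into pages
 * rather than linear buffers pair napi_get_frags() with napi_gro_frags();
 * the skb head stays empty and the payload is attached as fragments:
 *
 *	struct sk_buff *skb = napi_get_frags(napi);
 *
 *	if (!skb)
 *		return;		(allocation failed, drop this frame)
 *	skb_fill_page_desc(skb, 0, page, offset, len);
 *	skb->len += len;
 *	skb->data_len += len;
 *	skb->truesize += PAGE_SIZE;
 *	napi_gro_frags(napi);
 *
 * napi_gro_frags() pulls the Ethernet header out of frag0 itself, so the
 * driver does not call eth_type_trans() in this mode.
 */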
5749
5750static gro_result_t napi_frags_finish(struct napi_struct *napi,
5751 struct sk_buff *skb,
5752 gro_result_t ret)
5753{
5754 switch (ret) {
5755 case GRO_NORMAL:
5756 case GRO_HELD:
5757 __skb_push(skb, ETH_HLEN);
5758 skb->protocol = eth_type_trans(skb, skb->dev);
5759 if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
5760 ret = GRO_DROP;
5761 break;
5762
5763 case GRO_DROP:
5764 napi_reuse_skb(napi, skb);
5765 break;
5766
5767 case GRO_MERGED_FREE:
5768 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
5769 napi_skb_free_stolen_head(skb);
5770 else
5771 napi_reuse_skb(napi, skb);
5772 break;
5773
5774 case GRO_MERGED:
5775 case GRO_CONSUMED:
5776 break;
5777 }
5778
5779 return ret;
5780}
5781
/* Upper GRO stack assumes network header starts at gro_receive_skb()->data
 * Drivers could call both napi_gro_frags() and napi_gro_receive()
 * We copy the Ethernet header into skb->data to have a common layout.
 */
5786static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
5787{
5788 struct sk_buff *skb = napi->skb;
5789 const struct ethhdr *eth;
5790 unsigned int hlen = sizeof(*eth);
5791
5792 napi->skb = NULL;
5793
5794 skb_reset_mac_header(skb);
5795 skb_gro_reset_offset(skb);
5796
5797 if (unlikely(skb_gro_header_hard(skb, hlen))) {
5798 eth = skb_gro_header_slow(skb, hlen, 0);
5799 if (unlikely(!eth)) {
5800 net_warn_ratelimited("%s: dropping impossible skb from %s\n",
5801 __func__, napi->dev->name);
5802 napi_reuse_skb(napi, skb);
5803 return NULL;
5804 }
5805 } else {
5806 eth = (const struct ethhdr *)skb->data;
5807 gro_pull_from_frag0(skb, hlen);
5808 NAPI_GRO_CB(skb)->frag0 += hlen;
5809 NAPI_GRO_CB(skb)->frag0_len -= hlen;
5810 }
5811 __skb_pull(skb, hlen);
5812
	/*
	 * This works because the only protocols we care about don't require
	 * special handling.
	 * We'll fix it up properly in napi_frags_finish()
	 */
5818 skb->protocol = eth->h_proto;
5819
5820 return skb;
5821}
5822
5823gro_result_t napi_gro_frags(struct napi_struct *napi)
5824{
5825 gro_result_t ret;
5826 struct sk_buff *skb = napi_frags_skb(napi);
5827
5828 if (!skb)
5829 return GRO_DROP;
5830
5831 trace_napi_gro_frags_entry(skb);
5832
5833 ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
5834 trace_napi_gro_frags_exit(ret);
5835
5836 return ret;
5837}
5838EXPORT_SYMBOL(napi_gro_frags);
5839
/* Compute the checksum over the GRO window, fold it, and cache the
 * result in the GRO control block for reuse by later validations.
 */
5843__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
5844{
5845 __wsum wsum;
5846 __sum16 sum;
5847
5848 wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));

5853 if (likely(!sum)) {
5854 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
5855 !skb->csum_complete_sw)
5856 netdev_rx_csum_fault(skb->dev, skb);
5857 }
5858
5859 NAPI_GRO_CB(skb)->csum = wsum;
5860 NAPI_GRO_CB(skb)->csum_valid = 1;
5861
5862 return sum;
5863}
5864EXPORT_SYMBOL(__skb_gro_checksum_complete);
5865
5866static void net_rps_send_ipi(struct softnet_data *remsd)
5867{
5868#ifdef CONFIG_RPS
5869 while (remsd) {
5870 struct softnet_data *next = remsd->rps_ipi_next;
5871
5872 if (cpu_online(remsd->cpu))
5873 smp_call_function_single_async(remsd->cpu, &remsd->csd);
5874 remsd = next;
5875 }
5876#endif
5877}
5878
/*
 * net_rps_action_and_irq_enable sends any pending IPI's for rps.
 * Note: called with local irq disabled, but exits with local irq enabled.
 */
5883static void net_rps_action_and_irq_enable(struct softnet_data *sd)
5884{
5885#ifdef CONFIG_RPS
5886 struct softnet_data *remsd = sd->rps_ipi_list;
5887
5888 if (remsd) {
5889 sd->rps_ipi_list = NULL;
5890
5891 local_irq_enable();
5892
		/* Send pending IPI's to kick RPS processing on remote cpus. */
5894 net_rps_send_ipi(remsd);
5895 } else
5896#endif
5897 local_irq_enable();
5898}
5899
5900static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
5901{
5902#ifdef CONFIG_RPS
5903 return sd->rps_ipi_list != NULL;
5904#else
5905 return false;
5906#endif
5907}
5908
5909static int process_backlog(struct napi_struct *napi, int quota)
5910{
5911 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
5912 bool again = true;
5913 int work = 0;
5914
	/* Check if we have pending ipi; it is better to send them now
	 * than to wait until net_rx_action() ends.
	 */
5918 if (sd_has_rps_ipi_waiting(sd)) {
5919 local_irq_disable();
5920 net_rps_action_and_irq_enable(sd);
5921 }
5922
5923 napi->weight = dev_rx_weight;
5924 while (again) {
5925 struct sk_buff *skb;
5926
5927 while ((skb = __skb_dequeue(&sd->process_queue))) {
5928 rcu_read_lock();
5929 __netif_receive_skb(skb);
5930 rcu_read_unlock();
5931 input_queue_head_incr(sd);
5932 if (++work >= quota)
5933 return work;
5934
5935 }
5936
5937 local_irq_disable();
5938 rps_lock(sd);
5939 if (skb_queue_empty(&sd->input_pkt_queue)) {
			/*
			 * Inline a custom version of __napi_complete().
			 * Only the current cpu owns and manipulates this napi,
			 * and NAPI_STATE_SCHED is the only possible flag set
			 * on backlog.
			 * We can use a plain write instead of clear_bit(),
			 * and we don't need an smp_mb() memory barrier.
			 */
5948 napi->state = 0;
5949 again = false;
5950 } else {
5951 skb_queue_splice_tail_init(&sd->input_pkt_queue,
5952 &sd->process_queue);
5953 }
5954 rps_unlock(sd);
5955 local_irq_enable();
5956 }
5957
5958 return work;
5959}
5960
/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run.
 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
 */
5968void __napi_schedule(struct napi_struct *n)
5969{
5970 unsigned long flags;
5971
5972 local_irq_save(flags);
5973 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
5974 local_irq_restore(flags);
5975}
5976EXPORT_SYMBOL(__napi_schedule);
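
/* Usage sketch (illustrative only; "foo" names are hypothetical): the
 * usual pattern in a hard interrupt handler is to mask the device rx
 * interrupt and defer the work to NAPI:
 *
 *	static irqreturn_t foo_isr(int irq, void *data)
 *	{
 *		struct foo_ring *ring = data;
 *
 *		if (napi_schedule_prep(&ring->napi)) {
 *			foo_disable_rx_irq(ring);
 *			__napi_schedule(&ring->napi);
 *		}
 *		return IRQ_HANDLED;
 *	}
 */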
5977
/**
 *	napi_schedule_prep - check if napi can be scheduled
 *	@n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
5987bool napi_schedule_prep(struct napi_struct *n)
5988{
5989 unsigned long val, new;
5990
5991 do {
5992 val = READ_ONCE(n->state);
5993 if (unlikely(val & NAPIF_STATE_DISABLE))
5994 return false;
5995 new = val | NAPIF_STATE_SCHED;
5996
		/* Sets STATE_MISSED bit if STATE_SCHED was already set.
		 * This branch-free form was chosen because the compiler
		 * emits better code for it than for:
		 * if (val & NAPIF_STATE_SCHED)
		 *	new |= NAPIF_STATE_MISSED;
		 */
6003 new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
6004 NAPIF_STATE_MISSED;
6005 } while (cmpxchg(&n->state, val, new) != val);
6006
6007 return !(val & NAPIF_STATE_SCHED);
6008}
6009EXPORT_SYMBOL(napi_schedule_prep);
6010
/**
 * __napi_schedule_irqoff - schedule for receive
 * @n: entry to schedule
 *
 * Variant of __napi_schedule() assuming hard irqs are masked
 */
6017void __napi_schedule_irqoff(struct napi_struct *n)
6018{
6019 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
6020}
6021EXPORT_SYMBOL(__napi_schedule_irqoff);
6022
6023bool napi_complete_done(struct napi_struct *n, int work_done)
6024{
6025 unsigned long flags, val, new;
6026
	/*
	 * 1) Don't let napi dequeue from the cpu poll list
	 *    just in case it's running on a different cpu.
	 * 2) If we are busy polling, do nothing here, we have
	 *    the guarantee we will be called later.
	 */
6033 if (unlikely(n->state & (NAPIF_STATE_NPSVC |
6034 NAPIF_STATE_IN_BUSY_POLL)))
6035 return false;
6036
6037 if (n->gro_bitmask) {
6038 unsigned long timeout = 0;
6039
6040 if (work_done)
6041 timeout = n->dev->gro_flush_timeout;
6042
		/* When the NAPI instance uses a timeout and keeps postponing
		 * it, we need to bound somehow the time packets are kept in
		 * the GRO layer
		 */
6047 napi_gro_flush(n, !!timeout);
6048 if (timeout)
6049 hrtimer_start(&n->timer, ns_to_ktime(timeout),
6050 HRTIMER_MODE_REL_PINNED);
6051 }
6052 if (unlikely(!list_empty(&n->poll_list))) {
		/* If n->poll_list is not empty, we need to mask irqs */
6054 local_irq_save(flags);
6055 list_del_init(&n->poll_list);
6056 local_irq_restore(flags);
6057 }
6058
6059 do {
6060 val = READ_ONCE(n->state);
6061
6062 WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));
6063
6064 new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED);
6065
		/* If STATE_MISSED was set, leave STATE_SCHED set,
		 * because we will call napi->poll() one more time.
		 * This C form helps the compiler generate branch-free code.
		 */
6070 new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
6071 NAPIF_STATE_SCHED;
6072 } while (cmpxchg(&n->state, val, new) != val);
6073
6074 if (unlikely(val & NAPIF_STATE_MISSED)) {
6075 __napi_schedule(n);
6076 return false;
6077 }
6078
6079 return true;
6080}
6081EXPORT_SYMBOL(napi_complete_done);
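
/* Note on the return value (sketch; "foo" is hypothetical): a poll
 * method must only re-arm device interrupts when napi_complete_done()
 * returns true; false means the NAPI instance was rescheduled (e.g. a
 * MISSED event raced in) and ->poll() will run again:
 *
 *	if (work < budget && napi_complete_done(napi, work))
 *		foo_enable_rx_irq(ring);
 *	return work;
 */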
6082
/* must be called under rcu_read_lock(), as we don't take a reference */
6084static struct napi_struct *napi_by_id(unsigned int napi_id)
6085{
6086 unsigned int hash = napi_id % HASH_SIZE(napi_hash);
6087 struct napi_struct *napi;
6088
6089 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
6090 if (napi->napi_id == napi_id)
6091 return napi;
6092
6093 return NULL;
6094}
6095
6096#if defined(CONFIG_NET_RX_BUSY_POLL)
6097
6098#define BUSY_POLL_BUDGET 8
6099
6100static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
6101{
6102 int rc;
6103
	/* Busy polling means there is a high chance device driver hard irq
	 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
	 * set in napi_schedule_prep().
	 * Since we are about to call napi->poll() once more, we can safely
	 * clear NAPI_STATE_MISSED.
	 *
	 * Note: x86 could use a single "lock and ..." instruction
	 * to perform these two clear_bit()
	 */
6113 clear_bit(NAPI_STATE_MISSED, &napi->state);
6114 clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
6115
6116 local_bh_disable();
6117
	/* All we really want here is to re-enable device interrupts.
	 * Ideally, a new ndo_busy_poll_stop() could avoid another round.
	 */
6121 rc = napi->poll(napi, BUSY_POLL_BUDGET);
6122 trace_napi_poll(napi, rc, BUSY_POLL_BUDGET);
6123 netpoll_poll_unlock(have_poll_lock);
6124 if (rc == BUSY_POLL_BUDGET)
6125 __napi_schedule(napi);
6126 local_bh_enable();
6127}
6128
6129void napi_busy_loop(unsigned int napi_id,
6130 bool (*loop_end)(void *, unsigned long),
6131 void *loop_end_arg)
6132{
6133 unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
6134 int (*napi_poll)(struct napi_struct *napi, int budget);
6135 void *have_poll_lock = NULL;
6136 struct napi_struct *napi;
6137
6138restart:
6139 napi_poll = NULL;
6140
6141 rcu_read_lock();
6142
6143 napi = napi_by_id(napi_id);
6144 if (!napi)
6145 goto out;
6146
6147 preempt_disable();
6148 for (;;) {
6149 int work = 0;
6150
6151 local_bh_disable();
6152 if (!napi_poll) {
6153 unsigned long val = READ_ONCE(napi->state);
6154
			/* If multiple threads are competing for this napi,
			 * we avoid dirtying napi->state as much as we can.
			 */
6158 if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
6159 NAPIF_STATE_IN_BUSY_POLL))
6160 goto count;
6161 if (cmpxchg(&napi->state, val,
6162 val | NAPIF_STATE_IN_BUSY_POLL |
6163 NAPIF_STATE_SCHED) != val)
6164 goto count;
6165 have_poll_lock = netpoll_poll_lock(napi);
6166 napi_poll = napi->poll;
6167 }
6168 work = napi_poll(napi, BUSY_POLL_BUDGET);
6169 trace_napi_poll(napi, work, BUSY_POLL_BUDGET);
6170count:
6171 if (work > 0)
6172 __NET_ADD_STATS(dev_net(napi->dev),
6173 LINUX_MIB_BUSYPOLLRXPACKETS, work);
6174 local_bh_enable();
6175
6176 if (!loop_end || loop_end(loop_end_arg, start_time))
6177 break;
6178
6179 if (unlikely(need_resched())) {
6180 if (napi_poll)
6181 busy_poll_stop(napi, have_poll_lock);
6182 preempt_enable();
6183 rcu_read_unlock();
6184 cond_resched();
6185 if (loop_end(loop_end_arg, start_time))
6186 return;
6187 goto restart;
6188 }
6189 cpu_relax();
6190 }
6191 if (napi_poll)
6192 busy_poll_stop(napi, have_poll_lock);
6193 preempt_enable();
6194out:
6195 rcu_read_unlock();
6196}
6197EXPORT_SYMBOL(napi_busy_loop);
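
/* Usage sketch (illustrative only; "foo_loop_end" is hypothetical): the
 * loop_end callback decides when to give up busy polling. A purely
 * timeout-based callback can be as simple as:
 *
 *	static bool foo_loop_end(void *arg, unsigned long start_time)
 *	{
 *		return busy_loop_timeout(start_time);
 *	}
 *
 *	napi_busy_loop(napi_id, foo_loop_end, NULL);
 *
 * The socket code uses the same shape via sk_busy_loop(), with a callback
 * that additionally stops once data has arrived.
 */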
6198
6199#endif
6200
6201static void napi_hash_add(struct napi_struct *napi)
6202{
6203 if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) ||
6204 test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
6205 return;
6206
6207 spin_lock(&napi_hash_lock);
6208
	/* 0..NR_CPUS range is reserved for sender_cpu use */
6210 do {
6211 if (unlikely(++napi_gen_id < MIN_NAPI_ID))
6212 napi_gen_id = MIN_NAPI_ID;
6213 } while (napi_by_id(napi_gen_id));
6214 napi->napi_id = napi_gen_id;
6215
6216 hlist_add_head_rcu(&napi->napi_hash_node,
6217 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
6218
6219 spin_unlock(&napi_hash_lock);
6220}
6221
/* Warning : caller is responsible to make sure rcu grace period
 * is respected before freeing memory containing @napi
 */
6225bool napi_hash_del(struct napi_struct *napi)
6226{
6227 bool rcu_sync_needed = false;
6228
6229 spin_lock(&napi_hash_lock);
6230
6231 if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) {
6232 rcu_sync_needed = true;
6233 hlist_del_rcu(&napi->napi_hash_node);
6234 }
6235 spin_unlock(&napi_hash_lock);
6236 return rcu_sync_needed;
6237}
6238EXPORT_SYMBOL_GPL(napi_hash_del);
6239
6240static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
6241{
6242 struct napi_struct *napi;
6243
6244 napi = container_of(timer, struct napi_struct, timer);
6245
	/* Note : we use a relaxed variant of napi_schedule_prep() not setting
	 * NAPI_STATE_MISSED, since we do not react to a device IRQ.
	 */
6249 if (napi->gro_bitmask && !napi_disable_pending(napi) &&
6250 !test_and_set_bit(NAPI_STATE_SCHED, &napi->state))
6251 __napi_schedule_irqoff(napi);
6252
6253 return HRTIMER_NORESTART;
6254}
6255
6256static void init_gro_hash(struct napi_struct *napi)
6257{
6258 int i;
6259
6260 for (i = 0; i < GRO_HASH_BUCKETS; i++) {
6261 INIT_LIST_HEAD(&napi->gro_hash[i].list);
6262 napi->gro_hash[i].count = 0;
6263 }
6264 napi->gro_bitmask = 0;
6265}
6266
6267void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
6268 int (*poll)(struct napi_struct *, int), int weight)
6269{
6270 INIT_LIST_HEAD(&napi->poll_list);
6271 hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
6272 napi->timer.function = napi_watchdog;
6273 init_gro_hash(napi);
6274 napi->skb = NULL;
6275 napi->poll = poll;
6276 if (weight > NAPI_POLL_WEIGHT)
6277 netdev_err_once(dev, "%s() called with weight %d\n", __func__,
6278 weight);
6279 napi->weight = weight;
6280 list_add(&napi->dev_list, &dev->napi_list);
6281 napi->dev = dev;
6282#ifdef CONFIG_NETPOLL
6283 napi->poll_owner = -1;
6284#endif
6285 set_bit(NAPI_STATE_SCHED, &napi->state);
6286 napi_hash_add(napi);
6287}
6288EXPORT_SYMBOL(netif_napi_add);
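
/* Usage sketch (illustrative only; "foo" names are hypothetical): drivers
 * register their poll method at probe/setup time and enable the instance
 * when the interface is brought up:
 *
 *	netif_napi_add(netdev, &ring->napi, foo_poll, NAPI_POLL_WEIGHT);
 *	...
 *	napi_enable(&ring->napi);	(typically from ndo_open)
 *
 * netif_napi_add() leaves NAPI_STATE_SCHED set, so the instance cannot
 * run until napi_enable() clears it.
 */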
6289
6290void napi_disable(struct napi_struct *n)
6291{
6292 might_sleep();
6293 set_bit(NAPI_STATE_DISABLE, &n->state);
6294
6295 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
6296 msleep(1);
6297 while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
6298 msleep(1);
6299
6300 hrtimer_cancel(&n->timer);
6301
6302 clear_bit(NAPI_STATE_DISABLE, &n->state);
6303}
6304EXPORT_SYMBOL(napi_disable);
6305
6306static void flush_gro_hash(struct napi_struct *napi)
6307{
6308 int i;
6309
6310 for (i = 0; i < GRO_HASH_BUCKETS; i++) {
6311 struct sk_buff *skb, *n;
6312
6313 list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list)
6314 kfree_skb(skb);
6315 napi->gro_hash[i].count = 0;
6316 }
6317}
6318
/* Must be called in process context */
6320void netif_napi_del(struct napi_struct *napi)
6321{
6322 might_sleep();
6323 if (napi_hash_del(napi))
6324 synchronize_net();
6325 list_del_init(&napi->dev_list);
6326 napi_free_frags(napi);
6327
6328 flush_gro_hash(napi);
6329 napi->gro_bitmask = 0;
6330}
6331EXPORT_SYMBOL(netif_napi_del);
6332
6333static int napi_poll(struct napi_struct *n, struct list_head *repoll)
6334{
6335 void *have;
6336 int work, weight;
6337
6338 list_del_init(&n->poll_list);
6339
6340 have = netpoll_poll_lock(n);
6341
6342 weight = n->weight;
6343
	/* This NAPI_STATE_SCHED test is for avoiding a race
	 * with netpoll's poll_napi().  Only the entity which
	 * obtains the lock and sees NAPI_STATE_SCHED set will
	 * actually make the ->poll() call.  Therefore we avoid
	 * accidentally calling ->poll() when NAPI is not scheduled.
	 */
6350 work = 0;
6351 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
6352 work = n->poll(n, weight);
6353 trace_napi_poll(n, work, weight);
6354 }
6355
6356 WARN_ON_ONCE(work > weight);
6357
6358 if (likely(work < weight))
6359 goto out_unlock;
6360
	/* Drivers must not modify the NAPI state if they
	 * consume the entire weight.  In such cases this code
	 * still "owns" the NAPI instance and therefore can
	 * move the instance around on the list at-will.
	 */
6366 if (unlikely(napi_disable_pending(n))) {
6367 napi_complete(n);
6368 goto out_unlock;
6369 }
6370
6371 if (n->gro_bitmask) {
		/* flush too old packets
		 * If HZ < 1000, flush all packets.
		 */
6375 napi_gro_flush(n, HZ >= 1000);
6376 }
6377
	/* Some drivers may have called napi_schedule
	 * prior to exhausting their budget.
	 */
6381 if (unlikely(!list_empty(&n->poll_list))) {
6382 pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
6383 n->dev ? n->dev->name : "backlog");
6384 goto out_unlock;
6385 }
6386
6387 list_add_tail(&n->poll_list, repoll);
6388
6389out_unlock:
6390 netpoll_poll_unlock(have);
6391
6392 return work;
6393}
6394
6395static __latent_entropy void net_rx_action(struct softirq_action *h)
6396{
6397 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
6398 unsigned long time_limit = jiffies +
6399 usecs_to_jiffies(netdev_budget_usecs);
6400 int budget = netdev_budget;
6401 LIST_HEAD(list);
6402 LIST_HEAD(repoll);
6403
6404 local_irq_disable();
6405 list_splice_init(&sd->poll_list, &list);
6406 local_irq_enable();
6407
6408 for (;;) {
6409 struct napi_struct *n;
6410
6411 if (list_empty(&list)) {
6412 if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
6413 goto out;
6414 break;
6415 }
6416
6417 n = list_first_entry(&list, struct napi_struct, poll_list);
6418 budget -= napi_poll(n, &repoll);
6419
		/* If softirq window is exhausted then punt.
		 * Allow this to run for 2 jiffies, which allows
		 * an average latency of 1.5/HZ.
		 */
6424 if (unlikely(budget <= 0 ||
6425 time_after_eq(jiffies, time_limit))) {
6426 sd->time_squeeze++;
6427 break;
6428 }
6429 }
6430
6431 local_irq_disable();
6432
6433 list_splice_tail_init(&sd->poll_list, &list);
6434 list_splice_tail(&repoll, &list);
6435 list_splice(&list, &sd->poll_list);
6436 if (!list_empty(&sd->poll_list))
6437 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
6438
6439 net_rps_action_and_irq_enable(sd);
6440out:
6441 __kfree_skb_flush();
6442}
6443
6444struct netdev_adjacent {
6445 struct net_device *dev;

	/* upper master flag, there can only be one master device per list */
	bool master;

	/* counter for the number of times this device was added to us */
	u16 ref_nr;

	/* private field for the users */
	void *private;
6455
6456 struct list_head list;
6457 struct rcu_head rcu;
6458};
6459
6460static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
6461 struct list_head *adj_list)
6462{
6463 struct netdev_adjacent *adj;
6464
6465 list_for_each_entry(adj, adj_list, list) {
6466 if (adj->dev == adj_dev)
6467 return adj;
6468 }
6469 return NULL;
6470}
6471
6472static int __netdev_has_upper_dev(struct net_device *upper_dev, void *data)
6473{
6474 struct net_device *dev = data;
6475
6476 return upper_dev == dev;
6477}
6478
/**
 * netdev_has_upper_dev - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to specified upper device and return true
 * in case it is. Note that this checks only immediate upper device,
 * not through a complete stack of devices. The caller must hold the RTNL lock.
 */
6488bool netdev_has_upper_dev(struct net_device *dev,
6489 struct net_device *upper_dev)
6490{
6491 ASSERT_RTNL();
6492
6493 return netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
6494 upper_dev);
6495}
6496EXPORT_SYMBOL(netdev_has_upper_dev);
6497
/**
 * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to specified upper device and return true
 * in case it is. Note that this checks the entire upper device chain.
 * The caller must hold rcu lock.
 */
6508bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
6509 struct net_device *upper_dev)
6510{
6511 return !!netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
6512 upper_dev);
6513}
6514EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
6515
/**
 * netdev_has_any_upper_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to an upper device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
6523bool netdev_has_any_upper_dev(struct net_device *dev)
6524{
6525 ASSERT_RTNL();
6526
6527 return !list_empty(&dev->adj_list.upper);
6528}
6529EXPORT_SYMBOL(netdev_has_any_upper_dev);
6530
/**
 * netdev_master_upper_dev_get - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RTNL lock.
 */
6538struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
6539{
6540 struct netdev_adjacent *upper;
6541
6542 ASSERT_RTNL();
6543
6544 if (list_empty(&dev->adj_list.upper))
6545 return NULL;
6546
6547 upper = list_first_entry(&dev->adj_list.upper,
6548 struct netdev_adjacent, list);
6549 if (likely(upper->master))
6550 return upper->dev;
6551 return NULL;
6552}
6553EXPORT_SYMBOL(netdev_master_upper_dev_get);
6554
/**
 * netdev_has_any_lower_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to a lower device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
6562static bool netdev_has_any_lower_dev(struct net_device *dev)
6563{
6564 ASSERT_RTNL();
6565
6566 return !list_empty(&dev->adj_list.lower);
6567}
6568
6569void *netdev_adjacent_get_private(struct list_head *adj_list)
6570{
6571 struct netdev_adjacent *adj;
6572
6573 adj = list_entry(adj_list, struct netdev_adjacent, list);
6574
6575 return adj->private;
6576}
6577EXPORT_SYMBOL(netdev_adjacent_get_private);
6578
/**
 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next device from the dev's upper list, starting from iter
 * position. The caller must hold RCU read lock.
 */
6587struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
6588 struct list_head **iter)
6589{
6590 struct netdev_adjacent *upper;
6591
6592 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
6593
6594 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
6595
6596 if (&upper->list == &dev->adj_list.upper)
6597 return NULL;
6598
6599 *iter = &upper->list;
6600
6601 return upper->dev;
6602}
6603EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
6604
6605static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
6606 struct list_head **iter)
6607{
6608 struct netdev_adjacent *upper;
6609
6610 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
6611
6612 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
6613
6614 if (&upper->list == &dev->adj_list.upper)
6615 return NULL;
6616
6617 *iter = &upper->list;
6618
6619 return upper->dev;
6620}
6621
6622int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
6623 int (*fn)(struct net_device *dev,
6624 void *data),
6625 void *data)
6626{
6627 struct net_device *udev;
6628 struct list_head *iter;
6629 int ret;
6630
6631 for (iter = &dev->adj_list.upper,
6632 udev = netdev_next_upper_dev_rcu(dev, &iter);
6633 udev;
6634 udev = netdev_next_upper_dev_rcu(dev, &iter)) {
6635
6636 ret = fn(udev, data);
6637 if (ret)
6638 return ret;
6639
6640
6641 ret = netdev_walk_all_upper_dev_rcu(udev, fn, data);
6642 if (ret)
6643 return ret;
6644 }
6645
6646 return 0;
6647}
6648EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
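
/* Usage sketch (illustrative only; "foo_match_upper" is hypothetical):
 * the callback is invoked for every device in the upper graph; a non-zero
 * return value stops the walk and is propagated to the caller:
 *
 *	static int foo_match_upper(struct net_device *upper, void *data)
 *	{
 *		return upper == data;
 *	}
 *
 *	rcu_read_lock();
 *	found = netdev_walk_all_upper_dev_rcu(dev, foo_match_upper, target);
 *	rcu_read_unlock();
 *
 * This is exactly how netdev_has_upper_dev_all_rcu() above is built.
 */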
6649
/**
 * netdev_lower_get_next_private - Get the next ->private from the
 *				   lower neighbour list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must hold either the RTNL
 * lock or its own locking that guarantees that the neighbour lower list
 * will remain unchanged.
 */
6661void *netdev_lower_get_next_private(struct net_device *dev,
6662 struct list_head **iter)
6663{
6664 struct netdev_adjacent *lower;
6665
6666 lower = list_entry(*iter, struct netdev_adjacent, list);
6667
6668 if (&lower->list == &dev->adj_list.lower)
6669 return NULL;
6670
6671 *iter = lower->list.next;
6672
6673 return lower->private;
6674}
6675EXPORT_SYMBOL(netdev_lower_get_next_private);
6676
/**
 * netdev_lower_get_next_private_rcu - Get the next ->private from the
 *				       lower neighbour list, RCU variant
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must hold RCU read lock.
 */
6687void *netdev_lower_get_next_private_rcu(struct net_device *dev,
6688 struct list_head **iter)
6689{
6690 struct netdev_adjacent *lower;
6691
6692 WARN_ON_ONCE(!rcu_read_lock_held());
6693
6694 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
6695
6696 if (&lower->list == &dev->adj_list.lower)
6697 return NULL;
6698
6699 *iter = &lower->list;
6700
6701 return lower->private;
6702}
6703EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
6704
/**
 * netdev_lower_get_next - Get the next device from the lower neighbour
 *			   list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent from the dev's lower neighbour
 * list, starting from iter position. The caller must hold RTNL lock or
 * its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
 */
6716void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
6717{
6718 struct netdev_adjacent *lower;
6719
6720 lower = list_entry(*iter, struct netdev_adjacent, list);
6721
6722 if (&lower->list == &dev->adj_list.lower)
6723 return NULL;
6724
6725 *iter = lower->list.next;
6726
6727 return lower->dev;
6728}
6729EXPORT_SYMBOL(netdev_lower_get_next);
6730
6731static struct net_device *netdev_next_lower_dev(struct net_device *dev,
6732 struct list_head **iter)
6733{
6734 struct netdev_adjacent *lower;
6735
6736 lower = list_entry((*iter)->next, struct netdev_adjacent, list);
6737
6738 if (&lower->list == &dev->adj_list.lower)
6739 return NULL;
6740
6741 *iter = &lower->list;
6742
6743 return lower->dev;
6744}
6745
6746int netdev_walk_all_lower_dev(struct net_device *dev,
6747 int (*fn)(struct net_device *dev,
6748 void *data),
6749 void *data)
6750{
6751 struct net_device *ldev;
6752 struct list_head *iter;
6753 int ret;
6754
6755 for (iter = &dev->adj_list.lower,
6756 ldev = netdev_next_lower_dev(dev, &iter);
6757 ldev;
6758 ldev = netdev_next_lower_dev(dev, &iter)) {
6759
6760 ret = fn(ldev, data);
6761 if (ret)
6762 return ret;
6763
6764
6765 ret = netdev_walk_all_lower_dev(ldev, fn, data);
6766 if (ret)
6767 return ret;
6768 }
6769
6770 return 0;
6771}
6772EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);
6773
6774static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
6775 struct list_head **iter)
6776{
6777 struct netdev_adjacent *lower;
6778
6779 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
6780 if (&lower->list == &dev->adj_list.lower)
6781 return NULL;
6782
6783 *iter = &lower->list;
6784
6785 return lower->dev;
6786}
6787
6788int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
6789 int (*fn)(struct net_device *dev,
6790 void *data),
6791 void *data)
6792{
6793 struct net_device *ldev;
6794 struct list_head *iter;
6795 int ret;
6796
6797 for (iter = &dev->adj_list.lower,
6798 ldev = netdev_next_lower_dev_rcu(dev, &iter);
6799 ldev;
6800 ldev = netdev_next_lower_dev_rcu(dev, &iter)) {
6801
6802 ret = fn(ldev, data);
6803 if (ret)
6804 return ret;
6805
6806
6807 ret = netdev_walk_all_lower_dev_rcu(ldev, fn, data);
6808 if (ret)
6809 return ret;
6810 }
6811
6812 return 0;
6813}
6814EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);
6815
/**
 * netdev_lower_get_first_private_rcu - Get the first ->private from the
 *					lower neighbour list, RCU variant
 * @dev: device
 *
 * Gets the first netdev_adjacent->private from the dev's lower neighbour
 * list. The caller must hold RCU read lock.
 */
6825void *netdev_lower_get_first_private_rcu(struct net_device *dev)
6826{
6827 struct netdev_adjacent *lower;
6828
6829 lower = list_first_or_null_rcu(&dev->adj_list.lower,
6830 struct netdev_adjacent, list);
6831 if (lower)
6832 return lower->private;
6833 return NULL;
6834}
6835EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
6836
/**
 * netdev_master_upper_dev_get_rcu - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RCU read lock.
 */
6844struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
6845{
6846 struct netdev_adjacent *upper;
6847
6848 upper = list_first_or_null_rcu(&dev->adj_list.upper,
6849 struct netdev_adjacent, list);
6850 if (upper && likely(upper->master))
6851 return upper->dev;
6852 return NULL;
6853}
6854EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
6855
6856static int netdev_adjacent_sysfs_add(struct net_device *dev,
6857 struct net_device *adj_dev,
6858 struct list_head *dev_list)
6859{
6860 char linkname[IFNAMSIZ+7];
6861
6862 sprintf(linkname, dev_list == &dev->adj_list.upper ?
6863 "upper_%s" : "lower_%s", adj_dev->name);
6864 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
6865 linkname);
6866}
6867static void netdev_adjacent_sysfs_del(struct net_device *dev,
6868 char *name,
6869 struct list_head *dev_list)
6870{
6871 char linkname[IFNAMSIZ+7];
6872
6873 sprintf(linkname, dev_list == &dev->adj_list.upper ?
6874 "upper_%s" : "lower_%s", name);
6875 sysfs_remove_link(&(dev->dev.kobj), linkname);
6876}
6877
6878static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
6879 struct net_device *adj_dev,
6880 struct list_head *dev_list)
6881{
6882 return (dev_list == &dev->adj_list.upper ||
6883 dev_list == &dev->adj_list.lower) &&
6884 net_eq(dev_net(dev), dev_net(adj_dev));
6885}
6886
6887static int __netdev_adjacent_dev_insert(struct net_device *dev,
6888 struct net_device *adj_dev,
6889 struct list_head *dev_list,
6890 void *private, bool master)
6891{
6892 struct netdev_adjacent *adj;
6893 int ret;
6894
6895 adj = __netdev_find_adj(adj_dev, dev_list);
6896
6897 if (adj) {
6898 adj->ref_nr += 1;
6899 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
6900 dev->name, adj_dev->name, adj->ref_nr);
6901
6902 return 0;
6903 }
6904
6905 adj = kmalloc(sizeof(*adj), GFP_KERNEL);
6906 if (!adj)
6907 return -ENOMEM;
6908
6909 adj->dev = adj_dev;
6910 adj->master = master;
6911 adj->ref_nr = 1;
6912 adj->private = private;
6913 dev_hold(adj_dev);
6914
6915 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
6916 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);
6917
6918 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
6919 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
6920 if (ret)
6921 goto free_adj;
6922 }
6923
	/* Ensure that master link is always the first item in list. */
6925 if (master) {
6926 ret = sysfs_create_link(&(dev->dev.kobj),
6927 &(adj_dev->dev.kobj), "master");
6928 if (ret)
6929 goto remove_symlinks;
6930
6931 list_add_rcu(&adj->list, dev_list);
6932 } else {
6933 list_add_tail_rcu(&adj->list, dev_list);
6934 }
6935
6936 return 0;
6937
6938remove_symlinks:
6939 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
6940 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
6941free_adj:
6942 kfree(adj);
6943 dev_put(adj_dev);
6944
6945 return ret;
6946}
6947
6948static void __netdev_adjacent_dev_remove(struct net_device *dev,
6949 struct net_device *adj_dev,
6950 u16 ref_nr,
6951 struct list_head *dev_list)
6952{
6953 struct netdev_adjacent *adj;
6954
6955 pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
6956 dev->name, adj_dev->name, ref_nr);
6957
6958 adj = __netdev_find_adj(adj_dev, dev_list);
6959
6960 if (!adj) {
6961 pr_err("Adjacency does not exist for device %s from %s\n",
6962 dev->name, adj_dev->name);
6963 WARN_ON(1);
6964 return;
6965 }
6966
6967 if (adj->ref_nr > ref_nr) {
6968 pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
6969 dev->name, adj_dev->name, ref_nr,
6970 adj->ref_nr - ref_nr);
6971 adj->ref_nr -= ref_nr;
6972 return;
6973 }
6974
6975 if (adj->master)
6976 sysfs_remove_link(&(dev->dev.kobj), "master");
6977
6978 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
6979 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
6980
6981 list_del_rcu(&adj->list);
6982 pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
6983 adj_dev->name, dev->name, adj_dev->name);
6984 dev_put(adj_dev);
6985 kfree_rcu(adj, rcu);
6986}
6987
6988static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
6989 struct net_device *upper_dev,
6990 struct list_head *up_list,
6991 struct list_head *down_list,
6992 void *private, bool master)
6993{
6994 int ret;
6995
6996 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
6997 private, master);
6998 if (ret)
6999 return ret;
7000
7001 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
7002 private, false);
7003 if (ret) {
7004 __netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
7005 return ret;
7006 }
7007
7008 return 0;
7009}
7010
7011static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
7012 struct net_device *upper_dev,
7013 u16 ref_nr,
7014 struct list_head *up_list,
7015 struct list_head *down_list)
7016{
7017 __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
7018 __netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
7019}
7020
7021static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
7022 struct net_device *upper_dev,
7023 void *private, bool master)
7024{
7025 return __netdev_adjacent_dev_link_lists(dev, upper_dev,
7026 &dev->adj_list.upper,
7027 &upper_dev->adj_list.lower,
7028 private, master);
7029}
7030
7031static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
7032 struct net_device *upper_dev)
7033{
7034 __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
7035 &dev->adj_list.upper,
7036 &upper_dev->adj_list.lower);
7037}
7038
7039static int __netdev_upper_dev_link(struct net_device *dev,
7040 struct net_device *upper_dev, bool master,
7041 void *upper_priv, void *upper_info,
7042 struct netlink_ext_ack *extack)
7043{
7044 struct netdev_notifier_changeupper_info changeupper_info = {
7045 .info = {
7046 .dev = dev,
7047 .extack = extack,
7048 },
7049 .upper_dev = upper_dev,
7050 .master = master,
7051 .linking = true,
7052 .upper_info = upper_info,
7053 };
7054 struct net_device *master_dev;
7055 int ret = 0;
7056
7057 ASSERT_RTNL();
7058
7059 if (dev == upper_dev)
7060 return -EBUSY;
7061
	/* To prevent loops, check if dev is not upper device to upper_dev. */
7063 if (netdev_has_upper_dev(upper_dev, dev))
7064 return -EBUSY;
7065
7066 if (!master) {
7067 if (netdev_has_upper_dev(dev, upper_dev))
7068 return -EEXIST;
7069 } else {
7070 master_dev = netdev_master_upper_dev_get(dev);
7071 if (master_dev)
7072 return master_dev == upper_dev ? -EEXIST : -EBUSY;
7073 }
7074
7075 ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
7076 &changeupper_info.info);
7077 ret = notifier_to_errno(ret);
7078 if (ret)
7079 return ret;
7080
7081 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
7082 master);
7083 if (ret)
7084 return ret;
7085
7086 ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
7087 &changeupper_info.info);
7088 ret = notifier_to_errno(ret);
7089 if (ret)
7090 goto rollback;
7091
7092 return 0;
7093
7094rollback:
7095 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
7096
7097 return ret;
7098}
7099
/**
 * netdev_upper_dev_link - Add a link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 * @extack: netlink extended ack
 *
 * Adds a link to device which is upper to this one. The caller must hold
 * the RTNL lock. On a failure a negative errno code is returned.
 * On success the reference counts are adjusted and the function
 * returns zero.
 */
7111int netdev_upper_dev_link(struct net_device *dev,
7112 struct net_device *upper_dev,
7113 struct netlink_ext_ack *extack)
7114{
7115 return __netdev_upper_dev_link(dev, upper_dev, false,
7116 NULL, NULL, extack);
7117}
7118EXPORT_SYMBOL(netdev_upper_dev_link);
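
/* Usage sketch (illustrative only): a stacked device such as a VLAN or
 * bonding driver links its lower device under RTNL and must unlink on
 * teardown or error:
 *
 *	ASSERT_RTNL();
 *	err = netdev_upper_dev_link(lower_dev, stacked_dev, extack);
 *	if (err)
 *		goto err_unwind;
 *	...
 *	netdev_upper_dev_unlink(lower_dev, stacked_dev);
 */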
7119
/**
 * netdev_master_upper_dev_link - Add a master link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 * @upper_priv: upper device private
 * @upper_info: upper info to be passed down via notifier
 * @extack: netlink extended ack
 *
 * Adds a link to device which is upper to this one. In this case, only
 * one master upper device can be linked, although other non-master devices
 * might be linked as well. The caller must hold the RTNL lock.
 * On a failure a negative errno code is returned. On success the reference
 * counts are adjusted and the function returns zero.
 */
7134int netdev_master_upper_dev_link(struct net_device *dev,
7135 struct net_device *upper_dev,
7136 void *upper_priv, void *upper_info,
7137 struct netlink_ext_ack *extack)
7138{
7139 return __netdev_upper_dev_link(dev, upper_dev, true,
7140 upper_priv, upper_info, extack);
7141}
7142EXPORT_SYMBOL(netdev_master_upper_dev_link);
7143
/**
 * netdev_upper_dev_unlink - Removes a link to upper device
 * @dev: device
 * @upper_dev: upper device to unlink
 *
 * Removes a link to device which is upper to this one. The caller must hold
 * the RTNL lock.
 */
7152void netdev_upper_dev_unlink(struct net_device *dev,
7153 struct net_device *upper_dev)
7154{
7155 struct netdev_notifier_changeupper_info changeupper_info = {
7156 .info = {
7157 .dev = dev,
7158 },
7159 .upper_dev = upper_dev,
7160 .linking = false,
7161 };
7162
7163 ASSERT_RTNL();
7164
7165 changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
7166
7167 call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
7168 &changeupper_info.info);
7169
7170 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
7171
7172 call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
7173 &changeupper_info.info);
7174}
7175EXPORT_SYMBOL(netdev_upper_dev_unlink);
7176
/**
 * netdev_bonding_info_change - Dispatch event about slave change
 * @dev: device
 * @bonding_info: info to dispatch
 *
 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
 * The caller must hold the RTNL lock.
 */
7185void netdev_bonding_info_change(struct net_device *dev,
7186 struct netdev_bonding_info *bonding_info)
7187{
7188 struct netdev_notifier_bonding_info info = {
7189 .info.dev = dev,
7190 };
7191
7192 memcpy(&info.bonding_info, bonding_info,
7193 sizeof(struct netdev_bonding_info));
7194 call_netdevice_notifiers_info(NETDEV_BONDING_INFO,
7195 &info.info);
7196}
7197EXPORT_SYMBOL(netdev_bonding_info_change);
7198
7199static void netdev_adjacent_add_links(struct net_device *dev)
7200{
7201 struct netdev_adjacent *iter;
7202
7203 struct net *net = dev_net(dev);
7204
7205 list_for_each_entry(iter, &dev->adj_list.upper, list) {
7206 if (!net_eq(net, dev_net(iter->dev)))
7207 continue;
7208 netdev_adjacent_sysfs_add(iter->dev, dev,
7209 &iter->dev->adj_list.lower);
7210 netdev_adjacent_sysfs_add(dev, iter->dev,
7211 &dev->adj_list.upper);
7212 }
7213
7214 list_for_each_entry(iter, &dev->adj_list.lower, list) {
7215 if (!net_eq(net, dev_net(iter->dev)))
7216 continue;
7217 netdev_adjacent_sysfs_add(iter->dev, dev,
7218 &iter->dev->adj_list.upper);
7219 netdev_adjacent_sysfs_add(dev, iter->dev,
7220 &dev->adj_list.lower);
7221 }
7222}
7223
7224static void netdev_adjacent_del_links(struct net_device *dev)
7225{
7226 struct netdev_adjacent *iter;
7227
7228 struct net *net = dev_net(dev);
7229
7230 list_for_each_entry(iter, &dev->adj_list.upper, list) {
7231 if (!net_eq(net, dev_net(iter->dev)))
7232 continue;
7233 netdev_adjacent_sysfs_del(iter->dev, dev->name,
7234 &iter->dev->adj_list.lower);
7235 netdev_adjacent_sysfs_del(dev, iter->dev->name,
7236 &dev->adj_list.upper);
7237 }
7238
7239 list_for_each_entry(iter, &dev->adj_list.lower, list) {
7240 if (!net_eq(net, dev_net(iter->dev)))
7241 continue;
7242 netdev_adjacent_sysfs_del(iter->dev, dev->name,
7243 &iter->dev->adj_list.upper);
7244 netdev_adjacent_sysfs_del(dev, iter->dev->name,
7245 &dev->adj_list.lower);
7246 }
7247}
7248
7249void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
7250{
7251 struct netdev_adjacent *iter;
7252
7253 struct net *net = dev_net(dev);
7254
7255 list_for_each_entry(iter, &dev->adj_list.upper, list) {
7256 if (!net_eq(net, dev_net(iter->dev)))
7257 continue;
7258 netdev_adjacent_sysfs_del(iter->dev, oldname,
7259 &iter->dev->adj_list.lower);
7260 netdev_adjacent_sysfs_add(iter->dev, dev,
7261 &iter->dev->adj_list.lower);
7262 }
7263
7264 list_for_each_entry(iter, &dev->adj_list.lower, list) {
7265 if (!net_eq(net, dev_net(iter->dev)))
7266 continue;
7267 netdev_adjacent_sysfs_del(iter->dev, oldname,
7268 &iter->dev->adj_list.upper);
7269 netdev_adjacent_sysfs_add(iter->dev, dev,
7270 &iter->dev->adj_list.upper);
7271 }
7272}
7273
7274void *netdev_lower_dev_get_private(struct net_device *dev,
7275 struct net_device *lower_dev)
7276{
7277 struct netdev_adjacent *lower;
7278
7279 if (!lower_dev)
7280 return NULL;
7281 lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
7282 if (!lower)
7283 return NULL;
7284
7285 return lower->private;
7286}
7287EXPORT_SYMBOL(netdev_lower_dev_get_private);
7288
/* Depth of device stacking below @dev; used to pick lock nesting levels. */
7290int dev_get_nest_level(struct net_device *dev)
7291{
7292 struct net_device *lower = NULL;
7293 struct list_head *iter;
7294 int max_nest = -1;
7295 int nest;
7296
7297 ASSERT_RTNL();
7298
7299 netdev_for_each_lower_dev(dev, lower, iter) {
7300 nest = dev_get_nest_level(lower);
7301 if (max_nest < nest)
7302 max_nest = nest;
7303 }
7304
7305 return max_nest + 1;
7306}
7307EXPORT_SYMBOL(dev_get_nest_level);
7308
/**
 * netdev_lower_state_changed - Dispatch event about lower device state change
 * @lower_dev: device
 * @lower_state_info: state to dispatch
 *
 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
 * The caller must hold the RTNL lock.
 */
7317void netdev_lower_state_changed(struct net_device *lower_dev,
7318 void *lower_state_info)
7319{
7320 struct netdev_notifier_changelowerstate_info changelowerstate_info = {
7321 .info.dev = lower_dev,
7322 };
7323
7324 ASSERT_RTNL();
7325 changelowerstate_info.lower_state_info = lower_state_info;
7326 call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE,
7327 &changelowerstate_info.info);
7328}
7329EXPORT_SYMBOL(netdev_lower_state_changed);
7330
7331static void dev_change_rx_flags(struct net_device *dev, int flags)
7332{
7333 const struct net_device_ops *ops = dev->netdev_ops;
7334
7335 if (ops->ndo_change_rx_flags)
7336 ops->ndo_change_rx_flags(dev, flags);
7337}
7338
7339static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
7340{
7341 unsigned int old_flags = dev->flags;
7342 kuid_t uid;
7343 kgid_t gid;
7344
7345 ASSERT_RTNL();
7346
7347 dev->flags |= IFF_PROMISC;
7348 dev->promiscuity += inc;
7349 if (dev->promiscuity == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch promisc and return error.
		 */
7354 if (inc < 0)
7355 dev->flags &= ~IFF_PROMISC;
7356 else {
7357 dev->promiscuity -= inc;
7358 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
7359 dev->name);
7360 return -EOVERFLOW;
7361 }
7362 }
7363 if (dev->flags != old_flags) {
7364 pr_info("device %s %s promiscuous mode\n",
7365 dev->name,
7366 dev->flags & IFF_PROMISC ? "entered" : "left");
7367 if (audit_enabled) {
7368 current_uid_gid(&uid, &gid);
7369 audit_log(audit_context(), GFP_ATOMIC,
7370 AUDIT_ANOM_PROMISCUOUS,
7371 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
7372 dev->name, (dev->flags & IFF_PROMISC),
7373 (old_flags & IFF_PROMISC),
7374 from_kuid(&init_user_ns, audit_get_loginuid(current)),
7375 from_kuid(&init_user_ns, uid),
7376 from_kgid(&init_user_ns, gid),
7377 audit_get_sessionid(current));
7378 }
7379
7380 dev_change_rx_flags(dev, IFF_PROMISC);
7381 }
7382 if (notify)
7383 __dev_notify_flags(dev, old_flags, IFF_PROMISC);
7384 return 0;
7385}
7386
/**
 *	dev_set_promiscuity	- update promiscuity count on a device
 *	@dev: device
 *	@inc: modifier
 *
 *	Add or remove promiscuity from a device. While the count in the device
 *	remains above zero the interface remains promiscuous. Once it hits zero
 *	the device reverts back to normal filtering operation. A negative inc
 *	value is used to drop promiscuity on the device.
 *	Return 0 if successful or a negative errno code on error.
 */
7398int dev_set_promiscuity(struct net_device *dev, int inc)
7399{
7400 unsigned int old_flags = dev->flags;
7401 int err;
7402
7403 err = __dev_set_promiscuity(dev, inc, true);
7404 if (err < 0)
7405 return err;
7406 if (dev->flags != old_flags)
7407 dev_set_rx_mode(dev);
7408 return err;
7409}
7410EXPORT_SYMBOL(dev_set_promiscuity);
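
/* Usage sketch (illustrative only): promiscuity is a counter, so every
 * increment must eventually be paired with a matching decrement:
 *
 *	rtnl_lock();
 *	err = dev_set_promiscuity(dev, 1);
 *	rtnl_unlock();
 *	...
 *	rtnl_lock();
 *	if (!err)
 *		dev_set_promiscuity(dev, -1);
 *	rtnl_unlock();
 */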
7411
7412static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
7413{
7414 unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
7415
7416 ASSERT_RTNL();
7417
7418 dev->flags |= IFF_ALLMULTI;
7419 dev->allmulti += inc;
7420 if (dev->allmulti == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch allmulti and return error.
		 */
7425 if (inc < 0)
7426 dev->flags &= ~IFF_ALLMULTI;
7427 else {
7428 dev->allmulti -= inc;
7429 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
7430 dev->name);
7431 return -EOVERFLOW;
7432 }
7433 }
7434 if (dev->flags ^ old_flags) {
7435 dev_change_rx_flags(dev, IFF_ALLMULTI);
7436 dev_set_rx_mode(dev);
7437 if (notify)
7438 __dev_notify_flags(dev, old_flags,
7439 dev->gflags ^ old_gflags);
7440 }
7441 return 0;
7442}
7443
/**
 *	dev_set_allmulti	- update allmulti count on a device
 *	@dev: device
 *	@inc: modifier
 *
 *	Add or remove reception of all multicast frames to a device. While the
 *	count in the device remains above zero the interface remains listening
 *	to all multicast frames. Once it hits zero the device reverts back to
 *	normal filtering operation. A negative @inc value is used to drop the
 *	counter when releasing a resource needing all multicasts.
 *	Return 0 if successful or a negative errno code on error.
 */
7457int dev_set_allmulti(struct net_device *dev, int inc)
7458{
7459 return __dev_set_allmulti(dev, inc, true);
7460}
7461EXPORT_SYMBOL(dev_set_allmulti);
7462
/*
 *	Upload unicast and multicast address lists to device and
 *	configure RX filtering. When the device doesn't support unicast
 *	filtering it is put in promiscuous mode while unicast addresses
 *	are present.
 */
7469void __dev_set_rx_mode(struct net_device *dev)
7470{
7471 const struct net_device_ops *ops = dev->netdev_ops;
7472
	/* dev_open will call this function so the list will stay sane. */
7474 if (!(dev->flags&IFF_UP))
7475 return;
7476
7477 if (!netif_device_present(dev))
7478 return;
7479
7480 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
		/* Unicast addresses changes may only happen under the rtnl,
		 * therefore calling __dev_set_promiscuity here is safe.
		 */
7484 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
7485 __dev_set_promiscuity(dev, 1, false);
7486 dev->uc_promisc = true;
7487 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
7488 __dev_set_promiscuity(dev, -1, false);
7489 dev->uc_promisc = false;
7490 }
7491 }
7492
7493 if (ops->ndo_set_rx_mode)
7494 ops->ndo_set_rx_mode(dev);
7495}
7496
7497void dev_set_rx_mode(struct net_device *dev)
7498{
7499 netif_addr_lock_bh(dev);
7500 __dev_set_rx_mode(dev);
7501 netif_addr_unlock_bh(dev);
7502}
7503
/**
 *	dev_get_flags - get flags reported to userspace
 *	@dev: device
 *
 *	Get the combination of flag bits exported through APIs to userspace.
 */
7510unsigned int dev_get_flags(const struct net_device *dev)
7511{
7512 unsigned int flags;
7513
7514 flags = (dev->flags & ~(IFF_PROMISC |
7515 IFF_ALLMULTI |
7516 IFF_RUNNING |
7517 IFF_LOWER_UP |
7518 IFF_DORMANT)) |
7519 (dev->gflags & (IFF_PROMISC |
7520 IFF_ALLMULTI));
7521
7522 if (netif_running(dev)) {
7523 if (netif_oper_up(dev))
7524 flags |= IFF_RUNNING;
7525 if (netif_carrier_ok(dev))
7526 flags |= IFF_LOWER_UP;
7527 if (netif_dormant(dev))
7528 flags |= IFF_DORMANT;
7529 }
7530
7531 return flags;
7532}
7533EXPORT_SYMBOL(dev_get_flags);
7534
7535int __dev_change_flags(struct net_device *dev, unsigned int flags,
7536 struct netlink_ext_ack *extack)
7537{
7538 unsigned int old_flags = dev->flags;
7539 int ret;
7540
7541 ASSERT_RTNL();
7542
	/*
	 *	Set the flags on our device.
	 */

7547 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
7548 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
7549 IFF_AUTOMEDIA)) |
7550 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
7551 IFF_ALLMULTI));
7552
	/*
	 *	Load in the correct multicast list now the flags have changed.
	 */

7557 if ((old_flags ^ flags) & IFF_MULTICAST)
7558 dev_change_rx_flags(dev, IFF_MULTICAST);
7559
7560 dev_set_rx_mode(dev);
7561
	/*
	 *	Have we downed the interface. We handle IFF_UP ourselves
	 *	according to user attempts to set it, rather than blindly
	 *	setting it.
	 */

7568 ret = 0;
7569 if ((old_flags ^ flags) & IFF_UP) {
7570 if (old_flags & IFF_UP)
7571 __dev_close(dev);
7572 else
7573 ret = __dev_open(dev, extack);
7574 }
7575
7576 if ((flags ^ dev->gflags) & IFF_PROMISC) {
7577 int inc = (flags & IFF_PROMISC) ? 1 : -1;
7578 unsigned int old_flags = dev->flags;
7579
7580 dev->gflags ^= IFF_PROMISC;
7581
7582 if (__dev_set_promiscuity(dev, inc, false) >= 0)
7583 if (dev->flags != old_flags)
7584 dev_set_rx_mode(dev);
7585 }
7586
	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
	 * is important. Some (broken) drivers set IFF_PROMISC, when
	 * IFF_ALLMULTI is requested, without asking us and without reporting.
	 */
7591 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
7592 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
7593
7594 dev->gflags ^= IFF_ALLMULTI;
7595 __dev_set_allmulti(dev, inc, false);
7596 }
7597
7598 return ret;
7599}
7600
7601void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
7602 unsigned int gchanges)
7603{
7604 unsigned int changes = dev->flags ^ old_flags;
7605
7606 if (gchanges)
7607 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
7608
7609 if (changes & IFF_UP) {
7610 if (dev->flags & IFF_UP)
7611 call_netdevice_notifiers(NETDEV_UP, dev);
7612 else
7613 call_netdevice_notifiers(NETDEV_DOWN, dev);
7614 }
7615
7616 if (dev->flags & IFF_UP &&
7617 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
7618 struct netdev_notifier_change_info change_info = {
7619 .info = {
7620 .dev = dev,
7621 },
7622 .flags_changed = changes,
7623 };
7624
7625 call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info);
7626 }
7627}
7628
/**
 *	dev_change_flags - change device settings
 *	@dev: device
 *	@flags: device state flags
 *	@extack: netlink extended ack
 *
 *	Change settings on device based state flags. The flags are
 *	in the userspace exported format.
 */
7638int dev_change_flags(struct net_device *dev, unsigned int flags,
7639 struct netlink_ext_ack *extack)
7640{
7641 int ret;
7642 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
7643
7644 ret = __dev_change_flags(dev, flags, extack);
7645 if (ret < 0)
7646 return ret;
7647
7648 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
7649 __dev_notify_flags(dev, old_flags, changes);
7650 return ret;
7651}
7652EXPORT_SYMBOL(dev_change_flags);
7653
7654int __dev_set_mtu(struct net_device *dev, int new_mtu)
7655{
7656 const struct net_device_ops *ops = dev->netdev_ops;
7657
7658 if (ops->ndo_change_mtu)
7659 return ops->ndo_change_mtu(dev, new_mtu);
7660
7661 dev->mtu = new_mtu;
7662 return 0;
7663}
7664EXPORT_SYMBOL(__dev_set_mtu);
7665
/**
 *	dev_set_mtu_ext - Change maximum transfer unit
 *	@dev: device
 *	@new_mtu: new transfer unit
 *	@extack: netlink extended ack
 *
 *	Change the maximum transfer size of the network device.
 */
7674int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
7675 struct netlink_ext_ack *extack)
7676{
7677 int err, orig_mtu;
7678
7679 if (new_mtu == dev->mtu)
7680 return 0;
7681
	/* MTU must be positive, and in range */
7683 if (new_mtu < 0 || new_mtu < dev->min_mtu) {
7684 NL_SET_ERR_MSG(extack, "mtu less than device minimum");
7685 return -EINVAL;
7686 }
7687
7688 if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
7689 NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
7690 return -EINVAL;
7691 }
7692
7693 if (!netif_device_present(dev))
7694 return -ENODEV;
7695
7696 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
7697 err = notifier_to_errno(err);
7698 if (err)
7699 return err;
7700
7701 orig_mtu = dev->mtu;
7702 err = __dev_set_mtu(dev, new_mtu);
7703
7704 if (!err) {
7705 err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
7706 orig_mtu);
7707 err = notifier_to_errno(err);
7708 if (err) {
			/* setting mtu back and notifying everyone again,
			 * so that they have a chance to revert changes.
			 */
7712 __dev_set_mtu(dev, orig_mtu);
7713 call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
7714 new_mtu);
7715 }
7716 }
7717 return err;
7718}
7719
7720int dev_set_mtu(struct net_device *dev, int new_mtu)
7721{
7722 struct netlink_ext_ack extack;
7723 int err;
7724
7725 memset(&extack, 0, sizeof(extack));
7726 err = dev_set_mtu_ext(dev, new_mtu, &extack);
7727 if (err && extack._msg)
7728 net_err_ratelimited("%s: %s\n", dev->name, extack._msg);
7729 return err;
7730}
7731EXPORT_SYMBOL(dev_set_mtu);
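
/* Usage sketch (illustrative only): callers change the MTU under RTNL;
 * range checking against dev->min_mtu/dev->max_mtu and notifier rollback
 * are handled inside dev_set_mtu_ext():
 *
 *	rtnl_lock();
 *	err = dev_set_mtu(dev, 9000);
 *	rtnl_unlock();
 */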
7732
/**
 *	dev_change_tx_queue_len - Change TX queue length of a netdevice
 *	@dev: device
 *	@new_len: new tx queue length
 */
7738int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
7739{
7740 unsigned int orig_len = dev->tx_queue_len;
7741 int res;
7742
7743 if (new_len != (unsigned int)new_len)
7744 return -ERANGE;
7745
7746 if (new_len != orig_len) {
7747 dev->tx_queue_len = new_len;
7748 res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
7749 res = notifier_to_errno(res);
7750 if (res)
7751 goto err_rollback;
7752 res = dev_qdisc_change_tx_queue_len(dev);
7753 if (res)
7754 goto err_rollback;
7755 }
7756
7757 return 0;
7758
7759err_rollback:
7760 netdev_err(dev, "refused to change device tx_queue_len\n");
7761 dev->tx_queue_len = orig_len;
7762 return res;
7763}
7764
/**
 *	dev_set_group - Change group this device belongs to
 *	@dev: device
 *	@new_group: group this device should belong to
 */
7770void dev_set_group(struct net_device *dev, int new_group)
7771{
7772 dev->group = new_group;
7773}
7774EXPORT_SYMBOL(dev_set_group);
7775
7776
/**
 *	dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR.
 *	@dev: device
 *	@addr: new address
 *	@extack: netlink extended ack
 */
7782int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
7783 struct netlink_ext_ack *extack)
7784{
7785 struct netdev_notifier_pre_changeaddr_info info = {
7786 .info.dev = dev,
7787 .info.extack = extack,
7788 .dev_addr = addr,
7789 };
7790 int rc;
7791
7792 rc = call_netdevice_notifiers_info(NETDEV_PRE_CHANGEADDR, &info.info);
7793 return notifier_to_errno(rc);
7794}
7795EXPORT_SYMBOL(dev_pre_changeaddr_notify);
7796
/**
 *	dev_set_mac_address - Change Media Access Control Address
 *	@dev: device
 *	@sa: new address
 *	@extack: netlink extended ack
 *
 *	Change the hardware (MAC) address of the device
 */
7805int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
7806 struct netlink_ext_ack *extack)
7807{
7808 const struct net_device_ops *ops = dev->netdev_ops;
7809 int err;
7810
7811 if (!ops->ndo_set_mac_address)
7812 return -EOPNOTSUPP;
7813 if (sa->sa_family != dev->type)
7814 return -EINVAL;
7815 if (!netif_device_present(dev))
7816 return -ENODEV;
7817 err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack);
7818 if (err)
7819 return err;
7820 err = ops->ndo_set_mac_address(dev, sa);
7821 if (err)
7822 return err;
7823 dev->addr_assign_type = NET_ADDR_SET;
7824 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
7825 add_device_randomness(dev->dev_addr, dev->addr_len);
7826 return 0;
7827}
7828EXPORT_SYMBOL(dev_set_mac_address);
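
/* Illustrative sketch (not part of this file): the SIOCSIFHWADDR path
 * builds a struct sockaddr whose sa_family must match dev->type (for
 * Ethernet, ARPHRD_ETHER) before calling in under RTNL:
 *
 *	struct sockaddr sa;
 *
 *	sa.sa_family = dev->type;
 *	memcpy(sa.sa_data, new_mac, dev->addr_len);	// new_mac: hypothetical buffer
 *	err = dev_set_mac_address(dev, &sa, NULL);
 *
 * On success a NETDEV_CHANGEADDR notification is raised and the new
 * address feeds the random pool via add_device_randomness().
 */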
7829
/**
 *	dev_change_carrier - Change device carrier
 *	@dev: device
 *	@new_carrier: new value
 *
 *	Change device carrier
 */
7837int dev_change_carrier(struct net_device *dev, bool new_carrier)
7838{
7839 const struct net_device_ops *ops = dev->netdev_ops;
7840
7841 if (!ops->ndo_change_carrier)
7842 return -EOPNOTSUPP;
7843 if (!netif_device_present(dev))
7844 return -ENODEV;
7845 return ops->ndo_change_carrier(dev, new_carrier);
7846}
7847EXPORT_SYMBOL(dev_change_carrier);
7848
/**
 *	dev_get_phys_port_id - Get device physical port ID
 *	@dev: device
 *	@ppid: port ID
 *
 *	Get device physical port ID
 */
7856int dev_get_phys_port_id(struct net_device *dev,
7857 struct netdev_phys_item_id *ppid)
7858{
7859 const struct net_device_ops *ops = dev->netdev_ops;
7860
7861 if (!ops->ndo_get_phys_port_id)
7862 return -EOPNOTSUPP;
7863 return ops->ndo_get_phys_port_id(dev, ppid);
7864}
7865EXPORT_SYMBOL(dev_get_phys_port_id);
7866
/**
 *	dev_get_phys_port_name - Get device physical port name
 *	@dev: device
 *	@name: port name
 *	@len: limit of bytes to copy to name
 *
 *	Get device physical port name
 */
7875int dev_get_phys_port_name(struct net_device *dev,
7876 char *name, size_t len)
7877{
7878 const struct net_device_ops *ops = dev->netdev_ops;
7879 int err;
7880
7881 if (ops->ndo_get_phys_port_name) {
7882 err = ops->ndo_get_phys_port_name(dev, name, len);
7883 if (err != -EOPNOTSUPP)
7884 return err;
7885 }
7886 return devlink_compat_phys_port_name_get(dev, name, len);
7887}
7888EXPORT_SYMBOL(dev_get_phys_port_name);
7889
/**
 *	dev_get_port_parent_id - Get the device's port parent identifier
 *	@dev: network device
 *	@ppid: pointer to a storage for the port's parent identifier
 *	@recurse: whether to recurse to lower devices or not
 *
 *	Get the device's port parent identifier
 */
7898int dev_get_port_parent_id(struct net_device *dev,
7899 struct netdev_phys_item_id *ppid,
7900 bool recurse)
7901{
7902 const struct net_device_ops *ops = dev->netdev_ops;
7903 struct netdev_phys_item_id first = { };
7904 struct net_device *lower_dev;
7905 struct list_head *iter;
7906 int err;
7907
7908 if (ops->ndo_get_port_parent_id) {
7909 err = ops->ndo_get_port_parent_id(dev, ppid);
7910 if (err != -EOPNOTSUPP)
7911 return err;
7912 }
7913
7914 err = devlink_compat_switch_id_get(dev, ppid);
7915 if (!err || err != -EOPNOTSUPP)
7916 return err;
7917
7918 if (!recurse)
7919 return -EOPNOTSUPP;
7920
7921 netdev_for_each_lower_dev(dev, lower_dev, iter) {
7922 err = dev_get_port_parent_id(lower_dev, ppid, recurse);
7923 if (err)
7924 break;
7925 if (!first.id_len)
7926 first = *ppid;
7927 else if (memcmp(&first, ppid, sizeof(*ppid)))
7928 return -ENODATA;
7929 }
7930
7931 return err;
7932}
7933EXPORT_SYMBOL(dev_get_port_parent_id);
7934
/**
 *	netdev_port_same_parent_id - Indicate if two network devices have
 *	the same port parent identifier
 *	@a: first network device
 *	@b: second network device
 */
7941bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b)
7942{
7943 struct netdev_phys_item_id a_id = { };
7944 struct netdev_phys_item_id b_id = { };
7945
7946 if (dev_get_port_parent_id(a, &a_id, true) ||
7947 dev_get_port_parent_id(b, &b_id, true))
7948 return false;
7949
7950 return netdev_phys_item_id_same(&a_id, &b_id);
7951}
7952EXPORT_SYMBOL(netdev_port_same_parent_id);
7953
/**
 *	dev_change_proto_down - update protocol port state information
 *	@dev: device
 *	@proto_down: new value
 *
 *	This info can be used by switch drivers to set the phys state of the
 *	port.
 */
7962int dev_change_proto_down(struct net_device *dev, bool proto_down)
7963{
7964 const struct net_device_ops *ops = dev->netdev_ops;
7965
7966 if (!ops->ndo_change_proto_down)
7967 return -EOPNOTSUPP;
7968 if (!netif_device_present(dev))
7969 return -ENODEV;
7970 return ops->ndo_change_proto_down(dev, proto_down);
7971}
7972EXPORT_SYMBOL(dev_change_proto_down);
7973
/**
 *	dev_change_proto_down_generic - generic implementation for
 *	ndo_change_proto_down that sets carrier according to
 *	proto_down.
 *
 *	@dev: device
 *	@proto_down: new value
 */
7982int dev_change_proto_down_generic(struct net_device *dev, bool proto_down)
7983{
7984 if (proto_down)
7985 netif_carrier_off(dev);
7986 else
7987 netif_carrier_on(dev);
7988 dev->proto_down = proto_down;
7989 return 0;
7990}
7991EXPORT_SYMBOL(dev_change_proto_down_generic);
7992
7993u32 __dev_xdp_query(struct net_device *dev, bpf_op_t bpf_op,
7994 enum bpf_netdev_command cmd)
7995{
7996 struct netdev_bpf xdp;
7997
7998 if (!bpf_op)
7999 return 0;
8000
8001 memset(&xdp, 0, sizeof(xdp));
8002 xdp.command = cmd;
8003
	/* Query must always succeed. */
8005 WARN_ON(bpf_op(dev, &xdp) < 0 && cmd == XDP_QUERY_PROG);
8006
8007 return xdp.prog_id;
8008}
8009
8010static int dev_xdp_install(struct net_device *dev, bpf_op_t bpf_op,
8011 struct netlink_ext_ack *extack, u32 flags,
8012 struct bpf_prog *prog)
8013{
8014 struct netdev_bpf xdp;
8015
8016 memset(&xdp, 0, sizeof(xdp));
8017 if (flags & XDP_FLAGS_HW_MODE)
8018 xdp.command = XDP_SETUP_PROG_HW;
8019 else
8020 xdp.command = XDP_SETUP_PROG;
8021 xdp.extack = extack;
8022 xdp.flags = flags;
8023 xdp.prog = prog;
8024
8025 return bpf_op(dev, &xdp);
8026}
8027
8028static void dev_xdp_uninstall(struct net_device *dev)
8029{
8030 struct netdev_bpf xdp;
8031 bpf_op_t ndo_bpf;
8032
	/* Remove generic XDP */
8034 WARN_ON(dev_xdp_install(dev, generic_xdp_install, NULL, 0, NULL));
8035
	/* Remove from the driver */
8037 ndo_bpf = dev->netdev_ops->ndo_bpf;
8038 if (!ndo_bpf)
8039 return;
8040
8041 memset(&xdp, 0, sizeof(xdp));
8042 xdp.command = XDP_QUERY_PROG;
8043 WARN_ON(ndo_bpf(dev, &xdp));
8044 if (xdp.prog_id)
8045 WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags,
8046 NULL));
8047
	/* Remove HW offload */
8049 memset(&xdp, 0, sizeof(xdp));
8050 xdp.command = XDP_QUERY_PROG_HW;
8051 if (!ndo_bpf(dev, &xdp) && xdp.prog_id)
8052 WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags,
8053 NULL));
8054}
8055
/**
 *	dev_change_xdp_fd - set or clear a bpf program for a device rx path
 *	@dev: device
 *	@extack: netlink extended ack
 *	@fd: new program fd or negative value to clear
 *	@flags: xdp-related flags
 *
 *	Set or clear a bpf program for a device
 */
8065int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
8066 int fd, u32 flags)
8067{
8068 const struct net_device_ops *ops = dev->netdev_ops;
8069 enum bpf_netdev_command query;
8070 struct bpf_prog *prog = NULL;
8071 bpf_op_t bpf_op, bpf_chk;
8072 bool offload;
8073 int err;
8074
8075 ASSERT_RTNL();
8076
8077 offload = flags & XDP_FLAGS_HW_MODE;
8078 query = offload ? XDP_QUERY_PROG_HW : XDP_QUERY_PROG;
8079
8080 bpf_op = bpf_chk = ops->ndo_bpf;
8081 if (!bpf_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE))) {
8082 NL_SET_ERR_MSG(extack, "underlying driver does not support XDP in native mode");
8083 return -EOPNOTSUPP;
8084 }
8085 if (!bpf_op || (flags & XDP_FLAGS_SKB_MODE))
8086 bpf_op = generic_xdp_install;
8087 if (bpf_op == bpf_chk)
8088 bpf_chk = generic_xdp_install;
8089
8090 if (fd >= 0) {
8091 if (!offload && __dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG)) {
8092 NL_SET_ERR_MSG(extack, "native and generic XDP can't be active at the same time");
8093 return -EEXIST;
8094 }
8095 if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) &&
8096 __dev_xdp_query(dev, bpf_op, query)) {
8097 NL_SET_ERR_MSG(extack, "XDP program already attached");
8098 return -EBUSY;
8099 }
8100
8101 prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
8102 bpf_op == ops->ndo_bpf);
8103 if (IS_ERR(prog))
8104 return PTR_ERR(prog);
8105
8106 if (!offload && bpf_prog_is_dev_bound(prog->aux)) {
8107 NL_SET_ERR_MSG(extack, "using device-bound program without HW_MODE flag is not supported");
8108 bpf_prog_put(prog);
8109 return -EINVAL;
8110 }
8111 }
8112
8113 err = dev_xdp_install(dev, bpf_op, extack, flags, prog);
8114 if (err < 0 && prog)
8115 bpf_prog_put(prog);
8116
8117 return err;
8118}
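
/* Illustrative sketch (not part of this file): the rtnetlink IFLA_XDP
 * handler maps the mode requested by userspace onto these flags, e.g.
 * a generic (skb) mode attach followed later by a detach:
 *
 *	err = dev_change_xdp_fd(dev, extack, prog_fd, XDP_FLAGS_SKB_MODE);
 *	...
 *	err = dev_change_xdp_fd(dev, extack, -1, 0);
 *
 * A negative fd leaves prog == NULL above, which dev_xdp_install()
 * treats as a request to remove the program.
 */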
8119
/**
 *	dev_new_index	-	allocate an ifindex
 *	@net: the applicable net namespace
 *
 *	Returns a suitable unique value for a new device interface
 *	number.  The caller must hold the rtnl semaphore or the
 *	dev_base_lock to be sure it remains unique.
 */
8128static int dev_new_index(struct net *net)
8129{
8130 int ifindex = net->ifindex;
8131
8132 for (;;) {
8133 if (++ifindex <= 0)
8134 ifindex = 1;
8135 if (!__dev_get_by_index(net, ifindex))
8136 return net->ifindex = ifindex;
8137 }
8138}
8139
/* Delayed registration/unregistration */
8141static LIST_HEAD(net_todo_list);
8142DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
8143
8144static void net_set_todo(struct net_device *dev)
8145{
8146 list_add_tail(&dev->todo_list, &net_todo_list);
8147 dev_net(dev)->dev_unreg_count++;
8148}
8149
8150static void rollback_registered_many(struct list_head *head)
8151{
8152 struct net_device *dev, *tmp;
8153 LIST_HEAD(close_head);
8154
8155 BUG_ON(dev_boot_phase);
8156 ASSERT_RTNL();
8157
8158 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
		/* Some devices call without registering
		 * for initialization unwind. Remove those
		 * devices and proceed with the remaining.
		 */
8163 if (dev->reg_state == NETREG_UNINITIALIZED) {
8164 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
8165 dev->name, dev);
8166
8167 WARN_ON(1);
8168 list_del(&dev->unreg_list);
8169 continue;
8170 }
8171 dev->dismantle = true;
8172 BUG_ON(dev->reg_state != NETREG_REGISTERED);
8173 }
8174
	/* If device is running, close it first. */
8176 list_for_each_entry(dev, head, unreg_list)
8177 list_add_tail(&dev->close_list, &close_head);
8178 dev_close_many(&close_head, true);
8179
8180 list_for_each_entry(dev, head, unreg_list) {
		/* And unlink it from device chain. */
8182 unlist_netdevice(dev);
8183
8184 dev->reg_state = NETREG_UNREGISTERING;
8185 }
8186 flush_all_backlogs();
8187
8188 synchronize_net();
8189
8190 list_for_each_entry(dev, head, unreg_list) {
8191 struct sk_buff *skb = NULL;
8192
		/* Shutdown queueing discipline. */
8194 dev_shutdown(dev);
8195
8196 dev_xdp_uninstall(dev);
8197
		/* Notify protocols, that we are about to destroy
		 * this device. They should clean all the things.
		 */
8201 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
8202
8203 if (!dev->rtnl_link_ops ||
8204 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
8205 skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
8206 GFP_KERNEL, NULL, 0);
8207
		/*
		 *	Flush the unicast and multicast chains
		 */
8211 dev_uc_flush(dev);
8212 dev_mc_flush(dev);
8213
8214 if (dev->netdev_ops->ndo_uninit)
8215 dev->netdev_ops->ndo_uninit(dev);
8216
8217 if (skb)
8218 rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
8219
		/* Notifier chain MUST detach us all upper devices. */
8221 WARN_ON(netdev_has_any_upper_dev(dev));
8222 WARN_ON(netdev_has_any_lower_dev(dev));
8223
		/* Remove entries from kobject tree */
8225 netdev_unregister_kobject(dev);
8226#ifdef CONFIG_XPS
		/* Remove XPS queueing entries */
8228 netif_reset_xps_queues_gt(dev, 0);
8229#endif
8230 }
8231
8232 synchronize_net();
8233
8234 list_for_each_entry(dev, head, unreg_list)
8235 dev_put(dev);
8236}
8237
8238static void rollback_registered(struct net_device *dev)
8239{
8240 LIST_HEAD(single);
8241
8242 list_add(&dev->unreg_list, &single);
8243 rollback_registered_many(&single);
8244 list_del(&single);
8245}
8246
8247static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
8248 struct net_device *upper, netdev_features_t features)
8249{
8250 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
8251 netdev_features_t feature;
8252 int feature_bit;
8253
8254 for_each_netdev_feature(upper_disables, feature_bit) {
8255 feature = __NETIF_F_BIT(feature_bit);
8256 if (!(upper->wanted_features & feature)
8257 && (features & feature)) {
8258 netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
8259 &feature, upper->name);
8260 features &= ~feature;
8261 }
8262 }
8263
8264 return features;
8265}
8266
8267static void netdev_sync_lower_features(struct net_device *upper,
8268 struct net_device *lower, netdev_features_t features)
8269{
8270 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
8271 netdev_features_t feature;
8272 int feature_bit;
8273
8274 for_each_netdev_feature(upper_disables, feature_bit) {
8275 feature = __NETIF_F_BIT(feature_bit);
8276 if (!(features & feature) && (lower->features & feature)) {
8277 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
8278 &feature, lower->name);
8279 lower->wanted_features &= ~feature;
8280 netdev_update_features(lower);
8281
8282 if (unlikely(lower->features & feature))
8283 netdev_WARN(upper, "failed to disable %pNF on %s!\n",
8284 &feature, lower->name);
8285 }
8286 }
8287}
8288
8289static netdev_features_t netdev_fix_features(struct net_device *dev,
8290 netdev_features_t features)
8291{
	/* Fix illegal checksum combinations */
8293 if ((features & NETIF_F_HW_CSUM) &&
8294 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
8295 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
8296 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
8297 }
8298
	/* TSO requires that SG is present as well. */
8300 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
8301 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
8302 features &= ~NETIF_F_ALL_TSO;
8303 }
8304
8305 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
8306 !(features & NETIF_F_IP_CSUM)) {
8307 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
8308 features &= ~NETIF_F_TSO;
8309 features &= ~NETIF_F_TSO_ECN;
8310 }
8311
8312 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
8313 !(features & NETIF_F_IPV6_CSUM)) {
8314 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
8315 features &= ~NETIF_F_TSO6;
8316 }
8317
	/* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
8319 if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
8320 features &= ~NETIF_F_TSO_MANGLEID;
8321
	/* TSO ECN requires that TSO is present as well. */
8323 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
8324 features &= ~NETIF_F_TSO_ECN;
8325
	/* Software GSO depends on SG. */
8327 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
8328 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
8329 features &= ~NETIF_F_GSO;
8330 }
8331
	/* GSO partial features require GSO partial be set */
8333 if ((features & dev->gso_partial_features) &&
8334 !(features & NETIF_F_GSO_PARTIAL)) {
8335 netdev_dbg(dev,
8336 "Dropping partially supported GSO features since no GSO partial.\n");
8337 features &= ~dev->gso_partial_features;
8338 }
8339
8340 if (!(features & NETIF_F_RXCSUM)) {
		/* NETIF_F_GRO_HW implies doing RXCSUM since every packet
		 * successfully merged by hardware must also have the
		 * checksum verified by hardware.  If the user does not
		 * want to enable RXCSUM, logically, we should disable GRO_HW.
		 */
8346 if (features & NETIF_F_GRO_HW) {
8347 netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n");
8348 features &= ~NETIF_F_GRO_HW;
8349 }
8350 }
8351
	/* LRO/HW-GRO features cannot be combined with RX-FCS */
8353 if (features & NETIF_F_RXFCS) {
8354 if (features & NETIF_F_LRO) {
8355 netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n");
8356 features &= ~NETIF_F_LRO;
8357 }
8358
8359 if (features & NETIF_F_GRO_HW) {
8360 netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n");
8361 features &= ~NETIF_F_GRO_HW;
8362 }
8363 }
8364
8365 return features;
8366}
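
/* Illustrative sketch (hypothetical driver, not part of this file):
 * drivers with hardware-specific constraints express them through
 * ndo_fix_features, which __netdev_update_features() runs just before
 * the generic fixups above:
 *
 *	static netdev_features_t foo_fix_features(struct net_device *dev,
 *						  netdev_features_t features)
 *	{
 *		// hypothetical device cannot do TSO with VLAN tx offload on
 *		if (features & NETIF_F_HW_VLAN_CTAG_TX)
 *			features &= ~NETIF_F_ALL_TSO;
 *		return features;
 *	}
 */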
8367
8368int __netdev_update_features(struct net_device *dev)
8369{
8370 struct net_device *upper, *lower;
8371 netdev_features_t features;
8372 struct list_head *iter;
8373 int err = -1;
8374
8375 ASSERT_RTNL();
8376
8377 features = netdev_get_wanted_features(dev);
8378
8379 if (dev->netdev_ops->ndo_fix_features)
8380 features = dev->netdev_ops->ndo_fix_features(dev, features);
8381
	/* driver might be less strict about feature dependencies */
8383 features = netdev_fix_features(dev, features);
8384
	/* some features can't be enabled if they're off on an upper device */
8386 netdev_for_each_upper_dev_rcu(dev, upper, iter)
8387 features = netdev_sync_upper_features(dev, upper, features);
8388
8389 if (dev->features == features)
8390 goto sync_lower;
8391
8392 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
8393 &dev->features, &features);
8394
8395 if (dev->netdev_ops->ndo_set_features)
8396 err = dev->netdev_ops->ndo_set_features(dev, features);
8397 else
8398 err = 0;
8399
8400 if (unlikely(err < 0)) {
8401 netdev_err(dev,
8402 "set_features() failed (%d); wanted %pNF, left %pNF\n",
8403 err, &features, &dev->features);
		/* return non-0 since some features might have changed and
		 * it's better to fire a spurious notification than miss it
		 */
8407 return -1;
8408 }
8409
8410sync_lower:
	/* some features must be disabled on lower devices when disabled
	 * on an upper device (think: bonding master or bridge)
	 */
8414 netdev_for_each_lower_dev(dev, lower, iter)
8415 netdev_sync_lower_features(dev, lower, features);
8416
8417 if (!err) {
8418 netdev_features_t diff = features ^ dev->features;
8419
8420 if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) {
			/* udp_tunnel_{get,drop}_rx_info both need
			 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the
			 * device, or they won't do anything.
			 * Thus we need to update dev->features
			 * *before* calling udp_tunnel_get_rx_info,
			 * but *after* calling udp_tunnel_drop_rx_info.
			 */
8428 if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
8429 dev->features = features;
8430 udp_tunnel_get_rx_info(dev);
8431 } else {
8432 udp_tunnel_drop_rx_info(dev);
8433 }
8434 }
8435
8436 if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) {
8437 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
8438 dev->features = features;
8439 err |= vlan_get_rx_ctag_filter_info(dev);
8440 } else {
8441 vlan_drop_rx_ctag_filter_info(dev);
8442 }
8443 }
8444
8445 if (diff & NETIF_F_HW_VLAN_STAG_FILTER) {
8446 if (features & NETIF_F_HW_VLAN_STAG_FILTER) {
8447 dev->features = features;
8448 err |= vlan_get_rx_stag_filter_info(dev);
8449 } else {
8450 vlan_drop_rx_stag_filter_info(dev);
8451 }
8452 }
8453
8454 dev->features = features;
8455 }
8456
8457 return err < 0 ? 0 : 1;
8458}
8459
/**
 *	netdev_update_features - recalculate device features
 *	@dev: the device to check
 *
 *	Recalculate dev->features set and send notifications if it
 *	has changed. Should be called after driver or hardware dependent
 *	conditions might have changed that influence the features.
 */
8468void netdev_update_features(struct net_device *dev)
8469{
8470 if (__netdev_update_features(dev))
8471 netdev_features_change(dev);
8472}
8473EXPORT_SYMBOL(netdev_update_features);
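
/* Illustrative sketch (not part of this file): code that must turn an
 * offload off at runtime clears it from wanted_features and lets the
 * core re-evaluate; dev_disable_lro() works along these lines:
 *
 *	dev->wanted_features &= ~NETIF_F_LRO;
 *	netdev_update_features(dev);	// caller holds RTNL
 */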
8474
/**
 *	netdev_change_features - recalculate device features
 *	@dev: the device to check
 *
 *	Recalculate dev->features set and send notifications even
 *	if they have not changed. Should be called instead of
 *	netdev_update_features() if also dev->vlan_features might
 *	have changed to allow the changes to be propagated to stacked
 *	VLAN devices.
 */
8485void netdev_change_features(struct net_device *dev)
8486{
8487 __netdev_update_features(dev);
8488 netdev_features_change(dev);
8489}
8490EXPORT_SYMBOL(netdev_change_features);
8491
/**
 *	netif_stacked_transfer_operstate -	transfer operstate
 *	@rootdev: the root or lower level device to transfer state from
 *	@dev: the device to transfer operstate to
 *
 *	Transfer operational state from root to device. This is normally
 *	called when a stacking relationship exists between the root
 *	device and the device (a leaf device).
 */
8501void netif_stacked_transfer_operstate(const struct net_device *rootdev,
8502 struct net_device *dev)
8503{
8504 if (rootdev->operstate == IF_OPER_DORMANT)
8505 netif_dormant_on(dev);
8506 else
8507 netif_dormant_off(dev);
8508
8509 if (netif_carrier_ok(rootdev))
8510 netif_carrier_on(dev);
8511 else
8512 netif_carrier_off(dev);
8513}
8514EXPORT_SYMBOL(netif_stacked_transfer_operstate);
8515
8516static int netif_alloc_rx_queues(struct net_device *dev)
8517{
8518 unsigned int i, count = dev->num_rx_queues;
8519 struct netdev_rx_queue *rx;
8520 size_t sz = count * sizeof(*rx);
8521 int err = 0;
8522
8523 BUG_ON(count < 1);
8524
8525 rx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
8526 if (!rx)
8527 return -ENOMEM;
8528
8529 dev->_rx = rx;
8530
8531 for (i = 0; i < count; i++) {
8532 rx[i].dev = dev;
8533
		/* XDP RX-queue setup */
8535 err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i);
8536 if (err < 0)
8537 goto err_rxq_info;
8538 }
8539 return 0;
8540
8541err_rxq_info:
	/* Rollback successful reg's and free other resources */
8543 while (i--)
8544 xdp_rxq_info_unreg(&rx[i].xdp_rxq);
8545 kvfree(dev->_rx);
8546 dev->_rx = NULL;
8547 return err;
8548}
8549
8550static void netif_free_rx_queues(struct net_device *dev)
8551{
8552 unsigned int i, count = dev->num_rx_queues;
8553
	/* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */
8555 if (!dev->_rx)
8556 return;
8557
8558 for (i = 0; i < count; i++)
8559 xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq);
8560
8561 kvfree(dev->_rx);
8562}
8563
8564static void netdev_init_one_queue(struct net_device *dev,
8565 struct netdev_queue *queue, void *_unused)
8566{
	/* Initialize queue lock */
8568 spin_lock_init(&queue->_xmit_lock);
8569 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
8570 queue->xmit_lock_owner = -1;
8571 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
8572 queue->dev = dev;
8573#ifdef CONFIG_BQL
8574 dql_init(&queue->dql, HZ);
8575#endif
8576}
8577
8578static void netif_free_tx_queues(struct net_device *dev)
8579{
8580 kvfree(dev->_tx);
8581}
8582
8583static int netif_alloc_netdev_queues(struct net_device *dev)
8584{
8585 unsigned int count = dev->num_tx_queues;
8586 struct netdev_queue *tx;
8587 size_t sz = count * sizeof(*tx);
8588
8589 if (count < 1 || count > 0xffff)
8590 return -EINVAL;
8591
8592 tx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
8593 if (!tx)
8594 return -ENOMEM;
8595
8596 dev->_tx = tx;
8597
8598 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
8599 spin_lock_init(&dev->tx_global_lock);
8600
8601 return 0;
8602}
8603
8604void netif_tx_stop_all_queues(struct net_device *dev)
8605{
8606 unsigned int i;
8607
8608 for (i = 0; i < dev->num_tx_queues; i++) {
8609 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
8610
8611 netif_tx_stop_queue(txq);
8612 }
8613}
8614EXPORT_SYMBOL(netif_tx_stop_all_queues);
8615
/**
 *	register_netdevice	- register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	Callers must hold the rtnl semaphore. You may want
 *	register_netdev() instead of this.
 *
 *	BUGS:
 *	The locking appears insufficient to guarantee two parallel registers
 *	will not get the same name.
 */
8633int register_netdevice(struct net_device *dev)
8634{
8635 int ret;
8636 struct net *net = dev_net(dev);
8637
8638 BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE <
8639 NETDEV_FEATURE_COUNT);
8640 BUG_ON(dev_boot_phase);
8641 ASSERT_RTNL();
8642
8643 might_sleep();
8644
	/* When net_device's are persistent, this will be fatal. */
8646 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
8647 BUG_ON(!net);
8648
8649 spin_lock_init(&dev->addr_list_lock);
8650 netdev_set_addr_lockdep_class(dev);
8651
8652 ret = dev_get_valid_name(net, dev, dev->name);
8653 if (ret < 0)
8654 goto out;
8655
	/* Init, if this function is available */
8657 if (dev->netdev_ops->ndo_init) {
8658 ret = dev->netdev_ops->ndo_init(dev);
8659 if (ret) {
8660 if (ret > 0)
8661 ret = -EIO;
8662 goto out;
8663 }
8664 }
8665
8666 if (((dev->hw_features | dev->features) &
8667 NETIF_F_HW_VLAN_CTAG_FILTER) &&
8668 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
8669 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
8670 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
8671 ret = -EINVAL;
8672 goto err_uninit;
8673 }
8674
8675 ret = -EBUSY;
8676 if (!dev->ifindex)
8677 dev->ifindex = dev_new_index(net);
8678 else if (__dev_get_by_index(net, dev->ifindex))
8679 goto err_uninit;
8680
	/* Transfer changeable features to wanted_features and enable
	 * software offloads (GSO and GRO).
	 */
8684 dev->hw_features |= NETIF_F_SOFT_FEATURES;
8685 dev->features |= NETIF_F_SOFT_FEATURES;
8686
8687 if (dev->netdev_ops->ndo_udp_tunnel_add) {
8688 dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
8689 dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
8690 }
8691
8692 dev->wanted_features = dev->features & dev->hw_features;
8693
8694 if (!(dev->flags & IFF_LOOPBACK))
8695 dev->hw_features |= NETIF_F_NOCACHE_COPY;
8696
	/* If IPv4 TCP segmentation offload is supported we should also
	 * allow the device to enable segmenting the frame with the option
	 * of ignoring a static IP ID value.  This doesn't enable the
	 * feature itself but allows the user to enable it later.
	 */
8702 if (dev->hw_features & NETIF_F_TSO)
8703 dev->hw_features |= NETIF_F_TSO_MANGLEID;
8704 if (dev->vlan_features & NETIF_F_TSO)
8705 dev->vlan_features |= NETIF_F_TSO_MANGLEID;
8706 if (dev->mpls_features & NETIF_F_TSO)
8707 dev->mpls_features |= NETIF_F_TSO_MANGLEID;
8708 if (dev->hw_enc_features & NETIF_F_TSO)
8709 dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
8710
	/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
	 */
8713 dev->vlan_features |= NETIF_F_HIGHDMA;
8714
	/* Make NETIF_F_SG inheritable to tunnel devices.
	 */
8717 dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;
8718
	/* Make NETIF_F_SG inheritable to MPLS.
	 */
8721 dev->mpls_features |= NETIF_F_SG;
8722
8723 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
8724 ret = notifier_to_errno(ret);
8725 if (ret)
8726 goto err_uninit;
8727
8728 ret = netdev_register_kobject(dev);
8729 if (ret)
8730 goto err_uninit;
8731 dev->reg_state = NETREG_REGISTERED;
8732
8733 __netdev_update_features(dev);
8734
	/*
	 *	Default initial state at registry is that the
	 *	device is present.
	 */
8739
8740 set_bit(__LINK_STATE_PRESENT, &dev->state);
8741
8742 linkwatch_init_dev(dev);
8743
8744 dev_init_scheduler(dev);
8745 dev_hold(dev);
8746 list_netdevice(dev);
8747 add_device_randomness(dev->dev_addr, dev->addr_len);
8748
	/* If the device has permanent device address, driver should
	 * set dev_addr and also addr_assign_type should be set to
	 * NET_ADDR_PERM (default value).
	 */
8753 if (dev->addr_assign_type == NET_ADDR_PERM)
8754 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
8755
	/* Notify protocols, that a new device appeared. */
8757 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
8758 ret = notifier_to_errno(ret);
8759 if (ret) {
8760 rollback_registered(dev);
8761 rcu_barrier();
8762
8763 dev->reg_state = NETREG_UNREGISTERED;
8764 }
8765
	/*
	 *	Prevent userspace races by waiting until the network
	 *	device is fully setup before sending notifications.
	 */
8769 if (!dev->rtnl_link_ops ||
8770 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
8771 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
8772
8773out:
8774 return ret;
8775
8776err_uninit:
8777 if (dev->netdev_ops->ndo_uninit)
8778 dev->netdev_ops->ndo_uninit(dev);
8779 if (dev->priv_destructor)
8780 dev->priv_destructor(dev);
8781 goto out;
8782}
8783EXPORT_SYMBOL(register_netdevice);
8784
/**
 *	init_dummy_netdev	- init a dummy network device for NAPI
 *	@dev: device to init
 *
 *	This takes a network device structure and initializes the minimum
 *	amount of fields so it can be used to schedule NAPI polls without
 *	registering a full blown interface. This is to be used by drivers
 *	that need to tie several hardware interfaces to a single NAPI
 *	poll scheduler due to HW limitations.
 */
8795int init_dummy_netdev(struct net_device *dev)
8796{
	/* Clear everything. Note we don't initialize spinlocks
	 * as they aren't supposed to be taken by any of the
	 * NAPI code and this dummy netdev is supposed to be
	 * only ever used for NAPI polls
	 */
8802 memset(dev, 0, sizeof(struct net_device));
8803
	/* make sure we BUG if trying to hit standard
	 * register/unregister code path
	 */
8807 dev->reg_state = NETREG_DUMMY;
8808
	/* NAPI wants this */
8810 INIT_LIST_HEAD(&dev->napi_list);
8811
	/* a dummy interface is started by default */
8813 set_bit(__LINK_STATE_PRESENT, &dev->state);
8814 set_bit(__LINK_STATE_START, &dev->state);
8815
	/* napi_busy_loop stats accounting wants this */
8817 dev_net_set(dev, &init_net);
8818
	/* Note : We don't allocate pcpu_refcnt for dummy devices,
	 * because users of this 'device' don't need to change
	 * its refcount.
	 */
8823
8824 return 0;
8825}
8826EXPORT_SYMBOL_GPL(init_dummy_netdev);
8827
/**
 *	register_netdev	- register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	This is a wrapper around register_netdevice that takes the rtnl semaphore
 *	and expands the device name if you passed a format string to
 *	alloc_netdev.
 */
8842int register_netdev(struct net_device *dev)
8843{
8844 int err;
8845
8846 if (rtnl_lock_killable())
8847 return -EINTR;
8848 err = register_netdevice(dev);
8849 rtnl_unlock();
8850 return err;
8851}
8852EXPORT_SYMBOL(register_netdev);
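
/* Illustrative sketch (hypothetical driver, not part of this file):
 * the usual probe-time sequence pairs allocation with registration and
 * unwinds on failure:
 *
 *	dev = alloc_etherdev(sizeof(struct foo_priv));
 *	if (!dev)
 *		return -ENOMEM;
 *	// ... set netdev_ops, features, MAC address ...
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 */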
8853
8854int netdev_refcnt_read(const struct net_device *dev)
8855{
8856 int i, refcnt = 0;
8857
8858 for_each_possible_cpu(i)
8859 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
8860 return refcnt;
8861}
8862EXPORT_SYMBOL(netdev_refcnt_read);
8863
/**
 * netdev_wait_allrefs - wait until all references are gone.
 * @dev: target net_device
 *
 * This is called when unregistering network devices.
 *
 * Any protocol or device that holds a reference should register
 * for netdevice notification, and cleanup and put back the
 * reference if they receive an UNREGISTER event.
 * We can get stuck here if buggy protocols don't correctly
 * call dev_put.
 */
8876static void netdev_wait_allrefs(struct net_device *dev)
8877{
8878 unsigned long rebroadcast_time, warning_time;
8879 int refcnt;
8880
8881 linkwatch_forget_dev(dev);
8882
8883 rebroadcast_time = warning_time = jiffies;
8884 refcnt = netdev_refcnt_read(dev);
8885
8886 while (refcnt != 0) {
8887 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
8888 rtnl_lock();
8889
			/* Rebroadcast unregister notification */
8891 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
8892
8893 __rtnl_unlock();
8894 rcu_barrier();
8895 rtnl_lock();
8896
8897 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
8898 &dev->state)) {
				/* We must not have linkwatch events
				 * pending on unregister. If this
				 * happens, we simply run the queue
				 * unscheduled, resulting in a noop
				 * for this device.
				 */
8905 linkwatch_run_queue();
8906 }
8907
8908 __rtnl_unlock();
8909
8910 rebroadcast_time = jiffies;
8911 }
8912
8913 msleep(250);
8914
8915 refcnt = netdev_refcnt_read(dev);
8916
8917 if (refcnt && time_after(jiffies, warning_time + 10 * HZ)) {
8918 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
8919 dev->name, refcnt);
8920 warning_time = jiffies;
8921 }
8922 }
8923}
8924
/* The sequence is:
 *
 *	rtnl_lock();
 *	...
 *	register_netdevice(x1);
 *	register_netdevice(x2);
 *	...
 *	unregister_netdevice(y1);
 *	unregister_netdevice(y2);
 *	...
 *	rtnl_unlock();
 *	free_netdev(y1);
 *	free_netdev(y2);
 *
 * We are invoked by rtnl_unlock().
 * This allows us to deal with problems:
 * 1) We can delete sysfs objects which invoke hotplug
 *    without deadlocking with linkwatch via keventd.
 * 2) Since we run with the RTNL semaphore not held, we can sleep
 *    safely in order to wait for the netdev refcnt to drop to zero.
 *
 * We must not return until all unregister events added during
 * the interval the lock was held have been completed.
 */
8949void netdev_run_todo(void)
8950{
8951 struct list_head list;
8952
	/* Snapshot list, allow later requests */
8954 list_replace_init(&net_todo_list, &list);
8955
8956 __rtnl_unlock();
8957
8958
	/* Wait for rcu callbacks to finish before next phase */
8960 if (!list_empty(&list))
8961 rcu_barrier();
8962
8963 while (!list_empty(&list)) {
8964 struct net_device *dev
8965 = list_first_entry(&list, struct net_device, todo_list);
8966 list_del(&dev->todo_list);
8967
8968 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
8969 pr_err("network todo '%s' but state %d\n",
8970 dev->name, dev->reg_state);
8971 dump_stack();
8972 continue;
8973 }
8974
8975 dev->reg_state = NETREG_UNREGISTERED;
8976
8977 netdev_wait_allrefs(dev);
8978
		/* paranoia */
8980 BUG_ON(netdev_refcnt_read(dev));
8981 BUG_ON(!list_empty(&dev->ptype_all));
8982 BUG_ON(!list_empty(&dev->ptype_specific));
8983 WARN_ON(rcu_access_pointer(dev->ip_ptr));
8984 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
8985#if IS_ENABLED(CONFIG_DECNET)
8986 WARN_ON(dev->dn_ptr);
8987#endif
8988 if (dev->priv_destructor)
8989 dev->priv_destructor(dev);
8990 if (dev->needs_free_netdev)
8991 free_netdev(dev);
8992
		/* Report a network device has been unregistered */
8994 rtnl_lock();
8995 dev_net(dev)->dev_unreg_count--;
8996 __rtnl_unlock();
8997 wake_up(&netdev_unregistering_wq);
8998
		/* Free network device */
9000 kobject_put(&dev->dev.kobj);
9001 }
9002}
9003
/* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
 * all the same fields in the same order as net_device_stats, with only
 * the type differing, but rtnl_link_stats64 may have additional fields
 * at the end for newer counters.
 */
9009void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
9010 const struct net_device_stats *netdev_stats)
9011{
9012#if BITS_PER_LONG == 64
9013 BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
9014 memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
	/* zero out counters that only exist in rtnl_link_stats64 */
9016 memset((char *)stats64 + sizeof(*netdev_stats), 0,
9017 sizeof(*stats64) - sizeof(*netdev_stats));
9018#else
9019 size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
9020 const unsigned long *src = (const unsigned long *)netdev_stats;
9021 u64 *dst = (u64 *)stats64;
9022
9023 BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
9024 for (i = 0; i < n; i++)
9025 dst[i] = src[i];
	/* zero out counters that only exist in rtnl_link_stats64 */
9027 memset((char *)stats64 + n * sizeof(u64), 0,
9028 sizeof(*stats64) - n * sizeof(u64));
9029#endif
9030}
9031EXPORT_SYMBOL(netdev_stats_to_stats64);
9032
/**
 *	dev_get_stats	- get network device statistics
 *	@dev: device to get statistics from
 *	@storage: place to store stats
 *
 *	Get network statistics from device. Return @storage.
 *	The device driver may provide its own method by setting
 *	dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
 *	otherwise the internal statistics structure is used.
 */
9043struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
9044 struct rtnl_link_stats64 *storage)
9045{
9046 const struct net_device_ops *ops = dev->netdev_ops;
9047
9048 if (ops->ndo_get_stats64) {
9049 memset(storage, 0, sizeof(*storage));
9050 ops->ndo_get_stats64(dev, storage);
9051 } else if (ops->ndo_get_stats) {
9052 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
9053 } else {
9054 netdev_stats_to_stats64(storage, &dev->stats);
9055 }
9056 storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
9057 storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
9058 storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
9059 return storage;
9060}
9061EXPORT_SYMBOL(dev_get_stats);
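
/* Illustrative sketch (hypothetical driver, not part of this file):
 * drivers keeping full 64-bit counters implement ndo_get_stats64 and
 * fill in the storage dev_get_stats() zeroed for them:
 *
 *	static void foo_get_stats64(struct net_device *dev,
 *				    struct rtnl_link_stats64 *stats)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		stats->rx_packets = priv->rx_packets;
 *		stats->tx_packets = priv->tx_packets;
 *	}
 *
 * Legacy drivers may instead update dev->stats and rely on the
 * netdev_stats_to_stats64() fallback above.
 */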
9062
9063struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
9064{
9065 struct netdev_queue *queue = dev_ingress_queue(dev);
9066
9067#ifdef CONFIG_NET_CLS_ACT
9068 if (queue)
9069 return queue;
9070 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
9071 if (!queue)
9072 return NULL;
9073 netdev_init_one_queue(dev, queue, NULL);
9074 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
9075 queue->qdisc_sleeping = &noop_qdisc;
9076 rcu_assign_pointer(dev->ingress_queue, queue);
9077#endif
9078 return queue;
9079}
9080
9081static const struct ethtool_ops default_ethtool_ops;
9082
9083void netdev_set_default_ethtool_ops(struct net_device *dev,
9084 const struct ethtool_ops *ops)
9085{
9086 if (dev->ethtool_ops == &default_ethtool_ops)
9087 dev->ethtool_ops = ops;
9088}
9089EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
9090
9091void netdev_freemem(struct net_device *dev)
9092{
9093 char *addr = (char *)dev - dev->padded;
9094
9095 kvfree(addr);
9096}
9097
/**
 * alloc_netdev_mqs - allocate network device
 * @sizeof_priv: size of private data to allocate space for
 * @name: device name format string
 * @name_assign_type: origin of device name
 * @setup: callback to initialize device
 * @txqs: the number of TX subqueues to allocate
 * @rxqs: the number of RX subqueues to allocate
 *
 * Allocates a struct net_device with private data area for driver use
 * and performs basic initialization.  Also allocates subqueue structs
 * for each queue on the device.
 */
9111struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
9112 unsigned char name_assign_type,
9113 void (*setup)(struct net_device *),
9114 unsigned int txqs, unsigned int rxqs)
9115{
9116 struct net_device *dev;
9117 unsigned int alloc_size;
9118 struct net_device *p;
9119
9120 BUG_ON(strlen(name) >= sizeof(dev->name));
9121
9122 if (txqs < 1) {
9123 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
9124 return NULL;
9125 }
9126
9127 if (rxqs < 1) {
9128 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
9129 return NULL;
9130 }
9131
9132 alloc_size = sizeof(struct net_device);
9133 if (sizeof_priv) {
		/* ensure 32-byte alignment of private area */
9135 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
9136 alloc_size += sizeof_priv;
9137 }
	/* ensure 32-byte alignment of whole construct */
9139 alloc_size += NETDEV_ALIGN - 1;
9140
9141 p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
9142 if (!p)
9143 return NULL;
9144
9145 dev = PTR_ALIGN(p, NETDEV_ALIGN);
9146 dev->padded = (char *)dev - (char *)p;
9147
9148 dev->pcpu_refcnt = alloc_percpu(int);
9149 if (!dev->pcpu_refcnt)
9150 goto free_dev;
9151
9152 if (dev_addr_init(dev))
9153 goto free_pcpu;
9154
9155 dev_mc_init(dev);
9156 dev_uc_init(dev);
9157
9158 dev_net_set(dev, &init_net);
9159
9160 dev->gso_max_size = GSO_MAX_SIZE;
9161 dev->gso_max_segs = GSO_MAX_SEGS;
9162
9163 INIT_LIST_HEAD(&dev->napi_list);
9164 INIT_LIST_HEAD(&dev->unreg_list);
9165 INIT_LIST_HEAD(&dev->close_list);
9166 INIT_LIST_HEAD(&dev->link_watch_list);
9167 INIT_LIST_HEAD(&dev->adj_list.upper);
9168 INIT_LIST_HEAD(&dev->adj_list.lower);
9169 INIT_LIST_HEAD(&dev->ptype_all);
9170 INIT_LIST_HEAD(&dev->ptype_specific);
9171#ifdef CONFIG_NET_SCHED
9172 hash_init(dev->qdisc_hash);
9173#endif
9174 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
9175 setup(dev);
9176
9177 if (!dev->tx_queue_len) {
9178 dev->priv_flags |= IFF_NO_QUEUE;
9179 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
9180 }
9181
9182 dev->num_tx_queues = txqs;
9183 dev->real_num_tx_queues = txqs;
9184 if (netif_alloc_netdev_queues(dev))
9185 goto free_all;
9186
9187 dev->num_rx_queues = rxqs;
9188 dev->real_num_rx_queues = rxqs;
9189 if (netif_alloc_rx_queues(dev))
9190 goto free_all;
9191
9192 strcpy(dev->name, name);
9193 dev->name_assign_type = name_assign_type;
9194 dev->group = INIT_NETDEV_GROUP;
9195 if (!dev->ethtool_ops)
9196 dev->ethtool_ops = &default_ethtool_ops;
9197
9198 nf_hook_ingress_init(dev);
9199
9200 return dev;
9201
9202free_all:
9203 free_netdev(dev);
9204 return NULL;
9205
9206free_pcpu:
9207 free_percpu(dev->pcpu_refcnt);
9208free_dev:
9209 netdev_freemem(dev);
9210 return NULL;
9211}
9212EXPORT_SYMBOL(alloc_netdev_mqs);
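
/* Illustrative sketch (not part of this file): most callers use
 * wrappers; an Ethernet-style device with 8 TX and 8 RX queues and a
 * hypothetical private struct would be allocated as:
 *
 *	dev = alloc_netdev_mqs(sizeof(struct foo_priv), "foo%d",
 *			       NET_NAME_UNKNOWN, ether_setup, 8, 8);
 *
 * The "%d" in the name template is expanded later, during
 * registration, by dev_get_valid_name().
 */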
9213
/**
 * free_netdev - free network device
 * @dev: device
 *
 * This function does the last stage of destroying an allocated device
 * interface. The reference to the device object is released. If this
 * is the last reference then it will be freed. Must be called in
 * process context.
 */
9223void free_netdev(struct net_device *dev)
9224{
9225 struct napi_struct *p, *n;
9226
9227 might_sleep();
9228 netif_free_tx_queues(dev);
9229 netif_free_rx_queues(dev);
9230
9231 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
9232
	/* Flush device addresses */
9234 dev_addr_flush(dev);
9235
9236 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
9237 netif_napi_del(p);
9238
9239 free_percpu(dev->pcpu_refcnt);
9240 dev->pcpu_refcnt = NULL;
9241
	/* Compatibility with error handling in drivers */
9243 if (dev->reg_state == NETREG_UNINITIALIZED) {
9244 netdev_freemem(dev);
9245 return;
9246 }
9247
9248 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
9249 dev->reg_state = NETREG_RELEASED;
9250
	/* will free via device release */
9252 put_device(&dev->dev);
9253}
9254EXPORT_SYMBOL(free_netdev);
9255
/**
 *	synchronize_net -  Synchronize with packet receive processing
 *
 *	Wait for packets currently being received to be done.
 *	Does not block later packets from starting.
 */
9262void synchronize_net(void)
9263{
9264 might_sleep();
9265 if (rtnl_is_locked())
9266 synchronize_rcu_expedited();
9267 else
9268 synchronize_rcu();
9269}
9270EXPORT_SYMBOL(synchronize_net);
9271
/**
 *	unregister_netdevice_queue - remove device from the kernel
 *	@dev: device
 *	@head: list
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *	If head not NULL, device is queued to be unregistered later.
 *
 *	Callers must hold the rtnl semaphore.  You may want
 *	unregister_netdev() instead of this.
 */
9285void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
9286{
9287 ASSERT_RTNL();
9288
9289 if (head) {
9290 list_move_tail(&dev->unreg_list, head);
9291 } else {
9292 rollback_registered(dev);
		/* Finish processing unregister after unlock */
9294 net_set_todo(dev);
9295 }
9296}
9297EXPORT_SYMBOL(unregister_netdevice_queue);
9298
/**
 *	unregister_netdevice_many - unregister many devices
 *	@head: list of devices
 *
 *	Note: As most callers use a stack allocated list_head,
 *	we force a list_del() to make sure stack won't be corrupted later.
 */
9306void unregister_netdevice_many(struct list_head *head)
9307{
9308 struct net_device *dev;
9309
9310 if (!list_empty(head)) {
9311 rollback_registered_many(head);
9312 list_for_each_entry(dev, head, unreg_list)
9313 net_set_todo(dev);
9314 list_del(head);
9315 }
9316}
9317EXPORT_SYMBOL(unregister_netdevice_many);
9318
/**
 *	unregister_netdev - remove device from the kernel
 *	@dev: device
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *
 *	This is just a wrapper for unregister_netdevice that takes
 *	the rtnl semaphore.  In general you want to use this and not
 *	unregister_netdevice.
 */
9330void unregister_netdev(struct net_device *dev)
9331{
9332 rtnl_lock();
9333 unregister_netdevice(dev);
9334 rtnl_unlock();
9335}
9336EXPORT_SYMBOL(unregister_netdev);
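
/* Illustrative sketch (not part of this file): a driver's remove path
 * mirrors its probe path, and unregistration must precede the final
 * free:
 *
 *	unregister_netdev(dev);		// takes and drops RTNL itself
 *	free_netdev(dev);		// legal once NETREG_UNREGISTERED
 */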
9337
/**
 *	dev_change_net_namespace - move device to different nethost namespace
 *	@dev: device
 *	@net: network namespace
 *	@pat: If not NULL name pattern to try if the current device name
 *	      is already taken in the destination network namespace.
 *
 *	This function shuts down a device interface and moves it
 *	to a new network namespace. On success 0 is returned, on
 *	a failure a negative errno code is returned.
 *
 *	Callers must hold the rtnl semaphore.
 */
9352int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
9353{
9354 int err, new_nsid, new_ifindex;
9355
9356 ASSERT_RTNL();
9357
	/* Don't allow namespace local devices to be moved. */
9359 err = -EINVAL;
9360 if (dev->features & NETIF_F_NETNS_LOCAL)
9361 goto out;
9362
	/* Ensure the device has been registered */
9364 if (dev->reg_state != NETREG_REGISTERED)
9365 goto out;
9366
	/* Get out if there is nothing to do */
9368 err = 0;
9369 if (net_eq(dev_net(dev), net))
9370 goto out;
9371
	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
9375 err = -EEXIST;
9376 if (__dev_get_by_name(net, dev->name)) {
		/* We get here if we can't use the current device name */
9378 if (!pat)
9379 goto out;
9380 err = dev_get_valid_name(net, dev, pat);
9381 if (err < 0)
9382 goto out;
9383 }
9384
	/*
	 * And now a mini version of register_netdevice unregister_netdevice.
	 */

	/* If device is running close it first. */
9390 dev_close(dev);
9391
	/* And unlink it from device chain */
9393 unlist_netdevice(dev);
9394
9395 synchronize_net();
9396
	/* Shutdown queueing discipline. */
9398 dev_shutdown(dev);
9399
	/* Notify protocols, that we are about to destroy
	 * this device. They should clean all the things.
	 *
	 * Note that dev->reg_state stays at NETREG_REGISTERED.
	 * This is wanted because this way 8021q and macvlan know
	 * the device is just moving and can keep their slaves up.
	 */
9407 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
9408 rcu_barrier();
9409
9410 new_nsid = peernet2id_alloc(dev_net(dev), net);
	/* If there is an ifindex conflict assign a new one */
9412 if (__dev_get_by_index(net, dev->ifindex))
9413 new_ifindex = dev_new_index(net);
9414 else
9415 new_ifindex = dev->ifindex;
9416
9417 rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid,
9418 new_ifindex);
9419
	/*
	 *	Flush the unicast and multicast chains
	 */
9423 dev_uc_flush(dev);
9424 dev_mc_flush(dev);
9425
	/* Send a netdev-removed uevent to the old namespace */
9427 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
9428 netdev_adjacent_del_links(dev);
9429
	/* Actually switch the network namespace */
9431 dev_net_set(dev, net);
9432 dev->ifindex = new_ifindex;
9433
	/* Send a netdev-add uevent to the new namespace */
9435 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
9436 netdev_adjacent_add_links(dev);
9437
	/* Fixup kobjects */
9439 err = device_rename(&dev->dev, dev->name);
9440 WARN_ON(err);
9441
	/* Add the device back in the hashes */
9443 list_netdevice(dev);
9444
	/* Notify protocols, that a new device appeared. */
9446 call_netdevice_notifiers(NETDEV_REGISTER, dev);
9447
	/*
	 *	Prevent userspace races by waiting until the network
	 *	device is fully setup before sending notifications.
	 */
9452 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
9453
9454 synchronize_net();
9455 err = 0;
9456out:
9457 return err;
9458}
9459EXPORT_SYMBOL_GPL(dev_change_net_namespace);
9460
9461static int dev_cpu_dead(unsigned int oldcpu)
9462{
9463 struct sk_buff **list_skb;
9464 struct sk_buff *skb;
9465 unsigned int cpu;
9466 struct softnet_data *sd, *oldsd, *remsd = NULL;
9467
9468 local_irq_disable();
9469 cpu = smp_processor_id();
9470 sd = &per_cpu(softnet_data, cpu);
9471 oldsd = &per_cpu(softnet_data, oldcpu);
9472
	/* Find end of our completion_queue. */
9474 list_skb = &sd->completion_queue;
9475 while (*list_skb)
9476 list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
9478 *list_skb = oldsd->completion_queue;
9479 oldsd->completion_queue = NULL;
9480
	/* Append output queue from offline CPU. */
9482 if (oldsd->output_queue) {
9483 *sd->output_queue_tailp = oldsd->output_queue;
9484 sd->output_queue_tailp = oldsd->output_queue_tailp;
9485 oldsd->output_queue = NULL;
9486 oldsd->output_queue_tailp = &oldsd->output_queue;
9487 }
	/* Append NAPI poll list from offline CPU, with one exception :
	 * process_backlog() must be called by cpu owning percpu backlog.
	 * We properly handle process_queue & input_pkt_queue later.
	 */
9492 while (!list_empty(&oldsd->poll_list)) {
9493 struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
9494 struct napi_struct,
9495 poll_list);
9496
9497 list_del_init(&napi->poll_list);
9498 if (napi->poll == process_backlog)
9499 napi->state = 0;
9500 else
9501 ____napi_schedule(sd, napi);
9502 }
9503
9504 raise_softirq_irqoff(NET_TX_SOFTIRQ);
9505 local_irq_enable();
9506
9507#ifdef CONFIG_RPS
9508 remsd = oldsd->rps_ipi_list;
9509 oldsd->rps_ipi_list = NULL;
9510#endif
9511
9512 net_rps_send_ipi(remsd);
9513
	/* Process offline CPU's input_pkt_queue */
9515 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
9516 netif_rx_ni(skb);
9517 input_queue_head_incr(oldsd);
9518 }
9519 while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
9520 netif_rx_ni(skb);
9521 input_queue_head_incr(oldsd);
9522 }
9523
9524 return 0;
9525}
9526
/**
 *	netdev_increment_features - increment feature set by one
 *	@all: current feature set
 *	@one: new feature set
 *	@mask: mask feature set
 *
 *	Computes a new feature set after adding a device with feature set
 *	@one to the master device with current feature set @all.  Will not
 *	enable anything that is off in @mask. Returns the new feature set.
 */
9537netdev_features_t netdev_increment_features(netdev_features_t all,
9538 netdev_features_t one, netdev_features_t mask)
9539{
9540 if (mask & NETIF_F_HW_CSUM)
9541 mask |= NETIF_F_CSUM_MASK;
9542 mask |= NETIF_F_VLAN_CHALLENGED;
9543
9544 all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
9545 all &= one | ~NETIF_F_ALL_FOR_ALL;
9546
	/* If one device supports hw checksumming, set for all. */
9548 if (all & NETIF_F_HW_CSUM)
9549 all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);
9550
9551 return all;
9552}
9553EXPORT_SYMBOL(netdev_increment_features);
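
/* Illustrative sketch (not part of this file): stacked drivers such as
 * bonding recompute their feature set by folding in each lower device,
 * roughly:
 *
 *	features = mask;
 *	for_each_lower_dev(...)		// hypothetical iteration helper
 *		features = netdev_increment_features(features,
 *						     lower->features, mask);
 */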
9554
9555static struct hlist_head * __net_init netdev_create_hash(void)
9556{
9557 int i;
9558 struct hlist_head *hash;
9559
9560 hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL);
9561 if (hash != NULL)
9562 for (i = 0; i < NETDEV_HASHENTRIES; i++)
9563 INIT_HLIST_HEAD(&hash[i]);
9564
9565 return hash;
9566}
9567
/* Initialize per network namespace state */
9569static int __net_init netdev_init(struct net *net)
9570{
9571 BUILD_BUG_ON(GRO_HASH_BUCKETS >
9572 8 * FIELD_SIZEOF(struct napi_struct, gro_bitmask));
9573
9574 if (net != &init_net)
9575 INIT_LIST_HEAD(&net->dev_base_head);
9576
9577 net->dev_name_head = netdev_create_hash();
9578 if (net->dev_name_head == NULL)
9579 goto err_name;
9580
9581 net->dev_index_head = netdev_create_hash();
9582 if (net->dev_index_head == NULL)
9583 goto err_idx;
9584
9585 return 0;
9586
9587err_idx:
9588 kfree(net->dev_name_head);
9589err_name:
9590 return -ENOMEM;
9591}
9592
/**
 *	netdev_drivername - network driver for the device
 *	@dev: network device
 *
 *	Determine network driver for device.
 */
9599const char *netdev_drivername(const struct net_device *dev)
9600{
9601 const struct device_driver *driver;
9602 const struct device *parent;
9603 const char *empty = "";
9604
9605 parent = dev->dev.parent;
9606 if (!parent)
9607 return empty;
9608
9609 driver = parent->driver;
9610 if (driver && driver->name)
9611 return driver->name;
9612 return empty;
9613}
9614
9615static void __netdev_printk(const char *level, const struct net_device *dev,
9616 struct va_format *vaf)
9617{
9618 if (dev && dev->dev.parent) {
9619 dev_printk_emit(level[1] - '0',
9620 dev->dev.parent,
9621 "%s %s %s%s: %pV",
9622 dev_driver_string(dev->dev.parent),
9623 dev_name(dev->dev.parent),
9624 netdev_name(dev), netdev_reg_state(dev),
9625 vaf);
9626 } else if (dev) {
9627 printk("%s%s%s: %pV",
9628 level, netdev_name(dev), netdev_reg_state(dev), vaf);
9629 } else {
9630 printk("%s(NULL net_device): %pV", level, vaf);
9631 }
9632}
9633
9634void netdev_printk(const char *level, const struct net_device *dev,
9635 const char *format, ...)
9636{
9637 struct va_format vaf;
9638 va_list args;
9639
9640 va_start(args, format);
9641
9642 vaf.fmt = format;
9643 vaf.va = &args;
9644
9645 __netdev_printk(level, dev, &vaf);
9646
9647 va_end(args);
9648}
9649EXPORT_SYMBOL(netdev_printk);
9650
9651#define define_netdev_printk_level(func, level) \
9652void func(const struct net_device *dev, const char *fmt, ...) \
9653{ \
9654 struct va_format vaf; \
9655 va_list args; \
9656 \
9657 va_start(args, fmt); \
9658 \
9659 vaf.fmt = fmt; \
9660 vaf.va = &args; \
9661 \
9662 __netdev_printk(level, dev, &vaf); \
9663 \
9664 va_end(args); \
9665} \
9666EXPORT_SYMBOL(func);
9667
9668define_netdev_printk_level(netdev_emerg, KERN_EMERG);
9669define_netdev_printk_level(netdev_alert, KERN_ALERT);
9670define_netdev_printk_level(netdev_crit, KERN_CRIT);
9671define_netdev_printk_level(netdev_err, KERN_ERR);
9672define_netdev_printk_level(netdev_warn, KERN_WARNING);
9673define_netdev_printk_level(netdev_notice, KERN_NOTICE);
9674define_netdev_printk_level(netdev_info, KERN_INFO);
9675
9676static void __net_exit netdev_exit(struct net *net)
9677{
9678 kfree(net->dev_name_head);
9679 kfree(net->dev_index_head);
9680 if (net != &init_net)
9681 WARN_ON_ONCE(!list_empty(&net->dev_base_head));
9682}
9683
9684static struct pernet_operations __net_initdata netdev_net_ops = {
9685 .init = netdev_init,
9686 .exit = netdev_exit,
9687};
9688
9689static void __net_exit default_device_exit(struct net *net)
9690{
9691 struct net_device *dev, *aux;
9692
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
9696 rtnl_lock();
9697 for_each_netdev_safe(net, dev, aux) {
9698 int err;
9699 char fb_name[IFNAMSIZ];
9700
		/* Ignore unmoveable devices (i.e. loopback) */
9702 if (dev->features & NETIF_F_NETNS_LOCAL)
9703 continue;
9704
		/* Leave virtual devices for the generic cleanup */
9706 if (dev->rtnl_link_ops)
9707 continue;
9708
		/* Push remaining network devices to init_net */
9710 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
9711 if (__dev_get_by_name(&init_net, fb_name))
9712 snprintf(fb_name, IFNAMSIZ, "dev%%d");
9713 err = dev_change_net_namespace(dev, &init_net, fb_name);
9714 if (err) {
9715 pr_emerg("%s: failed to move %s to init_net: %d\n",
9716 __func__, dev->name, err);
9717 BUG();
9718 }
9719 }
9720 rtnl_unlock();
9721}
9722
9723static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
9724{
	/* Return with the rtnl_lock held when there are no network
	 * devices unregistering in any network namespace.
	 */
9728 struct net *net;
9729 bool unregistering;
9730 DEFINE_WAIT_FUNC(wait, woken_wake_function);
9731
9732 add_wait_queue(&netdev_unregistering_wq, &wait);
9733 for (;;) {
9734 unregistering = false;
9735 rtnl_lock();
9736 list_for_each_entry(net, net_list, exit_list) {
9737 if (net->dev_unreg_count > 0) {
9738 unregistering = true;
9739 break;
9740 }
9741 }
9742 if (!unregistering)
9743 break;
9744 __rtnl_unlock();
9745
9746 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
9747 }
9748 remove_wait_queue(&netdev_unregistering_wq, &wait);
9749}
9750
9751static void __net_exit default_device_exit_batch(struct list_head *net_list)
9752{
	/* At exit all network devices must be removed from a network
	 * namespace.  Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
9758 struct net_device *dev;
9759 struct net *net;
9760 LIST_HEAD(dev_kill_list);
9761
	/* To prevent network device cleanup code from dereferencing
	 * loopback devices or network devices that have been freed
	 * wait here for all pending unregistrations to complete,
	 * before unregistering the loopback device and allowing the
	 * network namespace to be freed.
	 *
	 * The netdev todo list containing all network devices
	 * unregistrations that happen in default_device_exit_batch
	 * will run in the rtnl_unlock() at the end of
	 * default_device_exit_batch.
	 */
9773 rtnl_lock_unregistering(net_list);
9774 list_for_each_entry(net, net_list, exit_list) {
9775 for_each_netdev_reverse(net, dev) {
9776 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
9777 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
9778 else
9779 unregister_netdevice_queue(dev, &dev_kill_list);
9780 }
9781 }
9782 unregister_netdevice_many(&dev_kill_list);
9783 rtnl_unlock();
9784}
9785
9786static struct pernet_operations __net_initdata default_device_ops = {
9787 .exit = default_device_exit,
9788 .exit_batch = default_device_exit_batch,
9789};
9790
/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 *	This is called single threaded during boot, so no need
 *	to take the rtnl semaphore.
 */
9802static int __init net_dev_init(void)
9803{
9804 int i, rc = -ENOMEM;
9805
9806 BUG_ON(!dev_boot_phase);
9807
9808 if (dev_proc_init())
9809 goto out;
9810
9811 if (netdev_kobject_init())
9812 goto out;
9813
9814 INIT_LIST_HEAD(&ptype_all);
9815 for (i = 0; i < PTYPE_HASH_SIZE; i++)
9816 INIT_LIST_HEAD(&ptype_base[i]);
9817
9818 INIT_LIST_HEAD(&offload_base);
9819
9820 if (register_pernet_subsys(&netdev_net_ops))
9821 goto out;
9822
	/*
	 *	Initialise the packet receive queues.
	 */
9826
9827 for_each_possible_cpu(i) {
9828 struct work_struct *flush = per_cpu_ptr(&flush_works, i);
9829 struct softnet_data *sd = &per_cpu(softnet_data, i);
9830
9831 INIT_WORK(flush, flush_backlog);
9832
9833 skb_queue_head_init(&sd->input_pkt_queue);
9834 skb_queue_head_init(&sd->process_queue);
9835#ifdef CONFIG_XFRM_OFFLOAD
9836 skb_queue_head_init(&sd->xfrm_backlog);
9837#endif
9838 INIT_LIST_HEAD(&sd->poll_list);
9839 sd->output_queue_tailp = &sd->output_queue;
9840#ifdef CONFIG_RPS
9841 sd->csd.func = rps_trigger_softirq;
9842 sd->csd.info = sd;
9843 sd->cpu = i;
9844#endif
9845
9846 init_gro_hash(&sd->backlog);
9847 sd->backlog.poll = process_backlog;
9848 sd->backlog.weight = weight_p;
9849 }
9850
9851 dev_boot_phase = 0;
9852
	/* The loopback device is special. If any other network device
	 * is present in a network namespace the loopback device must
	 * be present too. Since we now dynamically allocate and free
	 * the loopback device, ensure this invariant is maintained by
	 * keeping the loopback device the first device on the list of
	 * network devices, so that it is the first device that appears
	 * and the last network device that disappears.
	 */
9862 if (register_pernet_device(&loopback_net_ops))
9863 goto out;
9864
9865 if (register_pernet_device(&default_device_ops))
9866 goto out;
9867
9868 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
9869 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
9870
9871 rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
9872 NULL, dev_cpu_dead);
9873 WARN_ON(rc < 0);
9874 rc = 0;
9875out:
9876 return rc;
9877}
9878
9879subsys_initcall(net_dev_init);
9880