#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_ingress.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
#include <net/udp_tunnel.h>
#include <linux/net_namespace.h>
#include <linux/indirect_call_wrapper.h>

#include "net-sysfs.h"

#define MAX_GRO_SKBS 8

#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
					 struct netdev_notifier_info *info);
static int call_netdevice_notifiers_extack(unsigned long val,
					   struct net_device *dev,
					   struct netlink_ext_ack *extack);
static struct napi_struct *napi_by_id(unsigned int napi_id);

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock().
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

static DEFINE_MUTEX(ifalias_mutex);

static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);

static seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0)
		;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
288static const unsigned short netdev_lock_type[] = {
289 ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
290 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
291 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
292 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
293 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
294 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
295 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
296 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
297 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
298 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
299 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
300 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
301 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
302 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
303 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
304
305static const char *const netdev_lock_name[] = {
306 "_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
307 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
308 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
309 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
310 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
311 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
312 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
313 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
314 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
315 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
316 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
317 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
318 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
319 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
320 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
321
322static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
323static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
324
325static inline unsigned short netdev_lock_pos(unsigned short dev_type)
326{
327 int i;
328
329 for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
330 if (netdev_lock_type[i] == dev_type)
331 return i;
332
333 return ARRAY_SIZE(netdev_lock_type) - 1;
334}
335
336static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
337 unsigned short dev_type)
338{
339 int i;
340
341 i = netdev_lock_pos(dev_type);
342 lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
343 netdev_lock_name[i]);
344}
345
346static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
347{
348 int i;
349
350 i = netdev_lock_pos(dev->type);
351 lockdep_set_class_and_name(&dev->addr_list_lock,
352 &netdev_addr_lock_key[i],
353 netdev_lock_name[i]);
354}
355#else
356static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
357 unsigned short dev_type)
358{
359}
360static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
361{
362}
363#endif

/*******************************************************************************
 *
 *		Protocol management and registration routines
 *
 *******************************************************************************/

/*
 *	Packet handlers live on per-type lists: handlers for ETH_P_ALL
 *	(packet taps) go on ptype_all (or a device's private ptype_all list),
 *	while protocol-specific handlers are hashed into ptype_base by type.
 *	ptype_head() returns the list a given packet_type belongs on.
 */
static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
	else
		return pt->dev ? &pt->dev->ptype_specific :
				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee all CPU's that are in middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */
void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
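
/*
 * Illustrative sketch (not part of this file): how a protocol module
 * typically registers and removes a handler with dev_add_pack() /
 * dev_remove_pack().  The my_proto_* names and the ethertype used are
 * hypothetical.
 *
 *	static int my_proto_rcv(struct sk_buff *skb, struct net_device *dev,
 *				struct packet_type *pt,
 *				struct net_device *orig_dev)
 *	{
 *		// ... consume or drop the skb ...
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_proto_type __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_802_2),	// hypothetical type
 *		.func = my_proto_rcv,
 *	};
 *
 *	dev_add_pack(&my_proto_type);		// e.g. from module init
 *	dev_remove_pack(&my_proto_type);	// e.g. from module exit
 */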

/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&packet_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee all CPU's that are in middle of receiving packets
 *	will see the new offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513static void __dev_remove_offload(struct packet_offload *po)
514{
515 struct list_head *head = &offload_base;
516 struct packet_offload *po1;
517
518 spin_lock(&offload_lock);
519
520 list_for_each_entry(po1, head, list) {
521 if (po == po1) {
522 list_del_rcu(&po->list);
523 goto out;
524 }
525 }
526
527 pr_warn("dev_remove_offload: %p not found\n", po);
528out:
529 spin_unlock(&offload_lock);
530}
531
532
533
534
535
536
537
538
539
540
541
542
543
544void dev_remove_offload(struct packet_offload *po)
545{
546 __dev_remove_offload(po);
547
548 synchronize_net();
549}
550EXPORT_SYMBOL(dev_remove_offload);
551
552
553
554
555
556
557
558
559static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
560
561
562
563
564
565
566
567
568
569
570static int netdev_boot_setup_add(char *name, struct ifmap *map)
571{
572 struct netdev_boot_setup *s;
573 int i;
574
575 s = dev_boot_setup;
576 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
577 if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
578 memset(s[i].name, 0, sizeof(s[i].name));
579 strlcpy(s[i].name, name, IFNAMSIZ);
580 memcpy(&s[i].map, map, sizeof(s[i].map));
581 break;
582 }
583 }
584
585 return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
586}
587
588
589
590
591
592
593
594
595
596
597int netdev_boot_setup_check(struct net_device *dev)
598{
599 struct netdev_boot_setup *s = dev_boot_setup;
600 int i;
601
602 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
603 if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
604 !strcmp(dev->name, s[i].name)) {
605 dev->irq = s[i].map.irq;
606 dev->base_addr = s[i].map.base_addr;
607 dev->mem_start = s[i].map.mem_start;
608 dev->mem_end = s[i].map.mem_end;
609 return 1;
610 }
611 }
612 return 0;
613}
614EXPORT_SYMBOL(netdev_boot_setup_check);
615
616
617
618
619
620
621
622
623
624
625
626
627unsigned long netdev_boot_base(const char *prefix, int unit)
628{
629 const struct netdev_boot_setup *s = dev_boot_setup;
630 char name[IFNAMSIZ];
631 int i;
632
633 sprintf(name, "%s%d", prefix, unit);
634
635
636
637
638
639 if (__dev_get_by_name(&init_net, name))
640 return 1;
641
642 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
643 if (!strcmp(name, s[i].name))
644 return s[i].map.base_addr;
645 return 0;
646}
647
648
649
650
651int __init netdev_boot_setup(char *str)
652{
653 int ints[5];
654 struct ifmap map;
655
656 str = get_options(str, ARRAY_SIZE(ints), ints);
657 if (!str || !*str)
658 return 0;
659
660
661 memset(&map, 0, sizeof(map));
662 if (ints[0] > 0)
663 map.irq = ints[1];
664 if (ints[0] > 1)
665 map.base_addr = ints[2];
666 if (ints[0] > 2)
667 map.mem_start = ints[3];
668 if (ints[0] > 3)
669 map.mem_end = ints[4];
670
671
672 return netdev_boot_setup_add(str, &map);
673}
674
675__setup("netdev=", netdev_boot_setup);
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691int dev_get_iflink(const struct net_device *dev)
692{
693 if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
694 return dev->netdev_ops->ndo_get_iflink(dev);
695
696 return dev->ifindex;
697}
698EXPORT_SYMBOL(dev_get_iflink);
699
700
701
702
703
704
705
706
707
708
709int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
710{
711 struct ip_tunnel_info *info;
712
713 if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
714 return -EINVAL;
715
716 info = skb_tunnel_info_unclone(skb);
717 if (!info)
718 return -ENOMEM;
719 if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
720 return -EINVAL;
721
722 return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
723}
724EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.  Must be called under RTNL semaphore
 *	or @dev_base_lock.  If the name is found a pointer to the device
 *	is returned.  If the name is not found then %NULL is returned.  The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */
738struct net_device *__dev_get_by_name(struct net *net, const char *name)
739{
740 struct net_device *dev;
741 struct hlist_head *head = dev_name_hash(net, name);
742
743 hlist_for_each_entry(dev, head, name_hlist)
744 if (!strncmp(dev->name, name, IFNAMSIZ))
745 return dev;
746
747 return NULL;
748}
749EXPORT_SYMBOL(__dev_get_by_name);
750
751
752
753
754
755
756
757
758
759
760
761
762
763struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
764{
765 struct net_device *dev;
766 struct hlist_head *head = dev_name_hash(net, name);
767
768 hlist_for_each_entry_rcu(dev, head, name_hlist)
769 if (!strncmp(dev->name, name, IFNAMSIZ))
770 return dev;
771
772 return NULL;
773}
774EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.  This can be called from any
 *	context and does its own locking.  The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed.  %NULL is returned if no
 *	matching device is found.
 */
struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
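
/*
 * Illustrative sketch (not part of this file): dev_get_by_name() takes a
 * reference on the device it returns, so callers must balance it with
 * dev_put().  The "eth0" name is just an example.
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *
 *	if (dev) {
 *		// ... use dev ...
 *		dev_put(dev);
 *	}
 */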
800
801
802
803
804
805
806
807
808
809
810
811
812
813struct net_device *__dev_get_by_index(struct net *net, int ifindex)
814{
815 struct net_device *dev;
816 struct hlist_head *head = dev_index_hash(net, ifindex);
817
818 hlist_for_each_entry(dev, head, index_hlist)
819 if (dev->ifindex == ifindex)
820 return dev;
821
822 return NULL;
823}
824EXPORT_SYMBOL(__dev_get_by_index);
825
826
827
828
829
830
831
832
833
834
835
836
837struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
838{
839 struct net_device *dev;
840 struct hlist_head *head = dev_index_hash(net, ifindex);
841
842 hlist_for_each_entry_rcu(dev, head, index_hlist)
843 if (dev->ifindex == ifindex)
844 return dev;
845
846 return NULL;
847}
848EXPORT_SYMBOL(dev_get_by_index_rcu);
849
850
851
852
853
854
855
856
857
858
859
860
861
862struct net_device *dev_get_by_index(struct net *net, int ifindex)
863{
864 struct net_device *dev;
865
866 rcu_read_lock();
867 dev = dev_get_by_index_rcu(net, ifindex);
868 if (dev)
869 dev_hold(dev);
870 rcu_read_unlock();
871 return dev;
872}
873EXPORT_SYMBOL(dev_get_by_index);
874
875
876
877
878
879
880
881
882
883
884
885struct net_device *dev_get_by_napi_id(unsigned int napi_id)
886{
887 struct napi_struct *napi;
888
889 WARN_ON_ONCE(!rcu_read_lock_held());
890
891 if (napi_id < MIN_NAPI_ID)
892 return NULL;
893
894 napi = napi_by_id(napi_id);
895
896 return napi ? napi->dev : NULL;
897}
898EXPORT_SYMBOL(dev_get_by_napi_id);
899
900
901
902
903
904
905
906
907
908
909
910int netdev_get_name(struct net *net, char *name, int ifindex)
911{
912 struct net_device *dev;
913 unsigned int seq;
914
915retry:
916 seq = raw_seqcount_begin(&devnet_rename_seq);
917 rcu_read_lock();
918 dev = dev_get_by_index_rcu(net, ifindex);
919 if (!dev) {
920 rcu_read_unlock();
921 return -ENODEV;
922 }
923
924 strcpy(name, dev->name);
925 rcu_read_unlock();
926 if (read_seqcount_retry(&devnet_rename_seq, seq)) {
927 cond_resched();
928 goto retry;
929 }
930
931 return 0;
932}
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
949 const char *ha)
950{
951 struct net_device *dev;
952
953 for_each_netdev_rcu(net, dev)
954 if (dev->type == type &&
955 !memcmp(dev->dev_addr, ha, dev->addr_len))
956 return dev;
957
958 return NULL;
959}
960EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
961
962struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
963{
964 struct net_device *dev;
965
966 ASSERT_RTNL();
967 for_each_netdev(net, dev)
968 if (dev->type == type)
969 return dev;
970
971 return NULL;
972}
973EXPORT_SYMBOL(__dev_getfirstbyhwtype);
974
975struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
976{
977 struct net_device *dev, *ret = NULL;
978
979 rcu_read_lock();
980 for_each_netdev_rcu(net, dev)
981 if (dev->type == type) {
982 dev_hold(dev);
983 ret = dev;
984 break;
985 }
986 rcu_read_unlock();
987 return ret;
988}
989EXPORT_SYMBOL(dev_getfirstbyhwtype);
990
991
992
993
994
995
996
997
998
999
1000
1001
1002struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
1003 unsigned short mask)
1004{
1005 struct net_device *dev, *ret;
1006
1007 ASSERT_RTNL();
1008
1009 ret = NULL;
1010 for_each_netdev(net, dev) {
1011 if (((dev->flags ^ if_flags) & mask) == 0) {
1012 ret = dev;
1013 break;
1014 }
1015 }
1016 return ret;
1017}
1018EXPORT_SYMBOL(__dev_get_by_flags);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
1028bool dev_valid_name(const char *name)
1029{
1030 if (*name == '\0')
1031 return false;
1032 if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
1033 return false;
1034 if (!strcmp(name, ".") || !strcmp(name, ".."))
1035 return false;
1036
1037 while (*name) {
1038 if (*name == '/' || *name == ':' || isspace(*name))
1039 return false;
1040 name++;
1041 }
1042 return true;
1043}
1044EXPORT_SYMBOL(dev_valid_name);
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061static int __dev_alloc_name(struct net *net, const char *name, char *buf)
1062{
1063 int i = 0;
1064 const char *p;
1065 const int max_netdevices = 8*PAGE_SIZE;
1066 unsigned long *inuse;
1067 struct net_device *d;
1068
1069 if (!dev_valid_name(name))
1070 return -EINVAL;
1071
1072 p = strchr(name, '%');
1073 if (p) {
1074
1075
1076
1077
1078
1079 if (p[1] != 'd' || strchr(p + 2, '%'))
1080 return -EINVAL;
1081
1082
1083 inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
1084 if (!inuse)
1085 return -ENOMEM;
1086
1087 for_each_netdev(net, d) {
1088 if (!sscanf(d->name, name, &i))
1089 continue;
1090 if (i < 0 || i >= max_netdevices)
1091 continue;
1092
1093
1094 snprintf(buf, IFNAMSIZ, name, i);
1095 if (!strncmp(buf, d->name, IFNAMSIZ))
1096 set_bit(i, inuse);
1097 }
1098
1099 i = find_first_zero_bit(inuse, max_netdevices);
1100 free_page((unsigned long) inuse);
1101 }
1102
1103 snprintf(buf, IFNAMSIZ, name, i);
1104 if (!__dev_get_by_name(net, buf))
1105 return i;
1106
1107
1108
1109
1110
1111 return -ENFILE;
1112}
1113
1114static int dev_alloc_name_ns(struct net *net,
1115 struct net_device *dev,
1116 const char *name)
1117{
1118 char buf[IFNAMSIZ];
1119 int ret;
1120
1121 BUG_ON(!net);
1122 ret = __dev_alloc_name(net, name, buf);
1123 if (ret >= 0)
1124 strlcpy(dev->name, buf, IFNAMSIZ);
1125 return ret;
1126}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id.  It scans list of devices to build up a free map, then chooses
 *	the first empty slot.  The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Returns the number of the unit assigned or a negative errno code.
 */
int dev_alloc_name(struct net_device *dev, const char *name)
{
	return dev_alloc_name_ns(dev_net(dev), dev, name);
}
EXPORT_SYMBOL(dev_alloc_name);
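
/*
 * Illustrative sketch (not part of this file): a driver that wants the
 * usual "eth0", "eth1", ... numbering can pass a "%d" format string to
 * dev_alloc_name() before registering the device.  The error label is
 * hypothetical.
 *
 *	err = dev_alloc_name(dev, "eth%d");
 *	if (err < 0)
 *		goto fail;
 */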
1147
1148int dev_get_valid_name(struct net *net, struct net_device *dev,
1149 const char *name)
1150{
1151 BUG_ON(!net);
1152
1153 if (!dev_valid_name(name))
1154 return -EINVAL;
1155
1156 if (strchr(name, '%'))
1157 return dev_alloc_name_ns(net, dev, name);
1158 else if (__dev_get_by_name(net, name))
1159 return -EEXIST;
1160 else if (dev->name != name)
1161 strlcpy(dev->name, name, IFNAMSIZ);
1162
1163 return 0;
1164}
1165EXPORT_SYMBOL(dev_get_valid_name);

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d".
 *	for wildcarding.
 */
1175int dev_change_name(struct net_device *dev, const char *newname)
1176{
1177 unsigned char old_assign_type;
1178 char oldname[IFNAMSIZ];
1179 int err = 0;
1180 int ret;
1181 struct net *net;
1182
1183 ASSERT_RTNL();
1184 BUG_ON(!dev_net(dev));
1185
1186 net = dev_net(dev);
1187 if (dev->flags & IFF_UP)
1188 return -EBUSY;
1189
1190 write_seqcount_begin(&devnet_rename_seq);
1191
1192 if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
1193 write_seqcount_end(&devnet_rename_seq);
1194 return 0;
1195 }
1196
1197 memcpy(oldname, dev->name, IFNAMSIZ);
1198
1199 err = dev_get_valid_name(net, dev, newname);
1200 if (err < 0) {
1201 write_seqcount_end(&devnet_rename_seq);
1202 return err;
1203 }
1204
1205 if (oldname[0] && !strchr(oldname, '%'))
1206 netdev_info(dev, "renamed from %s\n", oldname);
1207
1208 old_assign_type = dev->name_assign_type;
1209 dev->name_assign_type = NET_NAME_RENAMED;
1210
1211rollback:
1212 ret = device_rename(&dev->dev, dev->name);
1213 if (ret) {
1214 memcpy(dev->name, oldname, IFNAMSIZ);
1215 dev->name_assign_type = old_assign_type;
1216 write_seqcount_end(&devnet_rename_seq);
1217 return ret;
1218 }
1219
1220 write_seqcount_end(&devnet_rename_seq);
1221
1222 netdev_adjacent_rename_links(dev, oldname);
1223
1224 write_lock_bh(&dev_base_lock);
1225 hlist_del_rcu(&dev->name_hlist);
1226 write_unlock_bh(&dev_base_lock);
1227
1228 synchronize_rcu();
1229
1230 write_lock_bh(&dev_base_lock);
1231 hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
1232 write_unlock_bh(&dev_base_lock);
1233
1234 ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
1235 ret = notifier_to_errno(ret);
1236
1237 if (ret) {
1238
1239 if (err >= 0) {
1240 err = ret;
1241 write_seqcount_begin(&devnet_rename_seq);
1242 memcpy(dev->name, oldname, IFNAMSIZ);
1243 memcpy(oldname, newname, IFNAMSIZ);
1244 dev->name_assign_type = old_assign_type;
1245 old_assign_type = NET_NAME_RENAMED;
1246 goto rollback;
1247 } else {
1248 pr_err("%s: name change rollback failed: %d\n",
1249 dev->name, ret);
1250 }
1251 }
1252
1253 return err;
1254}
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1265{
1266 struct dev_ifalias *new_alias = NULL;
1267
1268 if (len >= IFALIASZ)
1269 return -EINVAL;
1270
1271 if (len) {
1272 new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
1273 if (!new_alias)
1274 return -ENOMEM;
1275
1276 memcpy(new_alias->ifalias, alias, len);
1277 new_alias->ifalias[len] = 0;
1278 }
1279
1280 mutex_lock(&ifalias_mutex);
1281 rcu_swap_protected(dev->ifalias, new_alias,
1282 mutex_is_locked(&ifalias_mutex));
1283 mutex_unlock(&ifalias_mutex);
1284
1285 if (new_alias)
1286 kfree_rcu(new_alias, rcuhead);
1287
1288 return len;
1289}
1290EXPORT_SYMBOL(dev_set_alias);
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301int dev_get_alias(const struct net_device *dev, char *name, size_t len)
1302{
1303 const struct dev_ifalias *alias;
1304 int ret = 0;
1305
1306 rcu_read_lock();
1307 alias = rcu_dereference(dev->ifalias);
1308 if (alias)
1309 ret = snprintf(name, len, "%s", alias->ifalias);
1310 rcu_read_unlock();
1311
1312 return ret;
1313}
1314
1315
1316
1317
1318
1319
1320
1321void netdev_features_change(struct net_device *dev)
1322{
1323 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
1324}
1325EXPORT_SYMBOL(netdev_features_change);
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335void netdev_state_change(struct net_device *dev)
1336{
1337 if (dev->flags & IFF_UP) {
1338 struct netdev_notifier_change_info change_info = {
1339 .info.dev = dev,
1340 };
1341
1342 call_netdevice_notifiers_info(NETDEV_CHANGE,
1343 &change_info.info);
1344 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
1345 }
1346}
1347EXPORT_SYMBOL(netdev_state_change);
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359void netdev_notify_peers(struct net_device *dev)
1360{
1361 rtnl_lock();
1362 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1363 call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
1364 rtnl_unlock();
1365}
1366EXPORT_SYMBOL(netdev_notify_peers);
1367
1368static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
1369{
1370 const struct net_device_ops *ops = dev->netdev_ops;
1371 int ret;
1372
1373 ASSERT_RTNL();
1374
1375 if (!netif_device_present(dev))
1376 return -ENODEV;
1377
1378
1379
1380
1381
1382 netpoll_poll_disable(dev);
1383
1384 ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack);
1385 ret = notifier_to_errno(ret);
1386 if (ret)
1387 return ret;
1388
1389 set_bit(__LINK_STATE_START, &dev->state);
1390
1391 if (ops->ndo_validate_addr)
1392 ret = ops->ndo_validate_addr(dev);
1393
1394 if (!ret && ops->ndo_open)
1395 ret = ops->ndo_open(dev);
1396
1397 netpoll_poll_enable(dev);
1398
1399 if (ret)
1400 clear_bit(__LINK_STATE_START, &dev->state);
1401 else {
1402 dev->flags |= IFF_UP;
1403 dev_set_rx_mode(dev);
1404 dev_activate(dev);
1405 add_device_randomness(dev->dev_addr, dev->addr_len);
1406 }
1407
1408 return ret;
1409}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev: device to open
 *	@extack: netlink extended ack
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev, extack);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
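
/*
 * Illustrative sketch (not part of this file): bringing an interface up
 * and down from kernel code.  Both calls must be made under the RTNL
 * lock, as the ASSERT_RTNL() checks in the internal helpers expect.
 *
 *	rtnl_lock();
 *	err = dev_open(dev, NULL);	// NULL: no netlink extack available
 *	// ... use the interface ...
 *	dev_close(dev);
 *	rtnl_unlock();
 */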
1441
1442static void __dev_close_many(struct list_head *head)
1443{
1444 struct net_device *dev;
1445
1446 ASSERT_RTNL();
1447 might_sleep();
1448
1449 list_for_each_entry(dev, head, close_list) {
1450
1451 netpoll_poll_disable(dev);
1452
1453 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1454
1455 clear_bit(__LINK_STATE_START, &dev->state);
1456
1457
1458
1459
1460
1461
1462
1463 smp_mb__after_atomic();
1464 }
1465
1466 dev_deactivate_many(head);
1467
1468 list_for_each_entry(dev, head, close_list) {
1469 const struct net_device_ops *ops = dev->netdev_ops;
1470
1471
1472
1473
1474
1475
1476
1477
1478 if (ops->ndo_stop)
1479 ops->ndo_stop(dev);
1480
1481 dev->flags &= ~IFF_UP;
1482 netpoll_poll_enable(dev);
1483 }
1484}
1485
1486static void __dev_close(struct net_device *dev)
1487{
1488 LIST_HEAD(single);
1489
1490 list_add(&dev->close_list, &single);
1491 __dev_close_many(&single);
1492 list_del(&single);
1493}
1494
1495void dev_close_many(struct list_head *head, bool unlink)
1496{
1497 struct net_device *dev, *tmp;
1498
1499
1500 list_for_each_entry_safe(dev, tmp, head, close_list)
1501 if (!(dev->flags & IFF_UP))
1502 list_del_init(&dev->close_list);
1503
1504 __dev_close_many(head);
1505
1506 list_for_each_entry_safe(dev, tmp, head, close_list) {
1507 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
1508 call_netdevice_notifiers(NETDEV_DOWN, dev);
1509 if (unlink)
1510 list_del_init(&dev->close_list);
1511 }
1512}
1513EXPORT_SYMBOL(dev_close_many);

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
1524void dev_close(struct net_device *dev)
1525{
1526 if (dev->flags & IFF_UP) {
1527 LIST_HEAD(single);
1528
1529 list_add(&dev->close_list, &single);
1530 dev_close_many(&single, true);
1531 list_del(&single);
1532 }
1533}
1534EXPORT_SYMBOL(dev_close);

/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
1545void dev_disable_lro(struct net_device *dev)
1546{
1547 struct net_device *lower_dev;
1548 struct list_head *iter;
1549
1550 dev->wanted_features &= ~NETIF_F_LRO;
1551 netdev_update_features(dev);
1552
1553 if (unlikely(dev->features & NETIF_F_LRO))
1554 netdev_WARN(dev, "failed to disable LRO!\n");
1555
1556 netdev_for_each_lower_dev(dev, lower_dev, iter)
1557 dev_disable_lro(lower_dev);
1558}
1559EXPORT_SYMBOL(dev_disable_lro);

/**
 *	dev_disable_gro_hw - disable HW Generic Receive Offload on a device
 *	@dev: device
 *
 *	Disable HW Generic Receive Offload (GRO_HW) on a net device.  Must be
 *	called under RTNL.  This is needed if Generic XDP is installed on
 *	the device.
 */
1569static void dev_disable_gro_hw(struct net_device *dev)
1570{
1571 dev->wanted_features &= ~NETIF_F_GRO_HW;
1572 netdev_update_features(dev);
1573
1574 if (unlikely(dev->features & NETIF_F_GRO_HW))
1575 netdev_WARN(dev, "failed to disable GRO_HW!\n");
1576}
1577
1578const char *netdev_cmd_to_name(enum netdev_cmd cmd)
1579{
1580#define N(val) \
1581 case NETDEV_##val: \
1582 return "NETDEV_" __stringify(val);
1583 switch (cmd) {
1584 N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
1585 N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
1586 N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
1587 N(POST_INIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN) N(CHANGEUPPER)
1588 N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA) N(BONDING_INFO)
1589 N(PRECHANGEUPPER) N(CHANGELOWERSTATE) N(UDP_TUNNEL_PUSH_INFO)
1590 N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
1591 N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
1592 N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
1593 N(PRE_CHANGEADDR)
1594 }
1595#undef N
1596 return "UNKNOWN_NETDEV_EVENT";
1597}
1598EXPORT_SYMBOL_GPL(netdev_cmd_to_name);
1599
1600static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1601 struct net_device *dev)
1602{
1603 struct netdev_notifier_info info = {
1604 .dev = dev,
1605 };
1606
1607 return nb->notifier_call(nb, val, &info);
1608}
1609
1610static int dev_boot_phase = 1;

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered all registration and up events are replayed
 *	to the new notifier to allow device to have a race free
 *	view of the network device list.
 */
1626int register_netdevice_notifier(struct notifier_block *nb)
1627{
1628 struct net_device *dev;
1629 struct net_device *last;
1630 struct net *net;
1631 int err;
1632
1633
1634 down_write(&pernet_ops_rwsem);
1635 rtnl_lock();
1636 err = raw_notifier_chain_register(&netdev_chain, nb);
1637 if (err)
1638 goto unlock;
1639 if (dev_boot_phase)
1640 goto unlock;
1641 for_each_net(net) {
1642 for_each_netdev(net, dev) {
1643 err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
1644 err = notifier_to_errno(err);
1645 if (err)
1646 goto rollback;
1647
1648 if (!(dev->flags & IFF_UP))
1649 continue;
1650
1651 call_netdevice_notifier(nb, NETDEV_UP, dev);
1652 }
1653 }
1654
1655unlock:
1656 rtnl_unlock();
1657 up_write(&pernet_ops_rwsem);
1658 return err;
1659
1660rollback:
1661 last = dev;
1662 for_each_net(net) {
1663 for_each_netdev(net, dev) {
1664 if (dev == last)
1665 goto outroll;
1666
1667 if (dev->flags & IFF_UP) {
1668 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1669 dev);
1670 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1671 }
1672 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1673 }
1674 }
1675
1676outroll:
1677 raw_notifier_chain_unregister(&netdev_chain, nb);
1678 goto unlock;
1679}
1680EXPORT_SYMBOL(register_netdevice_notifier);
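
/*
 * Illustrative sketch (not part of this file): a subsystem watching for
 * device events registers a notifier_block; the my_netdev_* names are
 * hypothetical.  Existing devices are replayed to the new notifier as
 * NETDEV_REGISTER / NETDEV_UP events.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UP)
 *			pr_info("%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_netdev_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_netdev_nb);
 *	// ...
 *	unregister_netdevice_notifier(&my_netdev_nb);
 */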

/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 *
 *	After unregistering, unregister and down device events are synthesized
 *	for all devices on the device list to the removed notifier to remove
 *	the need for special case cleanup code.
 */
1696int unregister_netdevice_notifier(struct notifier_block *nb)
1697{
1698 struct net_device *dev;
1699 struct net *net;
1700 int err;
1701
1702
1703 down_write(&pernet_ops_rwsem);
1704 rtnl_lock();
1705 err = raw_notifier_chain_unregister(&netdev_chain, nb);
1706 if (err)
1707 goto unlock;
1708
1709 for_each_net(net) {
1710 for_each_netdev(net, dev) {
1711 if (dev->flags & IFF_UP) {
1712 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1713 dev);
1714 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1715 }
1716 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1717 }
1718 }
1719unlock:
1720 rtnl_unlock();
1721 up_write(&pernet_ops_rwsem);
1722 return err;
1723}
1724EXPORT_SYMBOL(unregister_netdevice_notifier);
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735static int call_netdevice_notifiers_info(unsigned long val,
1736 struct netdev_notifier_info *info)
1737{
1738 ASSERT_RTNL();
1739 return raw_notifier_call_chain(&netdev_chain, val, info);
1740}
1741
1742static int call_netdevice_notifiers_extack(unsigned long val,
1743 struct net_device *dev,
1744 struct netlink_ext_ack *extack)
1745{
1746 struct netdev_notifier_info info = {
1747 .dev = dev,
1748 .extack = extack,
1749 };
1750
1751 return call_netdevice_notifiers_info(val, &info);
1752}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */
1763int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1764{
1765 return call_netdevice_notifiers_extack(val, dev, NULL);
1766}
1767EXPORT_SYMBOL(call_netdevice_notifiers);
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778static int call_netdevice_notifiers_mtu(unsigned long val,
1779 struct net_device *dev, u32 arg)
1780{
1781 struct netdev_notifier_info_ext info = {
1782 .info.dev = dev,
1783 .ext.mtu = arg,
1784 };
1785
1786 BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);
1787
1788 return call_netdevice_notifiers_info(val, &info.info);
1789}
1790
1791#ifdef CONFIG_NET_INGRESS
1792static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);
1793
1794void net_inc_ingress_queue(void)
1795{
1796 static_branch_inc(&ingress_needed_key);
1797}
1798EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
1799
1800void net_dec_ingress_queue(void)
1801{
1802 static_branch_dec(&ingress_needed_key);
1803}
1804EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
1805#endif
1806
1807#ifdef CONFIG_NET_EGRESS
1808static DEFINE_STATIC_KEY_FALSE(egress_needed_key);
1809
1810void net_inc_egress_queue(void)
1811{
1812 static_branch_inc(&egress_needed_key);
1813}
1814EXPORT_SYMBOL_GPL(net_inc_egress_queue);
1815
1816void net_dec_egress_queue(void)
1817{
1818 static_branch_dec(&egress_needed_key);
1819}
1820EXPORT_SYMBOL_GPL(net_dec_egress_queue);
1821#endif
1822
1823static DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
1824#ifdef CONFIG_JUMP_LABEL
1825static atomic_t netstamp_needed_deferred;
1826static atomic_t netstamp_wanted;
1827static void netstamp_clear(struct work_struct *work)
1828{
1829 int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
1830 int wanted;
1831
1832 wanted = atomic_add_return(deferred, &netstamp_wanted);
1833 if (wanted > 0)
1834 static_branch_enable(&netstamp_needed_key);
1835 else
1836 static_branch_disable(&netstamp_needed_key);
1837}
1838static DECLARE_WORK(netstamp_work, netstamp_clear);
1839#endif
1840
1841void net_enable_timestamp(void)
1842{
1843#ifdef CONFIG_JUMP_LABEL
1844 int wanted;
1845
1846 while (1) {
1847 wanted = atomic_read(&netstamp_wanted);
1848 if (wanted <= 0)
1849 break;
1850 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
1851 return;
1852 }
1853 atomic_inc(&netstamp_needed_deferred);
1854 schedule_work(&netstamp_work);
1855#else
1856 static_branch_inc(&netstamp_needed_key);
1857#endif
1858}
1859EXPORT_SYMBOL(net_enable_timestamp);
1860
1861void net_disable_timestamp(void)
1862{
1863#ifdef CONFIG_JUMP_LABEL
1864 int wanted;
1865
1866 while (1) {
1867 wanted = atomic_read(&netstamp_wanted);
1868 if (wanted <= 1)
1869 break;
1870 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
1871 return;
1872 }
1873 atomic_dec(&netstamp_needed_deferred);
1874 schedule_work(&netstamp_work);
1875#else
1876 static_branch_dec(&netstamp_needed_key);
1877#endif
1878}
1879EXPORT_SYMBOL(net_disable_timestamp);
1880
1881static inline void net_timestamp_set(struct sk_buff *skb)
1882{
1883 skb->tstamp = 0;
1884 if (static_branch_unlikely(&netstamp_needed_key))
1885 __net_timestamp(skb);
1886}
1887
1888#define net_timestamp_check(COND, SKB) \
1889 if (static_branch_unlikely(&netstamp_needed_key)) { \
1890 if ((COND) && !(SKB)->tstamp) \
1891 __net_timestamp(SKB); \
1892 } \
1893
1894bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
1895{
1896 unsigned int len;
1897
1898 if (!(dev->flags & IFF_UP))
1899 return false;
1900
1901 len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1902 if (skb->len <= len)
1903 return true;
1904
1905
1906
1907
1908 if (skb_is_gso(skb))
1909 return true;
1910
1911 return false;
1912}
1913EXPORT_SYMBOL_GPL(is_skb_forwardable);
1914
1915int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1916{
1917 int ret = ____dev_forward_skb(dev, skb);
1918
1919 if (likely(!ret)) {
1920 skb->protocol = eth_type_trans(skb, dev);
1921 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
1922 }
1923
1924 return ret;
1925}
1926EXPORT_SYMBOL_GPL(__dev_forward_skb);

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
1946int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1947{
1948 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
1949}
1950EXPORT_SYMBOL_GPL(dev_forward_skb);
1951
1952static inline int deliver_skb(struct sk_buff *skb,
1953 struct packet_type *pt_prev,
1954 struct net_device *orig_dev)
1955{
1956 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
1957 return -ENOMEM;
1958 refcount_inc(&skb->users);
1959 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1960}
1961
1962static inline void deliver_ptype_list_skb(struct sk_buff *skb,
1963 struct packet_type **pt,
1964 struct net_device *orig_dev,
1965 __be16 type,
1966 struct list_head *ptype_list)
1967{
1968 struct packet_type *ptype, *pt_prev = *pt;
1969
1970 list_for_each_entry_rcu(ptype, ptype_list, list) {
1971 if (ptype->type != type)
1972 continue;
1973 if (pt_prev)
1974 deliver_skb(skb, pt_prev, orig_dev);
1975 pt_prev = ptype;
1976 }
1977 *pt = pt_prev;
1978}
1979
1980static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1981{
1982 if (!ptype->af_packet_priv || !skb->sk)
1983 return false;
1984
1985 if (ptype->id_match)
1986 return ptype->id_match(ptype, skb->sk);
1987 else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1988 return true;
1989
1990 return false;
1991}

/**
 * dev_nit_active - return true if any network interface taps are in use
 *
 * @dev: network device to check for the presence of taps
 */
1998bool dev_nit_active(struct net_device *dev)
1999{
2000 return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all);
2001}
2002EXPORT_SYMBOL_GPL(dev_nit_active);

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */
2009void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
2010{
2011 struct packet_type *ptype;
2012 struct sk_buff *skb2 = NULL;
2013 struct packet_type *pt_prev = NULL;
2014 struct list_head *ptype_list = &ptype_all;
2015
2016 rcu_read_lock();
2017again:
2018 list_for_each_entry_rcu(ptype, ptype_list, list) {
2019 if (ptype->ignore_outgoing)
2020 continue;
2021
2022
2023
2024
2025 if (skb_loop_sk(ptype, skb))
2026 continue;
2027
2028 if (pt_prev) {
2029 deliver_skb(skb2, pt_prev, skb->dev);
2030 pt_prev = ptype;
2031 continue;
2032 }
2033
2034
2035 skb2 = skb_clone(skb, GFP_ATOMIC);
2036 if (!skb2)
2037 goto out_unlock;
2038
2039 net_timestamp_set(skb2);
2040
2041
2042
2043
2044
2045 skb_reset_mac_header(skb2);
2046
2047 if (skb_network_header(skb2) < skb2->data ||
2048 skb_network_header(skb2) > skb_tail_pointer(skb2)) {
2049 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
2050 ntohs(skb2->protocol),
2051 dev->name);
2052 skb_reset_network_header(skb2);
2053 }
2054
2055 skb2->transport_header = skb2->network_header;
2056 skb2->pkt_type = PACKET_OUTGOING;
2057 pt_prev = ptype;
2058 }
2059
2060 if (ptype_list == &ptype_all) {
2061 ptype_list = &dev->ptype_all;
2062 goto again;
2063 }
2064out_unlock:
2065 if (pt_prev) {
2066 if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
2067 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
2068 else
2069 kfree_skb(skb2);
2070 }
2071 rcu_read_unlock();
2072}
2073EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
2074
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088static void netif_setup_tc(struct net_device *dev, unsigned int txq)
2089{
2090 int i;
2091 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2092
2093
2094 if (tc->offset + tc->count > txq) {
2095 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
2096 dev->num_tc = 0;
2097 return;
2098 }
2099
2100
2101 for (i = 1; i < TC_BITMASK + 1; i++) {
2102 int q = netdev_get_prio_tc_map(dev, i);
2103
2104 tc = &dev->tc_to_txq[q];
2105 if (tc->offset + tc->count > txq) {
2106 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
2107 i, q);
2108 netdev_set_prio_tc_map(dev, i, 0);
2109 }
2110 }
2111}
2112
2113int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
2114{
2115 if (dev->num_tc) {
2116 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2117 int i;
2118
2119
2120 for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
2121 if ((txq - tc->offset) < tc->count)
2122 return i;
2123 }
2124
2125
2126 return -1;
2127 }
2128
2129 return 0;
2130}
2131EXPORT_SYMBOL(netdev_txq_to_tc);
2132
2133#ifdef CONFIG_XPS
2134struct static_key xps_needed __read_mostly;
2135EXPORT_SYMBOL(xps_needed);
2136struct static_key xps_rxqs_needed __read_mostly;
2137EXPORT_SYMBOL(xps_rxqs_needed);
2138static DEFINE_MUTEX(xps_map_mutex);
2139#define xmap_dereference(P) \
2140 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
2141
2142static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
2143 int tci, u16 index)
2144{
2145 struct xps_map *map = NULL;
2146 int pos;
2147
2148 if (dev_maps)
2149 map = xmap_dereference(dev_maps->attr_map[tci]);
2150 if (!map)
2151 return false;
2152
2153 for (pos = map->len; pos--;) {
2154 if (map->queues[pos] != index)
2155 continue;
2156
2157 if (map->len > 1) {
2158 map->queues[pos] = map->queues[--map->len];
2159 break;
2160 }
2161
2162 RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
2163 kfree_rcu(map, rcu);
2164 return false;
2165 }
2166
2167 return true;
2168}
2169
2170static bool remove_xps_queue_cpu(struct net_device *dev,
2171 struct xps_dev_maps *dev_maps,
2172 int cpu, u16 offset, u16 count)
2173{
2174 int num_tc = dev->num_tc ? : 1;
2175 bool active = false;
2176 int tci;
2177
2178 for (tci = cpu * num_tc; num_tc--; tci++) {
2179 int i, j;
2180
2181 for (i = count, j = offset; i--; j++) {
2182 if (!remove_xps_queue(dev_maps, tci, j))
2183 break;
2184 }
2185
2186 active |= i < 0;
2187 }
2188
2189 return active;
2190}
2191
2192static void reset_xps_maps(struct net_device *dev,
2193 struct xps_dev_maps *dev_maps,
2194 bool is_rxqs_map)
2195{
2196 if (is_rxqs_map) {
2197 static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
2198 RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
2199 } else {
2200 RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
2201 }
2202 static_key_slow_dec_cpuslocked(&xps_needed);
2203 kfree_rcu(dev_maps, rcu);
2204}
2205
2206static void clean_xps_maps(struct net_device *dev, const unsigned long *mask,
2207 struct xps_dev_maps *dev_maps, unsigned int nr_ids,
2208 u16 offset, u16 count, bool is_rxqs_map)
2209{
2210 bool active = false;
2211 int i, j;
2212
2213 for (j = -1; j = netif_attrmask_next(j, mask, nr_ids),
2214 j < nr_ids;)
2215 active |= remove_xps_queue_cpu(dev, dev_maps, j, offset,
2216 count);
2217 if (!active)
2218 reset_xps_maps(dev, dev_maps, is_rxqs_map);
2219
2220 if (!is_rxqs_map) {
2221 for (i = offset + (count - 1); count--; i--) {
2222 netdev_queue_numa_node_write(
2223 netdev_get_tx_queue(dev, i),
2224 NUMA_NO_NODE);
2225 }
2226 }
2227}
2228
2229static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
2230 u16 count)
2231{
2232 const unsigned long *possible_mask = NULL;
2233 struct xps_dev_maps *dev_maps;
2234 unsigned int nr_ids;
2235
2236 if (!static_key_false(&xps_needed))
2237 return;
2238
2239 cpus_read_lock();
2240 mutex_lock(&xps_map_mutex);
2241
2242 if (static_key_false(&xps_rxqs_needed)) {
2243 dev_maps = xmap_dereference(dev->xps_rxqs_map);
2244 if (dev_maps) {
2245 nr_ids = dev->num_rx_queues;
2246 clean_xps_maps(dev, possible_mask, dev_maps, nr_ids,
2247 offset, count, true);
2248 }
2249 }
2250
2251 dev_maps = xmap_dereference(dev->xps_cpus_map);
2252 if (!dev_maps)
2253 goto out_no_maps;
2254
2255 if (num_possible_cpus() > 1)
2256 possible_mask = cpumask_bits(cpu_possible_mask);
2257 nr_ids = nr_cpu_ids;
2258 clean_xps_maps(dev, possible_mask, dev_maps, nr_ids, offset, count,
2259 false);
2260
2261out_no_maps:
2262 mutex_unlock(&xps_map_mutex);
2263 cpus_read_unlock();
2264}
2265
2266static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
2267{
2268 netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
2269}
2270
2271static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index,
2272 u16 index, bool is_rxqs_map)
2273{
2274 struct xps_map *new_map;
2275 int alloc_len = XPS_MIN_MAP_ALLOC;
2276 int i, pos;
2277
2278 for (pos = 0; map && pos < map->len; pos++) {
2279 if (map->queues[pos] != index)
2280 continue;
2281 return map;
2282 }
2283
2284
2285 if (map) {
2286 if (pos < map->alloc_len)
2287 return map;
2288
2289 alloc_len = map->alloc_len * 2;
2290 }
2291
2292
2293
2294
2295 if (is_rxqs_map)
2296 new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL);
2297 else
2298 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
2299 cpu_to_node(attr_index));
2300 if (!new_map)
2301 return NULL;
2302
2303 for (i = 0; i < pos; i++)
2304 new_map->queues[i] = map->queues[i];
2305 new_map->alloc_len = alloc_len;
2306 new_map->len = pos;
2307
2308 return new_map;
2309}
2310
2311
2312int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
2313 u16 index, bool is_rxqs_map)
2314{
2315 const unsigned long *online_mask = NULL, *possible_mask = NULL;
2316 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
2317 int i, j, tci, numa_node_id = -2;
2318 int maps_sz, num_tc = 1, tc = 0;
2319 struct xps_map *map, *new_map;
2320 bool active = false;
2321 unsigned int nr_ids;
2322
2323 if (dev->num_tc) {
2324
2325 num_tc = dev->num_tc;
2326 if (num_tc < 0)
2327 return -EINVAL;
2328
2329
2330 dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
2331
2332 tc = netdev_txq_to_tc(dev, index);
2333 if (tc < 0)
2334 return -EINVAL;
2335 }
2336
2337 mutex_lock(&xps_map_mutex);
2338 if (is_rxqs_map) {
2339 maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues);
2340 dev_maps = xmap_dereference(dev->xps_rxqs_map);
2341 nr_ids = dev->num_rx_queues;
2342 } else {
2343 maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc);
2344 if (num_possible_cpus() > 1) {
2345 online_mask = cpumask_bits(cpu_online_mask);
2346 possible_mask = cpumask_bits(cpu_possible_mask);
2347 }
2348 dev_maps = xmap_dereference(dev->xps_cpus_map);
2349 nr_ids = nr_cpu_ids;
2350 }
2351
2352 if (maps_sz < L1_CACHE_BYTES)
2353 maps_sz = L1_CACHE_BYTES;
2354
2355
2356 for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids),
2357 j < nr_ids;) {
2358 if (!new_dev_maps)
2359 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
2360 if (!new_dev_maps) {
2361 mutex_unlock(&xps_map_mutex);
2362 return -ENOMEM;
2363 }
2364
2365 tci = j * num_tc + tc;
2366 map = dev_maps ? xmap_dereference(dev_maps->attr_map[tci]) :
2367 NULL;
2368
2369 map = expand_xps_map(map, j, index, is_rxqs_map);
2370 if (!map)
2371 goto error;
2372
2373 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2374 }
2375
2376 if (!new_dev_maps)
2377 goto out_no_new_maps;
2378
2379 if (!dev_maps) {
2380
2381 static_key_slow_inc_cpuslocked(&xps_needed);
2382 if (is_rxqs_map)
2383 static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
2384 }
2385
2386 for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
2387 j < nr_ids;) {
2388
2389 for (i = tc, tci = j * num_tc; dev_maps && i--; tci++) {
2390
2391 map = xmap_dereference(dev_maps->attr_map[tci]);
2392 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2393 }
2394
2395
2396
2397
2398 tci = j * num_tc + tc;
2399
2400 if (netif_attr_test_mask(j, mask, nr_ids) &&
2401 netif_attr_test_online(j, online_mask, nr_ids)) {
2402
2403 int pos = 0;
2404
2405 map = xmap_dereference(new_dev_maps->attr_map[tci]);
2406 while ((pos < map->len) && (map->queues[pos] != index))
2407 pos++;
2408
2409 if (pos == map->len)
2410 map->queues[map->len++] = index;
2411#ifdef CONFIG_NUMA
2412 if (!is_rxqs_map) {
2413 if (numa_node_id == -2)
2414 numa_node_id = cpu_to_node(j);
2415 else if (numa_node_id != cpu_to_node(j))
2416 numa_node_id = -1;
2417 }
2418#endif
2419 } else if (dev_maps) {
2420
2421 map = xmap_dereference(dev_maps->attr_map[tci]);
2422 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2423 }
2424
2425
2426 for (i = num_tc - tc, tci++; dev_maps && --i; tci++) {
2427
2428 map = xmap_dereference(dev_maps->attr_map[tci]);
2429 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2430 }
2431 }
2432
2433 if (is_rxqs_map)
2434 rcu_assign_pointer(dev->xps_rxqs_map, new_dev_maps);
2435 else
2436 rcu_assign_pointer(dev->xps_cpus_map, new_dev_maps);
2437
2438
2439 if (!dev_maps)
2440 goto out_no_old_maps;
2441
2442 for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
2443 j < nr_ids;) {
2444 for (i = num_tc, tci = j * num_tc; i--; tci++) {
2445 new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
2446 map = xmap_dereference(dev_maps->attr_map[tci]);
2447 if (map && map != new_map)
2448 kfree_rcu(map, rcu);
2449 }
2450 }
2451
2452 kfree_rcu(dev_maps, rcu);
2453
2454out_no_old_maps:
2455 dev_maps = new_dev_maps;
2456 active = true;
2457
2458out_no_new_maps:
2459 if (!is_rxqs_map) {
2460
2461 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2462 (numa_node_id >= 0) ?
2463 numa_node_id : NUMA_NO_NODE);
2464 }
2465
2466 if (!dev_maps)
2467 goto out_no_maps;
2468
2469
2470 for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
2471 j < nr_ids;) {
2472 for (i = tc, tci = j * num_tc; i--; tci++)
2473 active |= remove_xps_queue(dev_maps, tci, index);
2474 if (!netif_attr_test_mask(j, mask, nr_ids) ||
2475 !netif_attr_test_online(j, online_mask, nr_ids))
2476 active |= remove_xps_queue(dev_maps, tci, index);
2477 for (i = num_tc - tc, tci++; --i; tci++)
2478 active |= remove_xps_queue(dev_maps, tci, index);
2479 }
2480
2481
2482 if (!active)
2483 reset_xps_maps(dev, dev_maps, is_rxqs_map);
2484
2485out_no_maps:
2486 mutex_unlock(&xps_map_mutex);
2487
2488 return 0;
2489error:
2490
2491 for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
2492 j < nr_ids;) {
2493 for (i = num_tc, tci = j * num_tc; i--; tci++) {
2494 new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
2495 map = dev_maps ?
2496 xmap_dereference(dev_maps->attr_map[tci]) :
2497 NULL;
2498 if (new_map && new_map != map)
2499 kfree(new_map);
2500 }
2501 }
2502
2503 mutex_unlock(&xps_map_mutex);
2504
2505 kfree(new_dev_maps);
2506 return -ENOMEM;
2507}
2508EXPORT_SYMBOL_GPL(__netif_set_xps_queue);
2509
2510int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2511 u16 index)
2512{
2513 int ret;
2514
2515 cpus_read_lock();
2516 ret = __netif_set_xps_queue(dev, cpumask_bits(mask), index, false);
2517 cpus_read_unlock();
2518
2519 return ret;
2520}
2521EXPORT_SYMBOL(netif_set_xps_queue);
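
/*
 * Illustrative sketch (not part of this file): a multiqueue driver can pin
 * transmit queue i to the CPU(s) that service it, e.g. from its queue
 * setup path.  The "i" and "mask" below are hypothetical driver-local
 * variables.
 *
 *	cpumask_var_t mask;
 *
 *	if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
 *		cpumask_set_cpu(i % num_online_cpus(), mask);
 *		netif_set_xps_queue(dev, mask, i);
 *		free_cpumask_var(mask);
 *	}
 */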
2522
2523#endif
2524static void netdev_unbind_all_sb_channels(struct net_device *dev)
2525{
2526 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2527
2528
2529 while (txq-- != &dev->_tx[0]) {
2530 if (txq->sb_dev)
2531 netdev_unbind_sb_channel(dev, txq->sb_dev);
2532 }
2533}
2534
2535void netdev_reset_tc(struct net_device *dev)
2536{
2537#ifdef CONFIG_XPS
2538 netif_reset_xps_queues_gt(dev, 0);
2539#endif
2540 netdev_unbind_all_sb_channels(dev);
2541
2542
2543 dev->num_tc = 0;
2544 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
2545 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
2546}
2547EXPORT_SYMBOL(netdev_reset_tc);
2548
2549int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
2550{
2551 if (tc >= dev->num_tc)
2552 return -EINVAL;
2553
2554#ifdef CONFIG_XPS
2555 netif_reset_xps_queues(dev, offset, count);
2556#endif
2557 dev->tc_to_txq[tc].count = count;
2558 dev->tc_to_txq[tc].offset = offset;
2559 return 0;
2560}
2561EXPORT_SYMBOL(netdev_set_tc_queue);
2562
2563int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
2564{
2565 if (num_tc > TC_MAX_QUEUE)
2566 return -EINVAL;
2567
2568#ifdef CONFIG_XPS
2569 netif_reset_xps_queues_gt(dev, 0);
2570#endif
2571 netdev_unbind_all_sb_channels(dev);
2572
2573 dev->num_tc = num_tc;
2574 return 0;
2575}
2576EXPORT_SYMBOL(netdev_set_num_tc);
2577
2578void netdev_unbind_sb_channel(struct net_device *dev,
2579 struct net_device *sb_dev)
2580{
2581 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2582
2583#ifdef CONFIG_XPS
2584 netif_reset_xps_queues_gt(sb_dev, 0);
2585#endif
2586 memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq));
2587 memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map));
2588
2589 while (txq-- != &dev->_tx[0]) {
2590 if (txq->sb_dev == sb_dev)
2591 txq->sb_dev = NULL;
2592 }
2593}
2594EXPORT_SYMBOL(netdev_unbind_sb_channel);
2595
2596int netdev_bind_sb_channel_queue(struct net_device *dev,
2597 struct net_device *sb_dev,
2598 u8 tc, u16 count, u16 offset)
2599{
2600
2601 if (sb_dev->num_tc >= 0 || tc >= dev->num_tc)
2602 return -EINVAL;
2603
2604
2605 if ((offset + count) > dev->real_num_tx_queues)
2606 return -EINVAL;
2607
2608
2609 sb_dev->tc_to_txq[tc].count = count;
2610 sb_dev->tc_to_txq[tc].offset = offset;
2611
2612
2613
2614
2615 while (count--)
2616 netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev;
2617
2618 return 0;
2619}
2620EXPORT_SYMBOL(netdev_bind_sb_channel_queue);
2621
2622int netdev_set_sb_channel(struct net_device *dev, u16 channel)
2623{
2624
2625 if (netif_is_multiqueue(dev))
2626 return -ENODEV;
2627
2628
2629
2630
2631
2632
2633 if (channel > S16_MAX)
2634 return -EINVAL;
2635
2636 dev->num_tc = -channel;
2637
2638 return 0;
2639}
2640EXPORT_SYMBOL(netdev_set_sb_channel);

/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues stale skbs on the qdisc must be flushed.
 */
2646int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
2647{
2648 bool disabling;
2649 int rc;
2650
2651 disabling = txq < dev->real_num_tx_queues;
2652
2653 if (txq < 1 || txq > dev->num_tx_queues)
2654 return -EINVAL;
2655
2656 if (dev->reg_state == NETREG_REGISTERED ||
2657 dev->reg_state == NETREG_UNREGISTERING) {
2658 ASSERT_RTNL();
2659
2660 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2661 txq);
2662 if (rc)
2663 return rc;
2664
2665 if (dev->num_tc)
2666 netif_setup_tc(dev, txq);
2667
2668 dev->real_num_tx_queues = txq;
2669
2670 if (disabling) {
2671 synchronize_net();
2672 qdisc_reset_all_tx_gt(dev, txq);
2673#ifdef CONFIG_XPS
2674 netif_reset_xps_queues_gt(dev, txq);
2675#endif
2676 }
2677 } else {
2678 dev->real_num_tx_queues = txq;
2679 }
2680
2681 return 0;
2682}
2683EXPORT_SYMBOL(netif_set_real_num_tx_queues);
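
/*
 * Illustrative sketch (not part of this file): a driver allocated with room
 * for many queues can later shrink or grow the number actually used, e.g.
 * after negotiating channels with firmware (the "nch" count is hypothetical):
 *
 *	err = netif_set_real_num_tx_queues(dev, nch);
 *	if (!err)
 *		err = netif_set_real_num_rx_queues(dev, nch);
 */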
2684
#ifdef CONFIG_SYSFS
/**
 *	netif_set_real_num_rx_queues - set actual number of RX queues used
 *	@dev: Network device
 *	@rxq: Actual number of RX queues
 *
 *	This must be called either with the rtnl_lock held or before
 *	registration of the net device.  Returns 0 on success, or a
 *	negative error code.  If called before registration, it always
 *	succeeds.
 */
2696int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2697{
2698 int rc;
2699
2700 if (rxq < 1 || rxq > dev->num_rx_queues)
2701 return -EINVAL;
2702
2703 if (dev->reg_state == NETREG_REGISTERED) {
2704 ASSERT_RTNL();
2705
2706 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2707 rxq);
2708 if (rc)
2709 return rc;
2710 }
2711
2712 dev->real_num_rx_queues = rxq;
2713 return 0;
2714}
2715EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2716#endif

/**
 * netif_get_num_default_rss_queues - default number of RSS queues
 *
 * This routine should set an upper limit on the number of RSS queues
 * used by default by multiqueue devices.
 */
2724int netif_get_num_default_rss_queues(void)
2725{
2726 return is_kdump_kernel() ?
2727 1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
2728}
2729EXPORT_SYMBOL(netif_get_num_default_rss_queues);
2730
2731static void __netif_reschedule(struct Qdisc *q)
2732{
2733 struct softnet_data *sd;
2734 unsigned long flags;
2735
2736 local_irq_save(flags);
2737 sd = this_cpu_ptr(&softnet_data);
2738 q->next_sched = NULL;
2739 *sd->output_queue_tailp = q;
2740 sd->output_queue_tailp = &q->next_sched;
2741 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2742 local_irq_restore(flags);
2743}
2744
2745void __netif_schedule(struct Qdisc *q)
2746{
2747 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
2748 __netif_reschedule(q);
2749}
2750EXPORT_SYMBOL(__netif_schedule);
2751
2752struct dev_kfree_skb_cb {
2753 enum skb_free_reason reason;
2754};
2755
2756static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
2757{
2758 return (struct dev_kfree_skb_cb *)skb->cb;
2759}
2760
2761void netif_schedule_queue(struct netdev_queue *txq)
2762{
2763 rcu_read_lock();
2764 if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
2765 struct Qdisc *q = rcu_dereference(txq->qdisc);
2766
2767 __netif_schedule(q);
2768 }
2769 rcu_read_unlock();
2770}
2771EXPORT_SYMBOL(netif_schedule_queue);
2772
2773void netif_tx_wake_queue(struct netdev_queue *dev_queue)
2774{
2775 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
2776 struct Qdisc *q;
2777
2778 rcu_read_lock();
2779 q = rcu_dereference(dev_queue->qdisc);
2780 __netif_schedule(q);
2781 rcu_read_unlock();
2782 }
2783}
2784EXPORT_SYMBOL(netif_tx_wake_queue);
2785
2786void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
2787{
2788 unsigned long flags;
2789
2790 if (unlikely(!skb))
2791 return;
2792
2793 if (likely(refcount_read(&skb->users) == 1)) {
2794 smp_rmb();
2795 refcount_set(&skb->users, 0);
2796 } else if (likely(!refcount_dec_and_test(&skb->users))) {
2797 return;
2798 }
2799 get_kfree_skb_cb(skb)->reason = reason;
2800 local_irq_save(flags);
2801 skb->next = __this_cpu_read(softnet_data.completion_queue);
2802 __this_cpu_write(softnet_data.completion_queue, skb);
2803 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2804 local_irq_restore(flags);
2805}
2806EXPORT_SYMBOL(__dev_kfree_skb_irq);
2807
2808void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
2809{
2810 if (in_irq() || irqs_disabled())
2811 __dev_kfree_skb_irq(skb, reason);
2812 else
2813 dev_kfree_skb(skb);
2814}
2815EXPORT_SYMBOL(__dev_kfree_skb_any);
2816
2817
2818
2819
2820
2821
2822
2823
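/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */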
2824void netif_device_detach(struct net_device *dev)
2825{
2826 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
2827 netif_running(dev)) {
2828 netif_tx_stop_all_queues(dev);
2829 }
2830}
2831EXPORT_SYMBOL(netif_device_detach);
2832
2833
2834
2835
2836
2837
2838
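/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached from system and restart if needed.
 */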
2839void netif_device_attach(struct net_device *dev)
2840{
2841 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
2842 netif_running(dev)) {
2843 netif_tx_wake_all_queues(dev);
2844 __netdev_watchdog_up(dev);
2845 }
2846}
2847EXPORT_SYMBOL(netif_device_attach);
2848
2849
2850
2851
2852
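/*
 * Returns a Tx hash for the given packet, constrained to the range of Tx
 * queues (count and offset) configured for the matching traffic class.
 */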
2853static u16 skb_tx_hash(const struct net_device *dev,
2854 const struct net_device *sb_dev,
2855 struct sk_buff *skb)
2856{
2857 u32 hash;
2858 u16 qoffset = 0;
2859 u16 qcount = dev->real_num_tx_queues;
2860
2861 if (dev->num_tc) {
2862 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
2863
2864 qoffset = sb_dev->tc_to_txq[tc].offset;
2865 qcount = sb_dev->tc_to_txq[tc].count;
2866 }
2867
2868 if (skb_rx_queue_recorded(skb)) {
2869 hash = skb_get_rx_queue(skb);
2870 while (unlikely(hash >= qcount))
2871 hash -= qcount;
2872 return hash + qoffset;
2873 }
2874
2875 return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
2876}
2877
2878static void skb_warn_bad_offload(const struct sk_buff *skb)
2879{
2880 static const netdev_features_t null_features;
2881 struct net_device *dev = skb->dev;
2882 const char *name = "";
2883
2884 if (!net_ratelimit())
2885 return;
2886
2887 if (dev) {
2888 if (dev->dev.parent)
2889 name = dev_driver_string(dev->dev.parent);
2890 else
2891 name = netdev_name(dev);
2892 }
2893 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2894 "gso_type=%d ip_summed=%d\n",
2895 name, dev ? &dev->features : &null_features,
2896 skb->sk ? &skb->sk->sk_route_caps : &null_features,
2897 skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2898 skb_shinfo(skb)->gso_type, skb->ip_summed);
2899}
2900
2901
2902
2903
2904
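/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */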
2905int skb_checksum_help(struct sk_buff *skb)
2906{
2907 __wsum csum;
2908 int ret = 0, offset;
2909
2910 if (skb->ip_summed == CHECKSUM_COMPLETE)
2911 goto out_set_summed;
2912
2913 if (unlikely(skb_shinfo(skb)->gso_size)) {
2914 skb_warn_bad_offload(skb);
2915 return -EINVAL;
2916 }
2917
2918
2919
2920
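	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity: the checksum could be wrong.
	 */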
2921 if (skb_has_shared_frag(skb)) {
2922 ret = __skb_linearize(skb);
2923 if (ret)
2924 goto out;
2925 }
2926
2927 offset = skb_checksum_start_offset(skb);
2928 BUG_ON(offset >= skb_headlen(skb));
2929 csum = skb_checksum(skb, offset, skb->len - offset, 0);
2930
2931 offset += skb->csum_offset;
2932 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
2933
2934 if (skb_cloned(skb) &&
2935 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
2936 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2937 if (ret)
2938 goto out;
2939 }
2940
2941 *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
2942out_set_summed:
2943 skb->ip_summed = CHECKSUM_NONE;
2944out:
2945 return ret;
2946}
2947EXPORT_SYMBOL(skb_checksum_help);
2948
2949int skb_crc32c_csum_help(struct sk_buff *skb)
2950{
2951 __le32 crc32c_csum;
2952 int ret = 0, offset, start;
2953
2954 if (skb->ip_summed != CHECKSUM_PARTIAL)
2955 goto out;
2956
2957 if (unlikely(skb_is_gso(skb)))
2958 goto out;
2959
2960
2961
2962
2963 if (unlikely(skb_has_shared_frag(skb))) {
2964 ret = __skb_linearize(skb);
2965 if (ret)
2966 goto out;
2967 }
2968 start = skb_checksum_start_offset(skb);
2969 offset = start + offsetof(struct sctphdr, checksum);
2970 if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
2971 ret = -EINVAL;
2972 goto out;
2973 }
2974 if (skb_cloned(skb) &&
2975 !skb_clone_writable(skb, offset + sizeof(__le32))) {
2976 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2977 if (ret)
2978 goto out;
2979 }
2980 crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
2981 skb->len - start, ~(__u32)0,
2982 crc32c_csum_stub));
2983 *(__le32 *)(skb->data + offset) = crc32c_csum;
2984 skb->ip_summed = CHECKSUM_NONE;
2985 skb->csum_not_inet = 0;
2986out:
2987 return ret;
2988}
2989
2990__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
2991{
2992 __be16 type = skb->protocol;
2993
2994
2995 if (type == htons(ETH_P_TEB)) {
2996 struct ethhdr *eth;
2997
2998 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
2999 return 0;
3000
3001 eth = (struct ethhdr *)skb->data;
3002 type = eth->h_proto;
3003 }
3004
3005 return __vlan_get_protocol(skb, type, depth);
3006}
3007
3008
3009
3010
3011
3012
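/**
 *	skb_mac_gso_segment - mac layer segmentation handler
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 */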
3013struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
3014 netdev_features_t features)
3015{
3016 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
3017 struct packet_offload *ptype;
3018 int vlan_depth = skb->mac_len;
3019 __be16 type = skb_network_protocol(skb, &vlan_depth);
3020
3021 if (unlikely(!type))
3022 return ERR_PTR(-EINVAL);
3023
3024 __skb_pull(skb, vlan_depth);
3025
3026 rcu_read_lock();
3027 list_for_each_entry_rcu(ptype, &offload_base, list) {
3028 if (ptype->type == type && ptype->callbacks.gso_segment) {
3029 segs = ptype->callbacks.gso_segment(skb, features);
3030 break;
3031 }
3032 }
3033 rcu_read_unlock();
3034
3035 __skb_push(skb, skb->data - skb_mac_header(skb));
3036
3037 return segs;
3038}
3039EXPORT_SYMBOL(skb_mac_gso_segment);
3040
3041
3042
3043
3044static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
3045{
3046 if (tx_path)
3047 return skb->ip_summed != CHECKSUM_PARTIAL &&
3048 skb->ip_summed != CHECKSUM_UNNECESSARY;
3049
3050 return skb->ip_summed == CHECKSUM_NONE;
3051}
3052
3053
3054
3055
3056
3057
3058
3059
3060
3061
3062
3063
3064
3065
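/**
 *	__skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *	@tx_path: whether it is called in TX path
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 *
 *	Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb.
 */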
3066struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
3067 netdev_features_t features, bool tx_path)
3068{
3069 struct sk_buff *segs;
3070
3071 if (unlikely(skb_needs_check(skb, tx_path))) {
3072 int err;
3073
3074
3075 err = skb_cow_head(skb, 0);
3076 if (err < 0)
3077 return ERR_PTR(err);
3078 }
3079
3080
3081
3082
3083
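	/* Only report GSO partial support if it will enable us to
	 * support segmentation on this frame without needing additional
	 * work.
	 */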
3084 if (features & NETIF_F_GSO_PARTIAL) {
3085 netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
3086 struct net_device *dev = skb->dev;
3087
3088 partial_features |= dev->features & dev->gso_partial_features;
3089 if (!skb_gso_ok(skb, features | partial_features))
3090 features &= ~NETIF_F_GSO_PARTIAL;
3091 }
3092
3093 BUILD_BUG_ON(SKB_SGO_CB_OFFSET +
3094 sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
3095
3096 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
3097 SKB_GSO_CB(skb)->encap_level = 0;
3098
3099 skb_reset_mac_header(skb);
3100 skb_reset_mac_len(skb);
3101
3102 segs = skb_mac_gso_segment(skb, features);
3103
3104 if (unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
3105 skb_warn_bad_offload(skb);
3106
3107 return segs;
3108}
3109EXPORT_SYMBOL(__skb_gso_segment);
3110
3111
3112#ifdef CONFIG_BUG
3113void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
3114{
3115 if (net_ratelimit()) {
3116 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
3117 if (dev)
3118 pr_err("dev features: %pNF\n", &dev->features);
3119 pr_err("skb len=%u data_len=%u pkt_type=%u gso_size=%u gso_type=%u nr_frags=%u ip_summed=%u csum=%x csum_complete_sw=%d csum_valid=%d csum_level=%u\n",
3120 skb->len, skb->data_len, skb->pkt_type,
3121 skb_shinfo(skb)->gso_size, skb_shinfo(skb)->gso_type,
3122 skb_shinfo(skb)->nr_frags, skb->ip_summed, skb->csum,
3123 skb->csum_complete_sw, skb->csum_valid, skb->csum_level);
3124 dump_stack();
3125 }
3126}
3127EXPORT_SYMBOL(netdev_rx_csum_fault);
3128#endif
3129
3130
3131static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
3132{
3133#ifdef CONFIG_HIGHMEM
3134 int i;
3135
3136 if (!(dev->features & NETIF_F_HIGHDMA)) {
3137 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3138 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3139
3140 if (PageHighMem(skb_frag_page(frag)))
3141 return 1;
3142 }
3143 }
3144#endif
3145 return 0;
3146}
3147
3148
3149
3150
3151#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
3152static netdev_features_t net_mpls_features(struct sk_buff *skb,
3153 netdev_features_t features,
3154 __be16 type)
3155{
3156 if (eth_p_mpls(type))
3157 features &= skb->dev->mpls_features;
3158
3159 return features;
3160}
3161#else
3162static netdev_features_t net_mpls_features(struct sk_buff *skb,
3163 netdev_features_t features,
3164 __be16 type)
3165{
3166 return features;
3167}
3168#endif
3169
3170static netdev_features_t harmonize_features(struct sk_buff *skb,
3171 netdev_features_t features)
3172{
3173 int tmp;
3174 __be16 type;
3175
3176 type = skb_network_protocol(skb, &tmp);
3177 features = net_mpls_features(skb, features, type);
3178
3179 if (skb->ip_summed != CHECKSUM_NONE &&
3180 !can_checksum_protocol(features, type)) {
3181 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3182 }
3183 if (illegal_highdma(skb->dev, skb))
3184 features &= ~NETIF_F_SG;
3185
3186 return features;
3187}
3188
3189netdev_features_t passthru_features_check(struct sk_buff *skb,
3190 struct net_device *dev,
3191 netdev_features_t features)
3192{
3193 return features;
3194}
3195EXPORT_SYMBOL(passthru_features_check);
3196
3197static netdev_features_t dflt_features_check(struct sk_buff *skb,
3198 struct net_device *dev,
3199 netdev_features_t features)
3200{
3201 return vlan_features_check(skb, features);
3202}
3203
3204static netdev_features_t gso_features_check(const struct sk_buff *skb,
3205 struct net_device *dev,
3206 netdev_features_t features)
3207{
3208 u16 gso_segs = skb_shinfo(skb)->gso_segs;
3209
3210 if (gso_segs > dev->gso_max_segs)
3211 return features & ~NETIF_F_GSO_MASK;
3212
3213
3214
3215
3216
3217
3218
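	/* Support for GSO partial features requires software
	 * intervention before we can actually process the packets
	 * so we need to strip support for any partial features now
	 * and we can pull them back in after we have partially
	 * segmented the frame.
	 */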
3219 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
3220 features &= ~dev->gso_partial_features;
3221
3222
3223
3224
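	/* Make sure to clear the IPv4 ID mangling feature if the
	 * IPv4 header has the potential to be fragmented.
	 */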
3225 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
3226 struct iphdr *iph = skb->encapsulation ?
3227 inner_ip_hdr(skb) : ip_hdr(skb);
3228
3229 if (!(iph->frag_off & htons(IP_DF)))
3230 features &= ~NETIF_F_TSO_MANGLEID;
3231 }
3232
3233 return features;
3234}
3235
3236netdev_features_t netif_skb_features(struct sk_buff *skb)
3237{
3238 struct net_device *dev = skb->dev;
3239 netdev_features_t features = dev->features;
3240
3241 if (skb_is_gso(skb))
3242 features = gso_features_check(skb, dev, features);
3243
3244
3245
3246
3247
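	/* If encapsulation offload is requested, verify we are testing
	 * hardware encapsulation features instead of standard
	 * features for the netdev.
	 */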
3248 if (skb->encapsulation)
3249 features &= dev->hw_enc_features;
3250
3251 if (skb_vlan_tagged(skb))
3252 features = netdev_intersect_features(features,
3253 dev->vlan_features |
3254 NETIF_F_HW_VLAN_CTAG_TX |
3255 NETIF_F_HW_VLAN_STAG_TX);
3256
3257 if (dev->netdev_ops->ndo_features_check)
3258 features &= dev->netdev_ops->ndo_features_check(skb, dev,
3259 features);
3260 else
3261 features &= dflt_features_check(skb, dev, features);
3262
3263 return harmonize_features(skb, features);
3264}
3265EXPORT_SYMBOL(netif_skb_features);
3266
3267static int xmit_one(struct sk_buff *skb, struct net_device *dev,
3268 struct netdev_queue *txq, bool more)
3269{
3270 unsigned int len;
3271 int rc;
3272
3273 if (dev_nit_active(dev))
3274 dev_queue_xmit_nit(skb, dev);
3275
3276 len = skb->len;
3277 trace_net_dev_start_xmit(skb, dev);
3278 rc = netdev_start_xmit(skb, dev, txq, more);
3279 trace_net_dev_xmit(skb, rc, dev, len);
3280
3281 return rc;
3282}
3283
3284struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
3285 struct netdev_queue *txq, int *ret)
3286{
3287 struct sk_buff *skb = first;
3288 int rc = NETDEV_TX_OK;
3289
3290 while (skb) {
3291 struct sk_buff *next = skb->next;
3292
3293 skb_mark_not_on_list(skb);
3294 rc = xmit_one(skb, dev, txq, next != NULL);
3295 if (unlikely(!dev_xmit_complete(rc))) {
3296 skb->next = next;
3297 goto out;
3298 }
3299
3300 skb = next;
3301 if (netif_tx_queue_stopped(txq) && skb) {
3302 rc = NETDEV_TX_BUSY;
3303 break;
3304 }
3305 }
3306
3307out:
3308 *ret = rc;
3309 return skb;
3310}
3311
3312static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
3313 netdev_features_t features)
3314{
3315 if (skb_vlan_tag_present(skb) &&
3316 !vlan_hw_offload_capable(features, skb->vlan_proto))
3317 skb = __vlan_hwaccel_push_inside(skb);
3318 return skb;
3319}
3320
3321int skb_csum_hwoffload_help(struct sk_buff *skb,
3322 const netdev_features_t features)
3323{
3324 if (unlikely(skb->csum_not_inet))
3325 return !!(features & NETIF_F_SCTP_CRC) ? 0 :
3326 skb_crc32c_csum_help(skb);
3327
3328 return !!(features & NETIF_F_CSUM_MASK) ? 0 : skb_checksum_help(skb);
3329}
3330EXPORT_SYMBOL(skb_csum_hwoffload_help);
3331
3332static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
3333{
3334 netdev_features_t features;
3335
3336 features = netif_skb_features(skb);
3337 skb = validate_xmit_vlan(skb, features);
3338 if (unlikely(!skb))
3339 goto out_null;
3340
3341 skb = sk_validate_xmit_skb(skb, dev);
3342 if (unlikely(!skb))
3343 goto out_null;
3344
3345 if (netif_needs_gso(skb, features)) {
3346 struct sk_buff *segs;
3347
3348 segs = skb_gso_segment(skb, features);
3349 if (IS_ERR(segs)) {
3350 goto out_kfree_skb;
3351 } else if (segs) {
3352 consume_skb(skb);
3353 skb = segs;
3354 }
3355 } else {
3356 if (skb_needs_linearize(skb, features) &&
3357 __skb_linearize(skb))
3358 goto out_kfree_skb;
3359
3360
3361
3362
3363
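		/* If packet is not checksummed and device does not
		 * support checksumming for this protocol, complete
		 * checksumming here.
		 */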
3364 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3365 if (skb->encapsulation)
3366 skb_set_inner_transport_header(skb,
3367 skb_checksum_start_offset(skb));
3368 else
3369 skb_set_transport_header(skb,
3370 skb_checksum_start_offset(skb));
3371 if (skb_csum_hwoffload_help(skb, features))
3372 goto out_kfree_skb;
3373 }
3374 }
3375
3376 skb = validate_xmit_xfrm(skb, features, again);
3377
3378 return skb;
3379
3380out_kfree_skb:
3381 kfree_skb(skb);
3382out_null:
3383 atomic_long_inc(&dev->tx_dropped);
3384 return NULL;
3385}
3386
3387struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
3388{
3389 struct sk_buff *next, *head = NULL, *tail;
3390
3391 for (; skb != NULL; skb = next) {
3392 next = skb->next;
3393 skb_mark_not_on_list(skb);
3394
3395
3396 skb->prev = skb;
3397
3398 skb = validate_xmit_skb(skb, dev, again);
3399 if (!skb)
3400 continue;
3401
3402 if (!head)
3403 head = skb;
3404 else
3405 tail->next = skb;
3406
3407
3408
3409 tail = skb->prev;
3410 }
3411 return head;
3412}
3413EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
3414
3415static void qdisc_pkt_len_init(struct sk_buff *skb)
3416{
3417 const struct skb_shared_info *shinfo = skb_shinfo(skb);
3418
3419 qdisc_skb_cb(skb)->pkt_len = skb->len;
3420
3421
3422
3423
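	/* To get a more precise estimation of bytes sent on the wire,
	 * we add to pkt_len the header size of all segments.
	 */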
3424 if (shinfo->gso_size) {
3425 unsigned int hdr_len;
3426 u16 gso_segs = shinfo->gso_segs;
3427
3428
3429 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
3430
3431
3432 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
3433 const struct tcphdr *th;
3434 struct tcphdr _tcphdr;
3435
3436 th = skb_header_pointer(skb, skb_transport_offset(skb),
3437 sizeof(_tcphdr), &_tcphdr);
3438 if (likely(th))
3439 hdr_len += __tcp_hdrlen(th);
3440 } else {
3441 struct udphdr _udphdr;
3442
3443 if (skb_header_pointer(skb, skb_transport_offset(skb),
3444 sizeof(_udphdr), &_udphdr))
3445 hdr_len += sizeof(struct udphdr);
3446 }
3447
3448 if (shinfo->gso_type & SKB_GSO_DODGY)
3449 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
3450 shinfo->gso_size);
3451
3452 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
3453 }
3454}
3455
3456static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
3457 struct net_device *dev,
3458 struct netdev_queue *txq)
3459{
3460 spinlock_t *root_lock = qdisc_lock(q);
3461 struct sk_buff *to_free = NULL;
3462 bool contended;
3463 int rc;
3464
3465 qdisc_calculate_pkt_len(skb, q);
3466
3467 if (q->flags & TCQ_F_NOLOCK) {
3468 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
3469 __qdisc_drop(skb, &to_free);
3470 rc = NET_XMIT_DROP;
3471 } else {
3472 rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
3473 qdisc_run(q);
3474 }
3475
3476 if (unlikely(to_free))
3477 kfree_skb_list(to_free);
3478 return rc;
3479 }
3480
3481
3482
3483
3484
3485
3486
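	/*
	 * Heuristic to force contended enqueues to serialize on a
	 * separate lock before trying to get the qdisc main lock.
	 * This permits the qdisc->running owner to get the lock more
	 * often and dequeue packets faster.
	 */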
3487 contended = qdisc_is_running(q);
3488 if (unlikely(contended))
3489 spin_lock(&q->busylock);
3490
3491 spin_lock(root_lock);
3492 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
3493 __qdisc_drop(skb, &to_free);
3494 rc = NET_XMIT_DROP;
3495 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
3496 qdisc_run_begin(q)) {
3497
3498
3499
3500
3501
3502
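		/*
		 * This is a work-conserving queue; there are no old skbs
		 * waiting to be sent out; and the qdisc is not running -
		 * xmit the skb directly.
		 */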
3503 qdisc_bstats_update(q, skb);
3504
3505 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
3506 if (unlikely(contended)) {
3507 spin_unlock(&q->busylock);
3508 contended = false;
3509 }
3510 __qdisc_run(q);
3511 }
3512
3513 qdisc_run_end(q);
3514 rc = NET_XMIT_SUCCESS;
3515 } else {
3516 rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
3517 if (qdisc_run_begin(q)) {
3518 if (unlikely(contended)) {
3519 spin_unlock(&q->busylock);
3520 contended = false;
3521 }
3522 __qdisc_run(q);
3523 qdisc_run_end(q);
3524 }
3525 }
3526 spin_unlock(root_lock);
3527 if (unlikely(to_free))
3528 kfree_skb_list(to_free);
3529 if (unlikely(contended))
3530 spin_unlock(&q->busylock);
3531 return rc;
3532}
3533
3534#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
3535static void skb_update_prio(struct sk_buff *skb)
3536{
3537 const struct netprio_map *map;
3538 const struct sock *sk;
3539 unsigned int prioidx;
3540
3541 if (skb->priority)
3542 return;
3543 map = rcu_dereference_bh(skb->dev->priomap);
3544 if (!map)
3545 return;
3546 sk = skb_to_full_sk(skb);
3547 if (!sk)
3548 return;
3549
3550 prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
3551
3552 if (prioidx < map->priomap_len)
3553 skb->priority = map->priomap[prioidx];
3554}
3555#else
3556#define skb_update_prio(skb)
3557#endif
3558
3559DEFINE_PER_CPU(int, xmit_recursion);
3560EXPORT_SYMBOL(xmit_recursion);
3561
3562
3563
3564
3565
3566
3567
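/**
 *	dev_loopback_xmit - loop back @skb
 *	@net: network namespace this loopback is happening in
 *	@sk:  sk needed to be a netfilter okfn
 *	@skb: buffer to transmit
 */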
3568int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
3569{
3570 skb_reset_mac_header(skb);
3571 __skb_pull(skb, skb_network_offset(skb));
3572 skb->pkt_type = PACKET_LOOPBACK;
3573 skb->ip_summed = CHECKSUM_UNNECESSARY;
3574 WARN_ON(!skb_dst(skb));
3575 skb_dst_force(skb);
3576 netif_rx_ni(skb);
3577 return 0;
3578}
3579EXPORT_SYMBOL(dev_loopback_xmit);
3580
3581#ifdef CONFIG_NET_EGRESS
3582static struct sk_buff *
3583sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
3584{
3585 struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress);
3586 struct tcf_result cl_res;
3587
3588 if (!miniq)
3589 return skb;
3590
3591
3592 mini_qdisc_bstats_cpu_update(miniq, skb);
3593
3594 switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
3595 case TC_ACT_OK:
3596 case TC_ACT_RECLASSIFY:
3597 skb->tc_index = TC_H_MIN(cl_res.classid);
3598 break;
3599 case TC_ACT_SHOT:
3600 mini_qdisc_qstats_cpu_drop(miniq);
3601 *ret = NET_XMIT_DROP;
3602 kfree_skb(skb);
3603 return NULL;
3604 case TC_ACT_STOLEN:
3605 case TC_ACT_QUEUED:
3606 case TC_ACT_TRAP:
3607 *ret = NET_XMIT_SUCCESS;
3608 consume_skb(skb);
3609 return NULL;
3610 case TC_ACT_REDIRECT:
3611
3612 skb_do_redirect(skb);
3613 *ret = NET_XMIT_SUCCESS;
3614 return NULL;
3615 default:
3616 break;
3617 }
3618
3619 return skb;
3620}
3621#endif
3622
3623#ifdef CONFIG_XPS
3624static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
3625 struct xps_dev_maps *dev_maps, unsigned int tci)
3626{
3627 struct xps_map *map;
3628 int queue_index = -1;
3629
3630 if (dev->num_tc) {
3631 tci *= dev->num_tc;
3632 tci += netdev_get_prio_tc_map(dev, skb->priority);
3633 }
3634
3635 map = rcu_dereference(dev_maps->attr_map[tci]);
3636 if (map) {
3637 if (map->len == 1)
3638 queue_index = map->queues[0];
3639 else
3640 queue_index = map->queues[reciprocal_scale(
3641 skb_get_hash(skb), map->len)];
3642 if (unlikely(queue_index >= dev->real_num_tx_queues))
3643 queue_index = -1;
3644 }
3645 return queue_index;
3646}
3647#endif
3648
3649static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
3650 struct sk_buff *skb)
3651{
3652#ifdef CONFIG_XPS
3653 struct xps_dev_maps *dev_maps;
3654 struct sock *sk = skb->sk;
3655 int queue_index = -1;
3656
3657 if (!static_key_false(&xps_needed))
3658 return -1;
3659
3660 rcu_read_lock();
3661 if (!static_key_false(&xps_rxqs_needed))
3662 goto get_cpus_map;
3663
3664 dev_maps = rcu_dereference(sb_dev->xps_rxqs_map);
3665 if (dev_maps) {
3666 int tci = sk_rx_queue_get(sk);
3667
3668 if (tci >= 0 && tci < dev->num_rx_queues)
3669 queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
3670 tci);
3671 }
3672
3673get_cpus_map:
3674 if (queue_index < 0) {
3675 dev_maps = rcu_dereference(sb_dev->xps_cpus_map);
3676 if (dev_maps) {
3677 unsigned int tci = skb->sender_cpu - 1;
3678
3679 queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
3680 tci);
3681 }
3682 }
3683 rcu_read_unlock();
3684
3685 return queue_index;
3686#else
3687 return -1;
3688#endif
3689}
3690
3691u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
3692 struct net_device *sb_dev,
3693 select_queue_fallback_t fallback)
3694{
3695 return 0;
3696}
3697EXPORT_SYMBOL(dev_pick_tx_zero);
3698
3699u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
3700 struct net_device *sb_dev,
3701 select_queue_fallback_t fallback)
3702{
3703 return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
3704}
3705EXPORT_SYMBOL(dev_pick_tx_cpu_id);
3706
3707static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
3708 struct net_device *sb_dev)
3709{
3710 struct sock *sk = skb->sk;
3711 int queue_index = sk_tx_queue_get(sk);
3712
3713 sb_dev = sb_dev ? : dev;
3714
3715 if (queue_index < 0 || skb->ooo_okay ||
3716 queue_index >= dev->real_num_tx_queues) {
3717 int new_index = get_xps_queue(dev, sb_dev, skb);
3718
3719 if (new_index < 0)
3720 new_index = skb_tx_hash(dev, sb_dev, skb);
3721
3722 if (queue_index != new_index && sk &&
3723 sk_fullsock(sk) &&
3724 rcu_access_pointer(sk->sk_dst_cache))
3725 sk_tx_queue_set(sk, new_index);
3726
3727 queue_index = new_index;
3728 }
3729
3730 return queue_index;
3731}
3732
3733struct netdev_queue *netdev_pick_tx(struct net_device *dev,
3734 struct sk_buff *skb,
3735 struct net_device *sb_dev)
3736{
3737 int queue_index = 0;
3738
3739#ifdef CONFIG_XPS
3740 u32 sender_cpu = skb->sender_cpu - 1;
3741
3742 if (sender_cpu >= (u32)NR_CPUS)
3743 skb->sender_cpu = raw_smp_processor_id() + 1;
3744#endif
3745
3746 if (dev->real_num_tx_queues != 1) {
3747 const struct net_device_ops *ops = dev->netdev_ops;
3748
3749 if (ops->ndo_select_queue)
3750 queue_index = ops->ndo_select_queue(dev, skb, sb_dev,
3751 __netdev_pick_tx);
3752 else
3753 queue_index = __netdev_pick_tx(dev, skb, sb_dev);
3754
3755 queue_index = netdev_cap_txqueue(dev, queue_index);
3756 }
3757
3758 skb_set_queue_mapping(skb, queue_index);
3759 return netdev_get_tx_queue(dev, queue_index);
3760}
3761
3762
/**
 *	__dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *	@sb_dev: subordinate device used for L2 forwarding offload
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping. Errors from the queue disciplines,
 *	such as NET_XMIT_DROP (a positive value), can also be returned.
 *
 *	Regardless of the return value, the skb is consumed, so it is not
 *	safe to retry a send to this method without taking a reference on
 *	the skb beforehand.
 *
 *	When calling this method, interrupts MUST be enabled. This is because
 *	the BH enable code must have IRQs enabled so that it will not deadlock.
 */
3788static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
3789{
3790 struct net_device *dev = skb->dev;
3791 struct netdev_queue *txq;
3792 struct Qdisc *q;
3793 int rc = -ENOMEM;
3794 bool again = false;
3795
3796 skb_reset_mac_header(skb);
3797
3798 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
3799 __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
3800
3801
3802
3803
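	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */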
3804 rcu_read_lock_bh();
3805
3806 skb_update_prio(skb);
3807
3808 qdisc_pkt_len_init(skb);
3809#ifdef CONFIG_NET_CLS_ACT
3810 skb->tc_at_ingress = 0;
3811# ifdef CONFIG_NET_EGRESS
3812 if (static_branch_unlikely(&egress_needed_key)) {
3813 skb = sch_handle_egress(skb, &rc, dev);
3814 if (!skb)
3815 goto out;
3816 }
3817# endif
3818#endif
3819
3820
3821
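	/* If device/qdisc don't need skb->dst, release it right now while
	 * it is hot in this cpu cache.
	 */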
3822 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
3823 skb_dst_drop(skb);
3824 else
3825 skb_dst_force(skb);
3826
3827 txq = netdev_pick_tx(dev, skb, sb_dev);
3828 q = rcu_dereference_bh(txq->qdisc);
3829
3830 trace_net_dev_queue(skb);
3831 if (q->enqueue) {
3832 rc = __dev_xmit_skb(skb, q, dev, txq);
3833 goto out;
3834 }
3835
3836
3837
3838
3839
3840
3841
3842
3843
3844
3845
3846
3847
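	/* The device has no queue. Common case for software devices:
	 * loopback, all the sorts of tunnels...
	 *
	 * It is unlikely that netif_tx_lock protection is necessary here
	 * (loopback and IP tunnels are clean ignoring statistics counters),
	 * but some devices may rely on the protection taken below, so take
	 * the HARD_TX_LOCK and guard against xmit recursion.
	 */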
3848 if (dev->flags & IFF_UP) {
3849 int cpu = smp_processor_id();
3850
3851 if (txq->xmit_lock_owner != cpu) {
3852 if (unlikely(__this_cpu_read(xmit_recursion) >
3853 XMIT_RECURSION_LIMIT))
3854 goto recursion_alert;
3855
3856 skb = validate_xmit_skb(skb, dev, &again);
3857 if (!skb)
3858 goto out;
3859
3860 HARD_TX_LOCK(dev, txq, cpu);
3861
3862 if (!netif_xmit_stopped(txq)) {
3863 __this_cpu_inc(xmit_recursion);
3864 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
3865 __this_cpu_dec(xmit_recursion);
3866 if (dev_xmit_complete(rc)) {
3867 HARD_TX_UNLOCK(dev, txq);
3868 goto out;
3869 }
3870 }
3871 HARD_TX_UNLOCK(dev, txq);
3872 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
3873 dev->name);
3874 } else {
3875
3876
3877
3878recursion_alert:
3879 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
3880 dev->name);
3881 }
3882 }
3883
3884 rc = -ENETDOWN;
3885 rcu_read_unlock_bh();
3886
3887 atomic_long_inc(&dev->tx_dropped);
3888 kfree_skb_list(skb);
3889 return rc;
3890out:
3891 rcu_read_unlock_bh();
3892 return rc;
3893}
3894
3895int dev_queue_xmit(struct sk_buff *skb)
3896{
3897 return __dev_queue_xmit(skb, NULL);
3898}
3899EXPORT_SYMBOL(dev_queue_xmit);
3900
3901int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev)
3902{
3903 return __dev_queue_xmit(skb, sb_dev);
3904}
3905EXPORT_SYMBOL(dev_queue_xmit_accel);
3906
3907int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
3908{
3909 struct net_device *dev = skb->dev;
3910 struct sk_buff *orig_skb = skb;
3911 struct netdev_queue *txq;
3912 int ret = NETDEV_TX_BUSY;
3913 bool again = false;
3914
3915 if (unlikely(!netif_running(dev) ||
3916 !netif_carrier_ok(dev)))
3917 goto drop;
3918
3919 skb = validate_xmit_skb_list(skb, dev, &again);
3920 if (skb != orig_skb)
3921 goto drop;
3922
3923 skb_set_queue_mapping(skb, queue_id);
3924 txq = skb_get_tx_queue(dev, skb);
3925
3926 local_bh_disable();
3927
3928 HARD_TX_LOCK(dev, txq, smp_processor_id());
3929 if (!netif_xmit_frozen_or_drv_stopped(txq))
3930 ret = netdev_start_xmit(skb, dev, txq, false);
3931 HARD_TX_UNLOCK(dev, txq);
3932
3933 local_bh_enable();
3934
3935 if (!dev_xmit_complete(ret))
3936 kfree_skb(skb);
3937
3938 return ret;
3939drop:
3940 atomic_long_inc(&dev->tx_dropped);
3941 kfree_skb_list(skb);
3942 return NET_XMIT_DROP;
3943}
3944EXPORT_SYMBOL(dev_direct_xmit);
3945
3946
3947
3948
3949
3950int netdev_max_backlog __read_mostly = 1000;
3951EXPORT_SYMBOL(netdev_max_backlog);
3952
3953int netdev_tstamp_prequeue __read_mostly = 1;
3954int netdev_budget __read_mostly = 300;
3955unsigned int __read_mostly netdev_budget_usecs = 2000;
3956int weight_p __read_mostly = 64;
3957int dev_weight_rx_bias __read_mostly = 1;
3958int dev_weight_tx_bias __read_mostly = 1;
3959int dev_rx_weight __read_mostly = 64;
3960int dev_tx_weight __read_mostly = 64;
3961
3962
3963static inline void ____napi_schedule(struct softnet_data *sd,
3964 struct napi_struct *napi)
3965{
3966 list_add_tail(&napi->poll_list, &sd->poll_list);
3967 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3968}
3969
3970#ifdef CONFIG_RPS
3971
3972
3973struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
3974EXPORT_SYMBOL(rps_sock_flow_table);
3975u32 rps_cpu_mask __read_mostly;
3976EXPORT_SYMBOL(rps_cpu_mask);
3977
3978struct static_key rps_needed __read_mostly;
3979EXPORT_SYMBOL(rps_needed);
3980struct static_key rfs_needed __read_mostly;
3981EXPORT_SYMBOL(rfs_needed);
3982
3983static struct rps_dev_flow *
3984set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3985 struct rps_dev_flow *rflow, u16 next_cpu)
3986{
3987 if (next_cpu < nr_cpu_ids) {
3988#ifdef CONFIG_RFS_ACCEL
3989 struct netdev_rx_queue *rxqueue;
3990 struct rps_dev_flow_table *flow_table;
3991 struct rps_dev_flow *old_rflow;
3992 u32 flow_id;
3993 u16 rxq_index;
3994 int rc;
3995
3996
3997 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
3998 !(dev->features & NETIF_F_NTUPLE))
3999 goto out;
4000 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
4001 if (rxq_index == skb_get_rx_queue(skb))
4002 goto out;
4003
4004 rxqueue = dev->_rx + rxq_index;
4005 flow_table = rcu_dereference(rxqueue->rps_flow_table);
4006 if (!flow_table)
4007 goto out;
4008 flow_id = skb_get_hash(skb) & flow_table->mask;
4009 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
4010 rxq_index, flow_id);
4011 if (rc < 0)
4012 goto out;
4013 old_rflow = rflow;
4014 rflow = &flow_table->flows[flow_id];
4015 rflow->filter = rc;
4016 if (old_rflow->filter == rflow->filter)
4017 old_rflow->filter = RPS_NO_FILTER;
4018 out:
4019#endif
4020 rflow->last_qtail =
4021 per_cpu(softnet_data, next_cpu).input_queue_head;
4022 }
4023
4024 rflow->cpu = next_cpu;
4025 return rflow;
4026}
4027
4028
4029
4030
4031
4032
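/*
 * get_rps_cpu is called from netif_receive_skb and returns the target
 * CPU from the RPS map of the receiving queue for a given skb.
 * rcu_read_lock must be held on entry.
 */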
4033static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4034 struct rps_dev_flow **rflowp)
4035{
4036 const struct rps_sock_flow_table *sock_flow_table;
4037 struct netdev_rx_queue *rxqueue = dev->_rx;
4038 struct rps_dev_flow_table *flow_table;
4039 struct rps_map *map;
4040 int cpu = -1;
4041 u32 tcpu;
4042 u32 hash;
4043
4044 if (skb_rx_queue_recorded(skb)) {
4045 u16 index = skb_get_rx_queue(skb);
4046
4047 if (unlikely(index >= dev->real_num_rx_queues)) {
4048 WARN_ONCE(dev->real_num_rx_queues > 1,
4049 "%s received packet on queue %u, but number "
4050 "of RX queues is %u\n",
4051 dev->name, index, dev->real_num_rx_queues);
4052 goto done;
4053 }
4054 rxqueue += index;
4055 }
4056
4057
4058
4059 flow_table = rcu_dereference(rxqueue->rps_flow_table);
4060 map = rcu_dereference(rxqueue->rps_map);
4061 if (!flow_table && !map)
4062 goto done;
4063
4064 skb_reset_network_header(skb);
4065 hash = skb_get_hash(skb);
4066 if (!hash)
4067 goto done;
4068
4069 sock_flow_table = rcu_dereference(rps_sock_flow_table);
4070 if (flow_table && sock_flow_table) {
4071 struct rps_dev_flow *rflow;
4072 u32 next_cpu;
4073 u32 ident;
4074
4075
4076 ident = sock_flow_table->ents[hash & sock_flow_table->mask];
4077 if ((ident ^ hash) & ~rps_cpu_mask)
4078 goto try_rps;
4079
4080 next_cpu = ident & rps_cpu_mask;
4081
4082
4083
4084
4085 rflow = &flow_table->flows[hash & flow_table->mask];
4086 tcpu = rflow->cpu;
4087
4088
4089
4090
4091
4092
4093
4094
4095
4096
4097
4098
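		/*
		 * If the desired CPU (where last recvmsg was done) is
		 * different from current CPU (one in the rx-queue flow
		 * table entry), switch if one of the following holds:
		 *   - Current CPU is unset (>= nr_cpu_ids).
		 *   - Current CPU is offline.
		 *   - The current CPU's queue tail has advanced beyond the
		 *     last packet that was enqueued using this table entry.
		 *     This guarantees that all previous packets for the flow
		 *     have been dequeued, thus preserving in order delivery.
		 */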
4099 if (unlikely(tcpu != next_cpu) &&
4100 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
4101 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
4102 rflow->last_qtail)) >= 0)) {
4103 tcpu = next_cpu;
4104 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
4105 }
4106
4107 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
4108 *rflowp = rflow;
4109 cpu = tcpu;
4110 goto done;
4111 }
4112 }
4113
4114try_rps:
4115
4116 if (map) {
4117 tcpu = map->cpus[reciprocal_scale(hash, map->len)];
4118 if (cpu_online(tcpu)) {
4119 cpu = tcpu;
4120 goto done;
4121 }
4122 }
4123
4124done:
4125 return cpu;
4126}
4127
4128#ifdef CONFIG_RFS_ACCEL
4129
4130
4131
4132
4133
4134
4135
4136
4137
4138
4139
4140
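/**
 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
 * @dev: Device on which the filter was set
 * @rxq_index: RX queue index
 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
 *
 * Drivers that implement ndo_rx_flow_steer() should periodically call
 * this function for each installed filter and remove the filters for
 * which it returns %true.
 */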
4141bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
4142 u32 flow_id, u16 filter_id)
4143{
4144 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
4145 struct rps_dev_flow_table *flow_table;
4146 struct rps_dev_flow *rflow;
4147 bool expire = true;
4148 unsigned int cpu;
4149
4150 rcu_read_lock();
4151 flow_table = rcu_dereference(rxqueue->rps_flow_table);
4152 if (flow_table && flow_id <= flow_table->mask) {
4153 rflow = &flow_table->flows[flow_id];
4154 cpu = READ_ONCE(rflow->cpu);
4155 if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
4156 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
4157 rflow->last_qtail) <
4158 (int)(10 * flow_table->mask)))
4159 expire = false;
4160 }
4161 rcu_read_unlock();
4162 return expire;
4163}
4164EXPORT_SYMBOL(rps_may_expire_flow);
4165
4166#endif
4167
4168
4169static void rps_trigger_softirq(void *data)
4170{
4171 struct softnet_data *sd = data;
4172
4173 ____napi_schedule(sd, &sd->backlog);
4174 sd->received_rps++;
4175}
4176
4177#endif
4178
4179
4180
4181
4182
4183
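/*
 * Check if this softnet_data structure is another cpu one.
 * If yes, queue it to our IPI list and return 1.
 * If no, return 0.
 */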
4184static int rps_ipi_queued(struct softnet_data *sd)
4185{
4186#ifdef CONFIG_RPS
4187 struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
4188
4189 if (sd != mysd) {
4190 sd->rps_ipi_next = mysd->rps_ipi_list;
4191 mysd->rps_ipi_list = sd;
4192
4193 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4194 return 1;
4195 }
4196#endif
4197 return 0;
4198}
4199
4200#ifdef CONFIG_NET_FLOW_LIMIT
4201int netdev_flow_limit_table_len __read_mostly = (1 << 12);
4202#endif
4203
4204static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
4205{
4206#ifdef CONFIG_NET_FLOW_LIMIT
4207 struct sd_flow_limit *fl;
4208 struct softnet_data *sd;
4209 unsigned int old_flow, new_flow;
4210
4211 if (qlen < (netdev_max_backlog >> 1))
4212 return false;
4213
4214 sd = this_cpu_ptr(&softnet_data);
4215
4216 rcu_read_lock();
4217 fl = rcu_dereference(sd->flow_limit);
4218 if (fl) {
4219 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
4220 old_flow = fl->history[fl->history_head];
4221 fl->history[fl->history_head] = new_flow;
4222
4223 fl->history_head++;
4224 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
4225
4226 if (likely(fl->buckets[old_flow]))
4227 fl->buckets[old_flow]--;
4228
4229 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
4230 fl->count++;
4231 rcu_read_unlock();
4232 return true;
4233 }
4234 }
4235 rcu_read_unlock();
4236#endif
4237 return false;
4238}
4239
4240
4241
4242
4243
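/*
 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
 * queue (may be a remote CPU queue).
 */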
4244static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
4245 unsigned int *qtail)
4246{
4247 struct softnet_data *sd;
4248 unsigned long flags;
4249 unsigned int qlen;
4250
4251 sd = &per_cpu(softnet_data, cpu);
4252
4253 local_irq_save(flags);
4254
4255 rps_lock(sd);
4256 if (!netif_running(skb->dev))
4257 goto drop;
4258 qlen = skb_queue_len(&sd->input_pkt_queue);
4259 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
4260 if (qlen) {
4261enqueue:
4262 __skb_queue_tail(&sd->input_pkt_queue, skb);
4263 input_queue_tail_incr_save(sd, qtail);
4264 rps_unlock(sd);
4265 local_irq_restore(flags);
4266 return NET_RX_SUCCESS;
4267 }
4268
4269
4270
4271
4272 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
4273 if (!rps_ipi_queued(sd))
4274 ____napi_schedule(sd, &sd->backlog);
4275 }
4276 goto enqueue;
4277 }
4278
4279drop:
4280 sd->dropped++;
4281 rps_unlock(sd);
4282
4283 local_irq_restore(flags);
4284
4285 atomic_long_inc(&skb->dev->rx_dropped);
4286 kfree_skb(skb);
4287 return NET_RX_DROP;
4288}
4289
4290static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
4291{
4292 struct net_device *dev = skb->dev;
4293 struct netdev_rx_queue *rxqueue;
4294
4295 rxqueue = dev->_rx;
4296
4297 if (skb_rx_queue_recorded(skb)) {
4298 u16 index = skb_get_rx_queue(skb);
4299
4300 if (unlikely(index >= dev->real_num_rx_queues)) {
4301 WARN_ONCE(dev->real_num_rx_queues > 1,
4302 "%s received packet on queue %u, but number "
4303 "of RX queues is %u\n",
4304 dev->name, index, dev->real_num_rx_queues);
4305
4306 return rxqueue;
4307 }
4308 rxqueue += index;
4309 }
4310 return rxqueue;
4311}
4312
4313static u32 netif_receive_generic_xdp(struct sk_buff *skb,
4314 struct xdp_buff *xdp,
4315 struct bpf_prog *xdp_prog)
4316{
4317 struct netdev_rx_queue *rxqueue;
4318 void *orig_data, *orig_data_end;
4319 u32 metalen, act = XDP_DROP;
4320 __be16 orig_eth_type;
4321 struct ethhdr *eth;
4322 bool orig_bcast;
4323 int hlen, off;
4324 u32 mac_len;
4325
4326
4327
4328
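	/* Reinjected packets coming from act_mirred or similar should
	 * not get XDP generic processing.
	 */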
4329 if (skb_cloned(skb) || skb_is_tc_redirected(skb))
4330 return XDP_PASS;
4331
4332
4333
4334
4335
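	/* XDP packets must be linear and must have sufficient headroom
	 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
	 * native XDP provides, thus we need to do it here as well.
	 */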
4336 if (skb_is_nonlinear(skb) ||
4337 skb_headroom(skb) < XDP_PACKET_HEADROOM) {
4338 int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
4339 int troom = skb->tail + skb->data_len - skb->end;
4340
4341
4342
4343
4344 if (pskb_expand_head(skb,
4345 hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
4346 troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
4347 goto do_drop;
4348 if (skb_linearize(skb))
4349 goto do_drop;
4350 }
4351
4352
4353
4354
4355 mac_len = skb->data - skb_mac_header(skb);
4356 hlen = skb_headlen(skb) + mac_len;
4357 xdp->data = skb->data - mac_len;
4358 xdp->data_meta = xdp->data;
4359 xdp->data_end = xdp->data + hlen;
4360 xdp->data_hard_start = skb->data - skb_headroom(skb);
4361 orig_data_end = xdp->data_end;
4362 orig_data = xdp->data;
4363 eth = (struct ethhdr *)xdp->data;
4364 orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest);
4365 orig_eth_type = eth->h_proto;
4366
4367 rxqueue = netif_get_rxqueue(skb);
4368 xdp->rxq = &rxqueue->xdp_rxq;
4369
4370 act = bpf_prog_run_xdp(xdp_prog, xdp);
4371
4372 off = xdp->data - orig_data;
4373 if (off > 0)
4374 __skb_pull(skb, off);
4375 else if (off < 0)
4376 __skb_push(skb, -off);
4377 skb->mac_header += off;
4378
4379
4380
4381
4382 off = orig_data_end - xdp->data_end;
4383 if (off != 0) {
4384 skb_set_tail_pointer(skb, xdp->data_end - xdp->data);
4385 skb->len -= off;
4386
4387 }
4388
4389
4390 eth = (struct ethhdr *)xdp->data;
4391 if ((orig_eth_type != eth->h_proto) ||
4392 (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) {
4393 __skb_push(skb, ETH_HLEN);
4394 skb->protocol = eth_type_trans(skb, skb->dev);
4395 }
4396
4397 switch (act) {
4398 case XDP_REDIRECT:
4399 case XDP_TX:
4400 __skb_push(skb, mac_len);
4401 break;
4402 case XDP_PASS:
4403 metalen = xdp->data - xdp->data_meta;
4404 if (metalen)
4405 skb_metadata_set(skb, metalen);
4406 break;
4407 default:
4408 bpf_warn_invalid_xdp_action(act);
4409
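		/* fall through */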
4410 case XDP_ABORTED:
4411 trace_xdp_exception(skb->dev, xdp_prog, act);
4412
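		/* fall through */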
4413 case XDP_DROP:
4414 do_drop:
4415 kfree_skb(skb);
4416 break;
4417 }
4418
4419 return act;
4420}
4421
4422
4423
4424
4425void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
4426{
4427 struct net_device *dev = skb->dev;
4428 struct netdev_queue *txq;
4429 bool free_skb = true;
4430 int cpu, rc;
4431
4432 txq = netdev_pick_tx(dev, skb, NULL);
4433 cpu = smp_processor_id();
4434 HARD_TX_LOCK(dev, txq, cpu);
4435 if (!netif_xmit_stopped(txq)) {
4436 rc = netdev_start_xmit(skb, dev, txq, 0);
4437 if (dev_xmit_complete(rc))
4438 free_skb = false;
4439 }
4440 HARD_TX_UNLOCK(dev, txq);
4441 if (free_skb) {
4442 trace_xdp_exception(dev, xdp_prog, XDP_TX);
4443 kfree_skb(skb);
4444 }
4445}
4446EXPORT_SYMBOL_GPL(generic_xdp_tx);
4447
4448static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);
4449
4450int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
4451{
4452 if (xdp_prog) {
4453 struct xdp_buff xdp;
4454 u32 act;
4455 int err;
4456
4457 act = netif_receive_generic_xdp(skb, &xdp, xdp_prog);
4458 if (act != XDP_PASS) {
4459 switch (act) {
4460 case XDP_REDIRECT:
4461 err = xdp_do_generic_redirect(skb->dev, skb,
4462 &xdp, xdp_prog);
4463 if (err)
4464 goto out_redir;
4465 break;
4466 case XDP_TX:
4467 generic_xdp_tx(skb, xdp_prog);
4468 break;
4469 }
4470 return XDP_DROP;
4471 }
4472 }
4473 return XDP_PASS;
4474out_redir:
4475 kfree_skb(skb);
4476 return XDP_DROP;
4477}
4478EXPORT_SYMBOL_GPL(do_xdp_generic);
4479
4480static int netif_rx_internal(struct sk_buff *skb)
4481{
4482 int ret;
4483
4484 net_timestamp_check(netdev_tstamp_prequeue, skb);
4485
4486 trace_netif_rx(skb);
4487
4488 if (static_branch_unlikely(&generic_xdp_needed_key)) {
4489 int ret;
4490
4491 preempt_disable();
4492 rcu_read_lock();
4493 ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
4494 rcu_read_unlock();
4495 preempt_enable();
4496
4497
4498
4499
4500
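		/* Consider XDP consuming the packet a success from
		 * the netdev point of view; we do not want to count
		 * this as an error.
		 */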
4501 if (ret != XDP_PASS)
4502 return NET_RX_SUCCESS;
4503 }
4504
4505#ifdef CONFIG_RPS
4506 if (static_key_false(&rps_needed)) {
4507 struct rps_dev_flow voidflow, *rflow = &voidflow;
4508 int cpu;
4509
4510 preempt_disable();
4511 rcu_read_lock();
4512
4513 cpu = get_rps_cpu(skb->dev, skb, &rflow);
4514 if (cpu < 0)
4515 cpu = smp_processor_id();
4516
4517 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
4518
4519 rcu_read_unlock();
4520 preempt_enable();
4521 } else
4522#endif
4523 {
4524 unsigned int qtail;
4525
4526 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
4527 put_cpu();
4528 }
4529 return ret;
4530}
4531
4532
4533
4534
4535
4536
4537
4538
4539
4540
4541
4542
4543
4544
4545
4546
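/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped)
 */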
4547int netif_rx(struct sk_buff *skb)
4548{
4549 int ret;
4550
4551 trace_netif_rx_entry(skb);
4552
4553 ret = netif_rx_internal(skb);
4554 trace_netif_rx_exit(ret);
4555
4556 return ret;
4557}
4558EXPORT_SYMBOL(netif_rx);
4559
4560int netif_rx_ni(struct sk_buff *skb)
4561{
4562 int err;
4563
4564 trace_netif_rx_ni_entry(skb);
4565
4566 preempt_disable();
4567 err = netif_rx_internal(skb);
4568 if (local_softirq_pending())
4569 do_softirq();
4570 preempt_enable();
4571 trace_netif_rx_ni_exit(err);
4572
4573 return err;
4574}
4575EXPORT_SYMBOL(netif_rx_ni);
4576
4577static __latent_entropy void net_tx_action(struct softirq_action *h)
4578{
4579 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
4580
4581 if (sd->completion_queue) {
4582 struct sk_buff *clist;
4583
4584 local_irq_disable();
4585 clist = sd->completion_queue;
4586 sd->completion_queue = NULL;
4587 local_irq_enable();
4588
4589 while (clist) {
4590 struct sk_buff *skb = clist;
4591
4592 clist = clist->next;
4593
4594 WARN_ON(refcount_read(&skb->users));
4595 if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
4596 trace_consume_skb(skb);
4597 else
4598 trace_kfree_skb(skb, net_tx_action);
4599
4600 if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
4601 __kfree_skb(skb);
4602 else
4603 __kfree_skb_defer(skb);
4604 }
4605
4606 __kfree_skb_flush();
4607 }
4608
4609 if (sd->output_queue) {
4610 struct Qdisc *head;
4611
4612 local_irq_disable();
4613 head = sd->output_queue;
4614 sd->output_queue = NULL;
4615 sd->output_queue_tailp = &sd->output_queue;
4616 local_irq_enable();
4617
4618 while (head) {
4619 struct Qdisc *q = head;
4620 spinlock_t *root_lock = NULL;
4621
4622 head = head->next_sched;
4623
4624 if (!(q->flags & TCQ_F_NOLOCK)) {
4625 root_lock = qdisc_lock(q);
4626 spin_lock(root_lock);
4627 }
4628
4629
4630
4631 smp_mb__before_atomic();
4632 clear_bit(__QDISC_STATE_SCHED, &q->state);
4633 qdisc_run(q);
4634 if (root_lock)
4635 spin_unlock(root_lock);
4636 }
4637 }
4638
4639 xfrm_dev_backlog(sd);
4640}
4641
4642#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
4643
4644int (*br_fdb_test_addr_hook)(struct net_device *dev,
4645 unsigned char *addr) __read_mostly;
4646EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
4647#endif
4648
4649static inline struct sk_buff *
4650sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
4651 struct net_device *orig_dev)
4652{
4653#ifdef CONFIG_NET_CLS_ACT
4654 struct mini_Qdisc *miniq = rcu_dereference_bh(skb->dev->miniq_ingress);
4655 struct tcf_result cl_res;
4656
4657
4658
4659
4660
4661
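	/* If there's at least one ingress present somewhere (so
	 * we get here via enabled static key), remaining devices
	 * that are not configured with an ingress qdisc will bypass
	 * the check.
	 */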
4662 if (!miniq)
4663 return skb;
4664
4665 if (*pt_prev) {
4666 *ret = deliver_skb(skb, *pt_prev, orig_dev);
4667 *pt_prev = NULL;
4668 }
4669
4670 qdisc_skb_cb(skb)->pkt_len = skb->len;
4671 skb->tc_at_ingress = 1;
4672 mini_qdisc_bstats_cpu_update(miniq, skb);
4673
4674 switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
4675 case TC_ACT_OK:
4676 case TC_ACT_RECLASSIFY:
4677 skb->tc_index = TC_H_MIN(cl_res.classid);
4678 break;
4679 case TC_ACT_SHOT:
4680 mini_qdisc_qstats_cpu_drop(miniq);
4681 kfree_skb(skb);
4682 return NULL;
4683 case TC_ACT_STOLEN:
4684 case TC_ACT_QUEUED:
4685 case TC_ACT_TRAP:
4686 consume_skb(skb);
4687 return NULL;
4688 case TC_ACT_REDIRECT:
4689
4690
4691
4692
4693 __skb_push(skb, skb->mac_len);
4694 skb_do_redirect(skb);
4695 return NULL;
4696 case TC_ACT_REINSERT:
4697
4698 skb_tc_reinsert(skb, &cl_res);
4699 return NULL;
4700 default:
4701 break;
4702 }
4703#endif
4704 return skb;
4705}
4706
4707
4708
4709
4710
4711
4712
4713
4714
4715
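/**
 *	netdev_is_rx_handler_busy - check if receive handler is registered
 *	@dev: device to check
 *
 *	Check if a receive handler is already registered for a given device.
 *	Return true if there is one.
 *
 *	The caller must hold the rtnl_mutex.
 */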
4716bool netdev_is_rx_handler_busy(struct net_device *dev)
4717{
4718 ASSERT_RTNL();
4719 return dev && rtnl_dereference(dev->rx_handler);
4720}
4721EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
4722
4723
4724
4725
4726
4727
4728
4729
4730
4731
4732
4733
4734
4735
4736
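/**
 *	netdev_rx_handler_register - register receive handler
 *	@dev: device to register a handler for
 *	@rx_handler: receive handler to register
 *	@rx_handler_data: data pointer that is used by rx handler
 *
 *	Register a receive handler for a device. This handler will then be
 *	called from __netif_receive_skb. A negative errno code is returned
 *	on a failure.
 *
 *	The caller must hold the rtnl_mutex.
 *
 *	For a general description of rx_handler, see enum rx_handler_result.
 */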
4737int netdev_rx_handler_register(struct net_device *dev,
4738 rx_handler_func_t *rx_handler,
4739 void *rx_handler_data)
4740{
4741 if (netdev_is_rx_handler_busy(dev))
4742 return -EBUSY;
4743
4744 if (dev->priv_flags & IFF_NO_RX_HANDLER)
4745 return -EINVAL;
4746
4747
4748 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
4749 rcu_assign_pointer(dev->rx_handler, rx_handler);
4750
4751 return 0;
4752}
4753EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
4754
4755
4756
4757
4758
4759
4760
4761
4762
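/**
 *	netdev_rx_handler_unregister - unregister receive handler
 *	@dev: device to unregister a handler from
 *
 *	Unregister a receive handler from a device.
 *
 *	The caller must hold the rtnl_mutex.
 */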
4763void netdev_rx_handler_unregister(struct net_device *dev)
4764{
4765
4766 ASSERT_RTNL();
4767 RCU_INIT_POINTER(dev->rx_handler, NULL);
4768
4769
4770
4771
4772 synchronize_net();
4773 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
4774}
4775EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
4776
4777
4778
4779
4780
4781static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
4782{
4783 switch (skb->protocol) {
4784 case htons(ETH_P_ARP):
4785 case htons(ETH_P_IP):
4786 case htons(ETH_P_IPV6):
4787 case htons(ETH_P_8021Q):
4788 case htons(ETH_P_8021AD):
4789 return true;
4790 default:
4791 return false;
4792 }
4793}
4794
4795static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
4796 int *ret, struct net_device *orig_dev)
4797{
4798#ifdef CONFIG_NETFILTER_INGRESS
4799 if (nf_hook_ingress_active(skb)) {
4800 int ingress_retval;
4801
4802 if (*pt_prev) {
4803 *ret = deliver_skb(skb, *pt_prev, orig_dev);
4804 *pt_prev = NULL;
4805 }
4806
4807 rcu_read_lock();
4808 ingress_retval = nf_hook_ingress(skb);
4809 rcu_read_unlock();
4810 return ingress_retval;
4811 }
4812#endif
4813 return 0;
4814}
4815
4816static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc,
4817 struct packet_type **ppt_prev)
4818{
4819 struct packet_type *ptype, *pt_prev;
4820 rx_handler_func_t *rx_handler;
4821 struct net_device *orig_dev;
4822 bool deliver_exact = false;
4823 int ret = NET_RX_DROP;
4824 __be16 type;
4825
4826 net_timestamp_check(!netdev_tstamp_prequeue, skb);
4827
4828 trace_netif_receive_skb(skb);
4829
4830 orig_dev = skb->dev;
4831
4832 skb_reset_network_header(skb);
4833 if (!skb_transport_header_was_set(skb))
4834 skb_reset_transport_header(skb);
4835 skb_reset_mac_len(skb);
4836
4837 pt_prev = NULL;
4838
4839another_round:
4840 skb->skb_iif = skb->dev->ifindex;
4841
4842 __this_cpu_inc(softnet_data.processed);
4843
4844 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
4845 skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
4846 skb = skb_vlan_untag(skb);
4847 if (unlikely(!skb))
4848 goto out;
4849 }
4850
4851 if (skb_skip_tc_classify(skb))
4852 goto skip_classify;
4853
4854 if (pfmemalloc)
4855 goto skip_taps;
4856
4857 list_for_each_entry_rcu(ptype, &ptype_all, list) {
4858 if (pt_prev)
4859 ret = deliver_skb(skb, pt_prev, orig_dev);
4860 pt_prev = ptype;
4861 }
4862
4863 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
4864 if (pt_prev)
4865 ret = deliver_skb(skb, pt_prev, orig_dev);
4866 pt_prev = ptype;
4867 }
4868
4869skip_taps:
4870#ifdef CONFIG_NET_INGRESS
4871 if (static_branch_unlikely(&ingress_needed_key)) {
4872 skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev);
4873 if (!skb)
4874 goto out;
4875
4876 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
4877 goto out;
4878 }
4879#endif
4880 skb_reset_tc(skb);
4881skip_classify:
4882 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
4883 goto drop;
4884
4885 if (skb_vlan_tag_present(skb)) {
4886 if (pt_prev) {
4887 ret = deliver_skb(skb, pt_prev, orig_dev);
4888 pt_prev = NULL;
4889 }
4890 if (vlan_do_receive(&skb))
4891 goto another_round;
4892 else if (unlikely(!skb))
4893 goto out;
4894 }
4895
4896 rx_handler = rcu_dereference(skb->dev->rx_handler);
4897 if (rx_handler) {
4898 if (pt_prev) {
4899 ret = deliver_skb(skb, pt_prev, orig_dev);
4900 pt_prev = NULL;
4901 }
4902 switch (rx_handler(&skb)) {
4903 case RX_HANDLER_CONSUMED:
4904 ret = NET_RX_SUCCESS;
4905 goto out;
4906 case RX_HANDLER_ANOTHER:
4907 goto another_round;
4908 case RX_HANDLER_EXACT:
4909 deliver_exact = true;
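			/* fall through */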
4910 case RX_HANDLER_PASS:
4911 break;
4912 default:
4913 BUG();
4914 }
4915 }
4916
4917 if (unlikely(skb_vlan_tag_present(skb))) {
4918 if (skb_vlan_tag_get_id(skb))
4919 skb->pkt_type = PACKET_OTHERHOST;
4920
4921
4922
4923
4924 __vlan_hwaccel_clear_tag(skb);
4925 }
4926
4927 type = skb->protocol;
4928
4929
4930 if (likely(!deliver_exact)) {
4931 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4932 &ptype_base[ntohs(type) &
4933 PTYPE_HASH_MASK]);
4934 }
4935
4936 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4937 &orig_dev->ptype_specific);
4938
4939 if (unlikely(skb->dev != orig_dev)) {
4940 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4941 &skb->dev->ptype_specific);
4942 }
4943
4944 if (pt_prev) {
4945 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
4946 goto drop;
4947 *ppt_prev = pt_prev;
4948 } else {
4949drop:
4950 if (!deliver_exact)
4951 atomic_long_inc(&skb->dev->rx_dropped);
4952 else
4953 atomic_long_inc(&skb->dev->rx_nohandler);
4954 kfree_skb(skb);
4955
4956
4957
4958 ret = NET_RX_DROP;
4959 }
4960
4961out:
4962 return ret;
4963}
4964
4965static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
4966{
4967 struct net_device *orig_dev = skb->dev;
4968 struct packet_type *pt_prev = NULL;
4969 int ret;
4970
4971 ret = __netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
4972 if (pt_prev)
4973 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
4974 return ret;
4975}
4976
4977
4978
4979
4980
4981
4982
4983
4984
4985
4986
4987
4988
4989
4990
4991
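/**
 *	netif_receive_skb_core - special purpose version of netif_receive_skb
 *	@skb: buffer to process
 *
 *	More direct receive version of netif_receive_skb().  It should
 *	only be used by callers that have a need to skip RPS and Generic XDP.
 *	Caller must also take care of handling if (page_is_)pfmemalloc.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */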
4992int netif_receive_skb_core(struct sk_buff *skb)
4993{
4994 int ret;
4995
4996 rcu_read_lock();
4997 ret = __netif_receive_skb_one_core(skb, false);
4998 rcu_read_unlock();
4999
5000 return ret;
5001}
5002EXPORT_SYMBOL(netif_receive_skb_core);
5003
5004static inline void __netif_receive_skb_list_ptype(struct list_head *head,
5005 struct packet_type *pt_prev,
5006 struct net_device *orig_dev)
5007{
5008 struct sk_buff *skb, *next;
5009
5010 if (!pt_prev)
5011 return;
5012 if (list_empty(head))
5013 return;
5014 if (pt_prev->list_func != NULL)
5015 pt_prev->list_func(head, pt_prev, orig_dev);
5016 else
5017 list_for_each_entry_safe(skb, next, head, list)
5018 pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
5019}
5020
5021static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
5022{
5023
5024
5025
5026
5027
5028
5029
5030
5031
5032
5033 struct packet_type *pt_curr = NULL;
5034
5035 struct net_device *od_curr = NULL;
5036 struct list_head sublist;
5037 struct sk_buff *skb, *next;
5038
5039 INIT_LIST_HEAD(&sublist);
5040 list_for_each_entry_safe(skb, next, head, list) {
5041 struct net_device *orig_dev = skb->dev;
5042 struct packet_type *pt_prev = NULL;
5043
5044 skb_list_del_init(skb);
5045 __netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
5046 if (!pt_prev)
5047 continue;
5048 if (pt_curr != pt_prev || od_curr != orig_dev) {
5049
5050 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
5051
5052 INIT_LIST_HEAD(&sublist);
5053 pt_curr = pt_prev;
5054 od_curr = orig_dev;
5055 }
5056 list_add_tail(&skb->list, &sublist);
5057 }
5058
5059
5060 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
5061}
5062
5063static int __netif_receive_skb(struct sk_buff *skb)
5064{
5065 int ret;
5066
5067 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
5068 unsigned int noreclaim_flag;
5069
5070
5071
5072
5073
5074
5075
5076
5077
5078
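		/*
		 * PFMEMALLOC skbs are special, they should
		 * - be delivered to SOCK_MEMALLOC sockets only
		 * - stay away from userspace
		 * - have limited memory usage
		 *
		 * Use PF_MEMALLOC as this saves us from propagating the
		 * allocation context down to all allocation sites.
		 */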
5079 noreclaim_flag = memalloc_noreclaim_save();
5080 ret = __netif_receive_skb_one_core(skb, true);
5081 memalloc_noreclaim_restore(noreclaim_flag);
5082 } else
5083 ret = __netif_receive_skb_one_core(skb, false);
5084
5085 return ret;
5086}
5087
5088static void __netif_receive_skb_list(struct list_head *head)
5089{
5090 unsigned long noreclaim_flag = 0;
5091 struct sk_buff *skb, *next;
5092 bool pfmemalloc = false;
5093
5094 list_for_each_entry_safe(skb, next, head, list) {
5095 if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) {
5096 struct list_head sublist;
5097
5098
5099 list_cut_before(&sublist, head, &skb->list);
5100 if (!list_empty(&sublist))
5101 __netif_receive_skb_list_core(&sublist, pfmemalloc);
5102 pfmemalloc = !pfmemalloc;
5103
5104 if (pfmemalloc)
5105 noreclaim_flag = memalloc_noreclaim_save();
5106 else
5107 memalloc_noreclaim_restore(noreclaim_flag);
5108 }
5109 }
5110
5111 if (!list_empty(head))
5112 __netif_receive_skb_list_core(head, pfmemalloc);
5113
5114 if (pfmemalloc)
5115 memalloc_noreclaim_restore(noreclaim_flag);
5116}
5117
5118static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
5119{
5120 struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
5121 struct bpf_prog *new = xdp->prog;
5122 int ret = 0;
5123
5124 switch (xdp->command) {
5125 case XDP_SETUP_PROG:
5126 rcu_assign_pointer(dev->xdp_prog, new);
5127 if (old)
5128 bpf_prog_put(old);
5129
5130 if (old && !new) {
5131 static_branch_dec(&generic_xdp_needed_key);
5132 } else if (new && !old) {
5133 static_branch_inc(&generic_xdp_needed_key);
5134 dev_disable_lro(dev);
5135 dev_disable_gro_hw(dev);
5136 }
5137 break;
5138
5139 case XDP_QUERY_PROG:
5140 xdp->prog_id = old ? old->aux->id : 0;
5141 break;
5142
5143 default:
5144 ret = -EINVAL;
5145 break;
5146 }
5147
5148 return ret;
5149}
5150
5151static int netif_receive_skb_internal(struct sk_buff *skb)
5152{
5153 int ret;
5154
5155 net_timestamp_check(netdev_tstamp_prequeue, skb);
5156
5157 if (skb_defer_rx_timestamp(skb))
5158 return NET_RX_SUCCESS;
5159
5160 if (static_branch_unlikely(&generic_xdp_needed_key)) {
5161 int ret;
5162
5163 preempt_disable();
5164 rcu_read_lock();
5165 ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
5166 rcu_read_unlock();
5167 preempt_enable();
5168
5169 if (ret != XDP_PASS)
5170 return NET_RX_DROP;
5171 }
5172
5173 rcu_read_lock();
5174#ifdef CONFIG_RPS
5175 if (static_key_false(&rps_needed)) {
5176 struct rps_dev_flow voidflow, *rflow = &voidflow;
5177 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5178
5179 if (cpu >= 0) {
5180 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5181 rcu_read_unlock();
5182 return ret;
5183 }
5184 }
5185#endif
5186 ret = __netif_receive_skb(skb);
5187 rcu_read_unlock();
5188 return ret;
5189}
5190
5191static void netif_receive_skb_list_internal(struct list_head *head)
5192{
5193 struct bpf_prog *xdp_prog = NULL;
5194 struct sk_buff *skb, *next;
5195 struct list_head sublist;
5196
5197 INIT_LIST_HEAD(&sublist);
5198 list_for_each_entry_safe(skb, next, head, list) {
5199 net_timestamp_check(netdev_tstamp_prequeue, skb);
5200 skb_list_del_init(skb);
5201 if (!skb_defer_rx_timestamp(skb))
5202 list_add_tail(&skb->list, &sublist);
5203 }
5204 list_splice_init(&sublist, head);
5205
5206 if (static_branch_unlikely(&generic_xdp_needed_key)) {
5207 preempt_disable();
5208 rcu_read_lock();
5209 list_for_each_entry_safe(skb, next, head, list) {
5210 xdp_prog = rcu_dereference(skb->dev->xdp_prog);
5211 skb_list_del_init(skb);
5212 if (do_xdp_generic(xdp_prog, skb) == XDP_PASS)
5213 list_add_tail(&skb->list, &sublist);
5214 }
5215 rcu_read_unlock();
5216 preempt_enable();
5217
5218 list_splice_init(&sublist, head);
5219 }
5220
5221 rcu_read_lock();
5222#ifdef CONFIG_RPS
5223 if (static_key_false(&rps_needed)) {
5224 list_for_each_entry_safe(skb, next, head, list) {
5225 struct rps_dev_flow voidflow, *rflow = &voidflow;
5226 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5227
5228 if (cpu >= 0) {
5229
5230 skb_list_del_init(skb);
5231 enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5232 }
5233 }
5234 }
5235#endif
5236 __netif_receive_skb_list(head);
5237 rcu_read_unlock();
5238}
5239
5254
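/**
 * netif_receive_skb - process receive buffer from network
 * @skb: buffer to process
 *
 * netif_receive_skb() is the main receive data processing function.
 * It always succeeds. The buffer may be dropped during processing
 * for congestion control or by the protocol layers.
 *
 * This function may only be called from softirq context and interrupts
 * should be enabled.
 *
 * Return values (usually ignored):
 * NET_RX_SUCCESS: no congestion
 * NET_RX_DROP: packet was dropped
 */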
5255int netif_receive_skb(struct sk_buff *skb)
5256{
5257 int ret;
5258
5259 trace_netif_receive_skb_entry(skb);
5260
5261 ret = netif_receive_skb_internal(skb);
5262 trace_netif_receive_skb_exit(ret);
5263
5264 return ret;
5265}
5266EXPORT_SYMBOL(netif_receive_skb);
5267
5277
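/**
 * netif_receive_skb_list - process many receive buffers from network
 * @head: list of skbs to process.
 *
 * Since the return value of netif_receive_skb() is normally ignored, and
 * would not be meaningful for a list, this function returns void.
 *
 * This function may only be called from softirq context and interrupts
 * should be enabled.
 */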
5278void netif_receive_skb_list(struct list_head *head)
5279{
5280 struct sk_buff *skb;
5281
5282 if (list_empty(head))
5283 return;
5284 if (trace_netif_receive_skb_list_entry_enabled()) {
5285 list_for_each_entry(skb, head, list)
5286 trace_netif_receive_skb_list_entry(skb);
5287 }
5288 netif_receive_skb_list_internal(head);
5289 trace_netif_receive_skb_list_exit(0);
5290}
5291EXPORT_SYMBOL(netif_receive_skb_list);
5292
5293DEFINE_PER_CPU(struct work_struct, flush_works);
5294
5295
5296static void flush_backlog(struct work_struct *work)
5297{
5298 struct sk_buff *skb, *tmp;
5299 struct softnet_data *sd;
5300
5301 local_bh_disable();
5302 sd = this_cpu_ptr(&softnet_data);
5303
5304 local_irq_disable();
5305 rps_lock(sd);
5306 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
5307 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
5308 __skb_unlink(skb, &sd->input_pkt_queue);
5309 kfree_skb(skb);
5310 input_queue_head_incr(sd);
5311 }
5312 }
5313 rps_unlock(sd);
5314 local_irq_enable();
5315
5316 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
5317 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
5318 __skb_unlink(skb, &sd->process_queue);
5319 kfree_skb(skb);
5320 input_queue_head_incr(sd);
5321 }
5322 }
5323 local_bh_enable();
5324}
5325
5326static void flush_all_backlogs(void)
5327{
5328 unsigned int cpu;
5329
5330 get_online_cpus();
5331
5332 for_each_online_cpu(cpu)
5333 queue_work_on(cpu, system_highpri_wq,
5334 per_cpu_ptr(&flush_works, cpu));
5335
5336 for_each_online_cpu(cpu)
5337 flush_work(per_cpu_ptr(&flush_works, cpu));
5338
5339 put_online_cpus();
5340}
5341
5342INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int));
5343INDIRECT_CALLABLE_DECLARE(int ipv6_gro_complete(struct sk_buff *, int));
5344static int napi_gro_complete(struct sk_buff *skb)
5345{
5346 struct packet_offload *ptype;
5347 __be16 type = skb->protocol;
5348 struct list_head *head = &offload_base;
5349 int err = -ENOENT;
5350
5351 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
5352
5353 if (NAPI_GRO_CB(skb)->count == 1) {
5354 skb_shinfo(skb)->gso_size = 0;
5355 goto out;
5356 }
5357
5358 rcu_read_lock();
5359 list_for_each_entry_rcu(ptype, head, list) {
5360 if (ptype->type != type || !ptype->callbacks.gro_complete)
5361 continue;
5362
5363 err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
5364 ipv6_gro_complete, inet_gro_complete,
5365 skb, 0);
5366 break;
5367 }
5368 rcu_read_unlock();
5369
5370 if (err) {
5371 WARN_ON(&ptype->list == head);
5372 kfree_skb(skb);
5373 return NET_RX_SUCCESS;
5374 }
5375
5376out:
5377 return netif_receive_skb_internal(skb);
5378}
5379
5380static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
5381 bool flush_old)
5382{
5383 struct list_head *head = &napi->gro_hash[index].list;
5384 struct sk_buff *skb, *p;
5385
5386 list_for_each_entry_safe_reverse(skb, p, head, list) {
5387 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
5388 return;
5389 skb_list_del_init(skb);
5390 napi_gro_complete(skb);
5391 napi->gro_hash[index].count--;
5392 }
5393
5394 if (!napi->gro_hash[index].count)
5395 __clear_bit(index, &napi->gro_bitmask);
5396}
5397
5398
5399
5400
5401
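/* napi->gro_hash[].list contains packets ordered by age, youngest first.
 * Chains are completed in reverse order, oldest packets first, to reduce
 * the latency of long-held flows.
 */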
5402void napi_gro_flush(struct napi_struct *napi, bool flush_old)
5403{
5404 unsigned long bitmask = napi->gro_bitmask;
5405 unsigned int i, base = ~0U;
5406
5407 while ((i = ffs(bitmask)) != 0) {
5408 bitmask >>= i;
5409 base += i;
5410 __napi_gro_flush_chain(napi, base, flush_old);
5411 }
5412}
5413EXPORT_SYMBOL(napi_gro_flush);
5414
5415static struct list_head *gro_list_prepare(struct napi_struct *napi,
5416 struct sk_buff *skb)
5417{
5418 unsigned int maclen = skb->dev->hard_header_len;
5419 u32 hash = skb_get_hash_raw(skb);
5420 struct list_head *head;
5421 struct sk_buff *p;
5422
5423 head = &napi->gro_hash[hash & (GRO_HASH_BUCKETS - 1)].list;
5424 list_for_each_entry(p, head, list) {
5425 unsigned long diffs;
5426
5427 NAPI_GRO_CB(p)->flush = 0;
5428
5429 if (hash != skb_get_hash_raw(p)) {
5430 NAPI_GRO_CB(p)->same_flow = 0;
5431 continue;
5432 }
5433
5434 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
5435 diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb);
5436 if (skb_vlan_tag_present(p))
5437 diffs |= p->vlan_tci ^ skb->vlan_tci;
5438 diffs |= skb_metadata_dst_cmp(p, skb);
5439 diffs |= skb_metadata_differs(p, skb);
5440 if (maclen == ETH_HLEN)
5441 diffs |= compare_ether_header(skb_mac_header(p),
5442 skb_mac_header(skb));
5443 else if (!diffs)
5444 diffs = memcmp(skb_mac_header(p),
5445 skb_mac_header(skb),
5446 maclen);
5447 NAPI_GRO_CB(p)->same_flow = !diffs;
5448 }
5449
5450 return head;
5451}
5452
5453static void skb_gro_reset_offset(struct sk_buff *skb)
5454{
5455 const struct skb_shared_info *pinfo = skb_shinfo(skb);
5456 const skb_frag_t *frag0 = &pinfo->frags[0];
5457
5458 NAPI_GRO_CB(skb)->data_offset = 0;
5459 NAPI_GRO_CB(skb)->frag0 = NULL;
5460 NAPI_GRO_CB(skb)->frag0_len = 0;
5461
5462 if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
5463 pinfo->nr_frags &&
5464 !PageHighMem(skb_frag_page(frag0))) {
5465 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
5466 NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
5467 skb_frag_size(frag0),
5468 skb->end - skb->tail);
5469 }
5470}
5471
5472static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
5473{
5474 struct skb_shared_info *pinfo = skb_shinfo(skb);
5475
5476 BUG_ON(skb->end - skb->tail < grow);
5477
5478 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
5479
5480 skb->data_len -= grow;
5481 skb->tail += grow;
5482
5483 pinfo->frags[0].page_offset += grow;
5484 skb_frag_size_sub(&pinfo->frags[0], grow);
5485
5486 if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
5487 skb_frag_unref(skb, 0);
5488 memmove(pinfo->frags, pinfo->frags + 1,
5489 --pinfo->nr_frags * sizeof(pinfo->frags[0]));
5490 }
5491}
5492
5493static void gro_flush_oldest(struct list_head *head)
5494{
5495 struct sk_buff *oldest;
5496
5497 oldest = list_last_entry(head, struct sk_buff, list);
5498
5499
5500
5501
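 /* This path is only taken once the chain already holds MAX_GRO_SKBS
  * entries, so the list cannot be empty here.
  */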
5502 if (WARN_ON_ONCE(!oldest))
5503 return;
5504
5505
5506
5507
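 /* napi->gro_hash[].count is left untouched: the caller is about to add
  * a new skb to this chain and skips its own increment on this path, so
  * the count stays balanced.
  */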
5508 skb_list_del_init(oldest);
5509 napi_gro_complete(oldest);
5510}
5511
5512INDIRECT_CALLABLE_DECLARE(struct sk_buff *inet_gro_receive(struct list_head *,
5513 struct sk_buff *));
5514INDIRECT_CALLABLE_DECLARE(struct sk_buff *ipv6_gro_receive(struct list_head *,
5515 struct sk_buff *));
5516static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
5517{
5518 u32 hash = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
5519 struct list_head *head = &offload_base;
5520 struct packet_offload *ptype;
5521 __be16 type = skb->protocol;
5522 struct list_head *gro_head;
5523 struct sk_buff *pp = NULL;
5524 enum gro_result ret;
5525 int same_flow;
5526 int grow;
5527
5528 if (netif_elide_gro(skb->dev))
5529 goto normal;
5530
5531 gro_head = gro_list_prepare(napi, skb);
5532
5533 rcu_read_lock();
5534 list_for_each_entry_rcu(ptype, head, list) {
5535 if (ptype->type != type || !ptype->callbacks.gro_receive)
5536 continue;
5537
5538 skb_set_network_header(skb, skb_gro_offset(skb));
5539 skb_reset_mac_len(skb);
5540 NAPI_GRO_CB(skb)->same_flow = 0;
5541 NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
5542 NAPI_GRO_CB(skb)->free = 0;
5543 NAPI_GRO_CB(skb)->encap_mark = 0;
5544 NAPI_GRO_CB(skb)->recursion_counter = 0;
5545 NAPI_GRO_CB(skb)->is_fou = 0;
5546 NAPI_GRO_CB(skb)->is_atomic = 1;
5547 NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
5548
5549
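 /* Set up state for GRO checksum validation */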
5550 switch (skb->ip_summed) {
5551 case CHECKSUM_COMPLETE:
5552 NAPI_GRO_CB(skb)->csum = skb->csum;
5553 NAPI_GRO_CB(skb)->csum_valid = 1;
5554 NAPI_GRO_CB(skb)->csum_cnt = 0;
5555 break;
5556 case CHECKSUM_UNNECESSARY:
5557 NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
5558 NAPI_GRO_CB(skb)->csum_valid = 0;
5559 break;
5560 default:
5561 NAPI_GRO_CB(skb)->csum_cnt = 0;
5562 NAPI_GRO_CB(skb)->csum_valid = 0;
5563 }
5564
5565 pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
5566 ipv6_gro_receive, inet_gro_receive,
5567 gro_head, skb);
5568 break;
5569 }
5570 rcu_read_unlock();
5571
5572 if (&ptype->list == head)
5573 goto normal;
5574
5575 if (IS_ERR(pp) && PTR_ERR(pp) == -EINPROGRESS) {
5576 ret = GRO_CONSUMED;
5577 goto ok;
5578 }
5579
5580 same_flow = NAPI_GRO_CB(skb)->same_flow;
5581 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
5582
5583 if (pp) {
5584 skb_list_del_init(pp);
5585 napi_gro_complete(pp);
5586 napi->gro_hash[hash].count--;
5587 }
5588
5589 if (same_flow)
5590 goto ok;
5591
5592 if (NAPI_GRO_CB(skb)->flush)
5593 goto normal;
5594
5595 if (unlikely(napi->gro_hash[hash].count >= MAX_GRO_SKBS)) {
5596 gro_flush_oldest(gro_head);
5597 } else {
5598 napi->gro_hash[hash].count++;
5599 }
5600 NAPI_GRO_CB(skb)->count = 1;
5601 NAPI_GRO_CB(skb)->age = jiffies;
5602 NAPI_GRO_CB(skb)->last = skb;
5603 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
5604 list_add(&skb->list, gro_head);
5605 ret = GRO_HELD;
5606
5607pull:
5608 grow = skb_gro_offset(skb) - skb_headlen(skb);
5609 if (grow > 0)
5610 gro_pull_from_frag0(skb, grow);
5611ok:
5612 if (napi->gro_hash[hash].count) {
5613 if (!test_bit(hash, &napi->gro_bitmask))
5614 __set_bit(hash, &napi->gro_bitmask);
5615 } else if (test_bit(hash, &napi->gro_bitmask)) {
5616 __clear_bit(hash, &napi->gro_bitmask);
5617 }
5618
5619 return ret;
5620
5621normal:
5622 ret = GRO_NORMAL;
5623 goto pull;
5624}
5625
5626struct packet_offload *gro_find_receive_by_type(__be16 type)
5627{
5628 struct list_head *offload_head = &offload_base;
5629 struct packet_offload *ptype;
5630
5631 list_for_each_entry_rcu(ptype, offload_head, list) {
5632 if (ptype->type != type || !ptype->callbacks.gro_receive)
5633 continue;
5634 return ptype;
5635 }
5636 return NULL;
5637}
5638EXPORT_SYMBOL(gro_find_receive_by_type);
5639
5640struct packet_offload *gro_find_complete_by_type(__be16 type)
5641{
5642 struct list_head *offload_head = &offload_base;
5643 struct packet_offload *ptype;
5644
5645 list_for_each_entry_rcu(ptype, offload_head, list) {
5646 if (ptype->type != type || !ptype->callbacks.gro_complete)
5647 continue;
5648 return ptype;
5649 }
5650 return NULL;
5651}
5652EXPORT_SYMBOL(gro_find_complete_by_type);
5653
5654static void napi_skb_free_stolen_head(struct sk_buff *skb)
5655{
5656 skb_dst_drop(skb);
5657 secpath_reset(skb);
5658 kmem_cache_free(skbuff_head_cache, skb);
5659}
5660
5661static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
5662{
5663 switch (ret) {
5664 case GRO_NORMAL:
5665 if (netif_receive_skb_internal(skb))
5666 ret = GRO_DROP;
5667 break;
5668
5669 case GRO_DROP:
5670 kfree_skb(skb);
5671 break;
5672
5673 case GRO_MERGED_FREE:
5674 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
5675 napi_skb_free_stolen_head(skb);
5676 else
5677 __kfree_skb(skb);
5678 break;
5679
5680 case GRO_HELD:
5681 case GRO_MERGED:
5682 case GRO_CONSUMED:
5683 break;
5684 }
5685
5686 return ret;
5687}
5688
5689gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
5690{
5691 gro_result_t ret;
5692
5693 skb_mark_napi_id(skb, napi);
5694 trace_napi_gro_receive_entry(skb);
5695
5696 skb_gro_reset_offset(skb);
5697
5698 ret = napi_skb_finish(dev_gro_receive(napi, skb), skb);
5699 trace_napi_gro_receive_exit(ret);
5700
5701 return ret;
5702}
5703EXPORT_SYMBOL(napi_gro_receive);
5704
5705static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
5706{
5707 if (unlikely(skb->pfmemalloc)) {
5708 consume_skb(skb);
5709 return;
5710 }
5711 __skb_pull(skb, skb_headlen(skb));
5712
5713 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
5714 __vlan_hwaccel_clear_tag(skb);
5715 skb->dev = napi->dev;
5716 skb->skb_iif = 0;
5717
5718
5719 skb->pkt_type = PACKET_HOST;
5720
5721 skb->encapsulation = 0;
5722 skb_shinfo(skb)->gso_type = 0;
5723 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
5724 secpath_reset(skb);
5725
5726 napi->skb = skb;
5727}
5728
5729struct sk_buff *napi_get_frags(struct napi_struct *napi)
5730{
5731 struct sk_buff *skb = napi->skb;
5732
5733 if (!skb) {
5734 skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
5735 if (skb) {
5736 napi->skb = skb;
5737 skb_mark_napi_id(skb, napi);
5738 }
5739 }
5740 return skb;
5741}
5742EXPORT_SYMBOL(napi_get_frags);
5743
5744static gro_result_t napi_frags_finish(struct napi_struct *napi,
5745 struct sk_buff *skb,
5746 gro_result_t ret)
5747{
5748 switch (ret) {
5749 case GRO_NORMAL:
5750 case GRO_HELD:
5751 __skb_push(skb, ETH_HLEN);
5752 skb->protocol = eth_type_trans(skb, skb->dev);
5753 if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
5754 ret = GRO_DROP;
5755 break;
5756
5757 case GRO_DROP:
5758 napi_reuse_skb(napi, skb);
5759 break;
5760
5761 case GRO_MERGED_FREE:
5762 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
5763 napi_skb_free_stolen_head(skb);
5764 else
5765 napi_reuse_skb(napi, skb);
5766 break;
5767
5768 case GRO_MERGED:
5769 case GRO_CONSUMED:
5770 break;
5771 }
5772
5773 return ret;
5774}
5775
5776
5777
5778
5779
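/* The GRO stack assumes the network header starts at gro_offset 0, and
 * drivers may mix napi_gro_frags() and napi_gro_receive(). Pull the
 * Ethernet header into skb->data here so both paths see a common layout.
 */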
5780static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
5781{
5782 struct sk_buff *skb = napi->skb;
5783 const struct ethhdr *eth;
5784 unsigned int hlen = sizeof(*eth);
5785
5786 napi->skb = NULL;
5787
5788 skb_reset_mac_header(skb);
5789 skb_gro_reset_offset(skb);
5790
5791 eth = skb_gro_header_fast(skb, 0);
5792 if (unlikely(skb_gro_header_hard(skb, hlen))) {
5793 eth = skb_gro_header_slow(skb, hlen, 0);
5794 if (unlikely(!eth)) {
5795 net_warn_ratelimited("%s: dropping impossible skb from %s\n",
5796 __func__, napi->dev->name);
5797 napi_reuse_skb(napi, skb);
5798 return NULL;
5799 }
5800 } else {
5801 gro_pull_from_frag0(skb, hlen);
5802 NAPI_GRO_CB(skb)->frag0 += hlen;
5803 NAPI_GRO_CB(skb)->frag0_len -= hlen;
5804 }
5805 __skb_pull(skb, hlen);
5806
5807
5808
5809
5810
5811
5812 skb->protocol = eth->h_proto;
5813
5814 return skb;
5815}
5816
5817gro_result_t napi_gro_frags(struct napi_struct *napi)
5818{
5819 gro_result_t ret;
5820 struct sk_buff *skb = napi_frags_skb(napi);
5821
5822 if (!skb)
5823 return GRO_DROP;
5824
5825 trace_napi_gro_frags_entry(skb);
5826
5827 ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
5828 trace_napi_gro_frags_exit(ret);
5829
5830 return ret;
5831}
5832EXPORT_SYMBOL(napi_gro_frags);
5833
5834
5835
5836
5837__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
5838{
5839 __wsum wsum;
5840 __sum16 sum;
5841
5842 wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
5843
5844
5845 sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
5846
5847 if (likely(!sum)) {
5848 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
5849 !skb->csum_complete_sw)
5850 netdev_rx_csum_fault(skb->dev, skb);
5851 }
5852
5853 NAPI_GRO_CB(skb)->csum = wsum;
5854 NAPI_GRO_CB(skb)->csum_valid = 1;
5855
5856 return sum;
5857}
5858EXPORT_SYMBOL(__skb_gro_checksum_complete);
5859
5860static void net_rps_send_ipi(struct softnet_data *remsd)
5861{
5862#ifdef CONFIG_RPS
5863 while (remsd) {
5864 struct softnet_data *next = remsd->rps_ipi_next;
5865
5866 if (cpu_online(remsd->cpu))
5867 smp_call_function_single_async(remsd->cpu, &remsd->csd);
5868 remsd = next;
5869 }
5870#endif
5871}
5872
5873
5874
5875
5876
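/*
 * net_rps_action_and_irq_enable sends any pending IPI's for rps.
 * Note: called with local irq disabled, but exits with local irq enabled.
 */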
5877static void net_rps_action_and_irq_enable(struct softnet_data *sd)
5878{
5879#ifdef CONFIG_RPS
5880 struct softnet_data *remsd = sd->rps_ipi_list;
5881
5882 if (remsd) {
5883 sd->rps_ipi_list = NULL;
5884
5885 local_irq_enable();
5886
5887
5888 net_rps_send_ipi(remsd);
5889 } else
5890#endif
5891 local_irq_enable();
5892}
5893
5894static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
5895{
5896#ifdef CONFIG_RPS
5897 return sd->rps_ipi_list != NULL;
5898#else
5899 return false;
5900#endif
5901}
5902
5903static int process_backlog(struct napi_struct *napi, int quota)
5904{
5905 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
5906 bool again = true;
5907 int work = 0;
5908
5909
5910
5911
5912 if (sd_has_rps_ipi_waiting(sd)) {
5913 local_irq_disable();
5914 net_rps_action_and_irq_enable(sd);
5915 }
5916
5917 napi->weight = dev_rx_weight;
5918 while (again) {
5919 struct sk_buff *skb;
5920
5921 while ((skb = __skb_dequeue(&sd->process_queue))) {
5922 rcu_read_lock();
5923 __netif_receive_skb(skb);
5924 rcu_read_unlock();
5925 input_queue_head_incr(sd);
5926 if (++work >= quota)
5927 return work;
5928
5929 }
5930
5931 local_irq_disable();
5932 rps_lock(sd);
5933 if (skb_queue_empty(&sd->input_pkt_queue)) {
5934
5935
5936
5937
5938
5939
5940
5941
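 /* Inline a custom version of __napi_complete(): only the current CPU
  * owns and manipulates this NAPI instance, and NAPI_STATE_SCHED is the
  * only flag that can be set on the backlog NAPI, so a plain write is
  * enough and no clear_bit() or memory barrier is needed.
  */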
5942 napi->state = 0;
5943 again = false;
5944 } else {
5945 skb_queue_splice_tail_init(&sd->input_pkt_queue,
5946 &sd->process_queue);
5947 }
5948 rps_unlock(sd);
5949 local_irq_enable();
5950 }
5951
5952 return work;
5953}
5954
5955
5956
5957
5958
5959
5960
5961
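/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run.
 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
 */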
5962void __napi_schedule(struct napi_struct *n)
5963{
5964 unsigned long flags;
5965
5966 local_irq_save(flags);
5967 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
5968 local_irq_restore(flags);
5969}
5970EXPORT_SYMBOL(__napi_schedule);
5971
5972
5973
5974
5975
5976
5977
5978
5979
5980
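/**
 * napi_schedule_prep - check if napi can be scheduled
 * @n: napi context
 *
 * Test if the NAPI routine is already running, and if not mark it as
 * running. This is used as a condition variable to ensure only one NAPI
 * poll instance runs. We also make sure there is no pending NAPI disable.
 */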
5981bool napi_schedule_prep(struct napi_struct *n)
5982{
5983 unsigned long val, new;
5984
5985 do {
5986 val = READ_ONCE(n->state);
5987 if (unlikely(val & NAPIF_STATE_DISABLE))
5988 return false;
5989 new = val | NAPIF_STATE_SCHED;
5990
5991
5992
5993
5994
5995
5996
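 /* Set NAPIF_STATE_MISSED if and only if NAPIF_STATE_SCHED was already
  * set: the divide/multiply below is a branchless way of copying the
  * SCHED bit into the MISSED position.
  */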
5997 new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
5998 NAPIF_STATE_MISSED;
5999 } while (cmpxchg(&n->state, val, new) != val);
6000
6001 return !(val & NAPIF_STATE_SCHED);
6002}
6003EXPORT_SYMBOL(napi_schedule_prep);
6004
6005
6006
6007
6008
6009
6010
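/**
 * __napi_schedule_irqoff - schedule for receive
 * @n: entry to schedule
 *
 * Variant of __napi_schedule() assuming hard irqs are masked.
 */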
6011void __napi_schedule_irqoff(struct napi_struct *n)
6012{
6013 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
6014}
6015EXPORT_SYMBOL(__napi_schedule_irqoff);
6016
6017bool napi_complete_done(struct napi_struct *n, int work_done)
6018{
6019 unsigned long flags, val, new;
6020
6021
6022
6023
6024
6025
6026
6027 if (unlikely(n->state & (NAPIF_STATE_NPSVC |
6028 NAPIF_STATE_IN_BUSY_POLL)))
6029 return false;
6030
6031 if (n->gro_bitmask) {
6032 unsigned long timeout = 0;
6033
6034 if (work_done)
6035 timeout = n->dev->gro_flush_timeout;
6036
6037
6038
6039
6040
6041 napi_gro_flush(n, !!timeout);
6042 if (timeout)
6043 hrtimer_start(&n->timer, ns_to_ktime(timeout),
6044 HRTIMER_MODE_REL_PINNED);
6045 }
6046 if (unlikely(!list_empty(&n->poll_list))) {
6047
6048 local_irq_save(flags);
6049 list_del_init(&n->poll_list);
6050 local_irq_restore(flags);
6051 }
6052
6053 do {
6054 val = READ_ONCE(n->state);
6055
6056 WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));
6057
6058 new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED);
6059
6060
6061
6062
6063
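 /* If NAPIF_STATE_MISSED was set while this poll was running, translate
  * it into NAPIF_STATE_SCHED (same branchless trick as in
  * napi_schedule_prep()) so the reschedule below fires.
  */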
6064 new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
6065 NAPIF_STATE_SCHED;
6066 } while (cmpxchg(&n->state, val, new) != val);
6067
6068 if (unlikely(val & NAPIF_STATE_MISSED)) {
6069 __napi_schedule(n);
6070 return false;
6071 }
6072
6073 return true;
6074}
6075EXPORT_SYMBOL(napi_complete_done);
6076
6077
6078static struct napi_struct *napi_by_id(unsigned int napi_id)
6079{
6080 unsigned int hash = napi_id % HASH_SIZE(napi_hash);
6081 struct napi_struct *napi;
6082
6083 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
6084 if (napi->napi_id == napi_id)
6085 return napi;
6086
6087 return NULL;
6088}
6089
6090#if defined(CONFIG_NET_RX_BUSY_POLL)
6091
6092#define BUSY_POLL_BUDGET 8
6093
6094static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
6095{
6096 int rc;
6097
6098
6099
6100
6101
6102
6103
6104
6105
6106
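 /* We hold NAPIF_STATE_SCHED here, so nobody else can run this NAPI
  * instance. A device hard irq may have failed to grab SCHED in the
  * meantime and set NAPIF_STATE_MISSED instead; since one final poll
  * runs below, it is safe to clear both MISSED and IN_BUSY_POLL now.
  */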
6107 clear_bit(NAPI_STATE_MISSED, &napi->state);
6108 clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
6109
6110 local_bh_disable();
6111
6112
6113
6114
6115 rc = napi->poll(napi, BUSY_POLL_BUDGET);
6116 trace_napi_poll(napi, rc, BUSY_POLL_BUDGET);
6117 netpoll_poll_unlock(have_poll_lock);
6118 if (rc == BUSY_POLL_BUDGET)
6119 __napi_schedule(napi);
6120 local_bh_enable();
6121}
6122
6123void napi_busy_loop(unsigned int napi_id,
6124 bool (*loop_end)(void *, unsigned long),
6125 void *loop_end_arg)
6126{
6127 unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
6128 int (*napi_poll)(struct napi_struct *napi, int budget);
6129 void *have_poll_lock = NULL;
6130 struct napi_struct *napi;
6131
6132restart:
6133 napi_poll = NULL;
6134
6135 rcu_read_lock();
6136
6137 napi = napi_by_id(napi_id);
6138 if (!napi)
6139 goto out;
6140
6141 preempt_disable();
6142 for (;;) {
6143 int work = 0;
6144
6145 local_bh_disable();
6146 if (!napi_poll) {
6147 unsigned long val = READ_ONCE(napi->state);
6148
6149
6150
6151
6152 if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
6153 NAPIF_STATE_IN_BUSY_POLL))
6154 goto count;
6155 if (cmpxchg(&napi->state, val,
6156 val | NAPIF_STATE_IN_BUSY_POLL |
6157 NAPIF_STATE_SCHED) != val)
6158 goto count;
6159 have_poll_lock = netpoll_poll_lock(napi);
6160 napi_poll = napi->poll;
6161 }
6162 work = napi_poll(napi, BUSY_POLL_BUDGET);
6163 trace_napi_poll(napi, work, BUSY_POLL_BUDGET);
6164count:
6165 if (work > 0)
6166 __NET_ADD_STATS(dev_net(napi->dev),
6167 LINUX_MIB_BUSYPOLLRXPACKETS, work);
6168 local_bh_enable();
6169
6170 if (!loop_end || loop_end(loop_end_arg, start_time))
6171 break;
6172
6173 if (unlikely(need_resched())) {
6174 if (napi_poll)
6175 busy_poll_stop(napi, have_poll_lock);
6176 preempt_enable();
6177 rcu_read_unlock();
6178 cond_resched();
6179 if (loop_end(loop_end_arg, start_time))
6180 return;
6181 goto restart;
6182 }
6183 cpu_relax();
6184 }
6185 if (napi_poll)
6186 busy_poll_stop(napi, have_poll_lock);
6187 preempt_enable();
6188out:
6189 rcu_read_unlock();
6190}
6191EXPORT_SYMBOL(napi_busy_loop);
6192
6193#endif
6194
6195static void napi_hash_add(struct napi_struct *napi)
6196{
6197 if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) ||
6198 test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
6199 return;
6200
6201 spin_lock(&napi_hash_lock);
6202
6203
6204 do {
6205 if (unlikely(++napi_gen_id < MIN_NAPI_ID))
6206 napi_gen_id = MIN_NAPI_ID;
6207 } while (napi_by_id(napi_gen_id));
6208 napi->napi_id = napi_gen_id;
6209
6210 hlist_add_head_rcu(&napi->napi_hash_node,
6211 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
6212
6213 spin_unlock(&napi_hash_lock);
6214}
6215
6216
6217
6218
6219bool napi_hash_del(struct napi_struct *napi)
6220{
6221 bool rcu_sync_needed = false;
6222
6223 spin_lock(&napi_hash_lock);
6224
6225 if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) {
6226 rcu_sync_needed = true;
6227 hlist_del_rcu(&napi->napi_hash_node);
6228 }
6229 spin_unlock(&napi_hash_lock);
6230 return rcu_sync_needed;
6231}
6232EXPORT_SYMBOL_GPL(napi_hash_del);
6233
6234static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
6235{
6236 struct napi_struct *napi;
6237
6238 napi = container_of(timer, struct napi_struct, timer);
6239
6240
6241
6242
6243 if (napi->gro_bitmask && !napi_disable_pending(napi) &&
6244 !test_and_set_bit(NAPI_STATE_SCHED, &napi->state))
6245 __napi_schedule_irqoff(napi);
6246
6247 return HRTIMER_NORESTART;
6248}
6249
6250static void init_gro_hash(struct napi_struct *napi)
6251{
6252 int i;
6253
6254 for (i = 0; i < GRO_HASH_BUCKETS; i++) {
6255 INIT_LIST_HEAD(&napi->gro_hash[i].list);
6256 napi->gro_hash[i].count = 0;
6257 }
6258 napi->gro_bitmask = 0;
6259}
6260
6261void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
6262 int (*poll)(struct napi_struct *, int), int weight)
6263{
6264 INIT_LIST_HEAD(&napi->poll_list);
6265 hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
6266 napi->timer.function = napi_watchdog;
6267 init_gro_hash(napi);
6268 napi->skb = NULL;
6269 napi->poll = poll;
6270 if (weight > NAPI_POLL_WEIGHT)
6271 netdev_err_once(dev, "%s() called with weight %d\n", __func__,
6272 weight);
6273 napi->weight = weight;
6274 list_add(&napi->dev_list, &dev->napi_list);
6275 napi->dev = dev;
6276#ifdef CONFIG_NETPOLL
6277 napi->poll_owner = -1;
6278#endif
6279 set_bit(NAPI_STATE_SCHED, &napi->state);
6280 napi_hash_add(napi);
6281}
6282EXPORT_SYMBOL(netif_napi_add);
6283
6284void napi_disable(struct napi_struct *n)
6285{
6286 might_sleep();
6287 set_bit(NAPI_STATE_DISABLE, &n->state);
6288
6289 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
6290 msleep(1);
6291 while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
6292 msleep(1);
6293
6294 hrtimer_cancel(&n->timer);
6295
6296 clear_bit(NAPI_STATE_DISABLE, &n->state);
6297}
6298EXPORT_SYMBOL(napi_disable);
6299
6300static void flush_gro_hash(struct napi_struct *napi)
6301{
6302 int i;
6303
6304 for (i = 0; i < GRO_HASH_BUCKETS; i++) {
6305 struct sk_buff *skb, *n;
6306
6307 list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list)
6308 kfree_skb(skb);
6309 napi->gro_hash[i].count = 0;
6310 }
6311}
6312
6313
6314void netif_napi_del(struct napi_struct *napi)
6315{
6316 might_sleep();
6317 if (napi_hash_del(napi))
6318 synchronize_net();
6319 list_del_init(&napi->dev_list);
6320 napi_free_frags(napi);
6321
6322 flush_gro_hash(napi);
6323 napi->gro_bitmask = 0;
6324}
6325EXPORT_SYMBOL(netif_napi_del);
6326
6327static int napi_poll(struct napi_struct *n, struct list_head *repoll)
6328{
6329 void *have;
6330 int work, weight;
6331
6332 list_del_init(&n->poll_list);
6333
6334 have = netpoll_poll_lock(n);
6335
6336 weight = n->weight;
6337
6338
6339
6340
6341
6342
6343
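 /* The NAPI_STATE_SCHED test avoids a race with netpoll's poll_napi():
  * only the entity that holds the poll lock and sees NAPI_STATE_SCHED
  * set actually makes the ->poll() call, so ->poll() is never invoked
  * on a NAPI that is not scheduled.
  */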
6344 work = 0;
6345 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
6346 work = n->poll(n, weight);
6347 trace_napi_poll(n, work, weight);
6348 }
6349
6350 WARN_ON_ONCE(work > weight);
6351
6352 if (likely(work < weight))
6353 goto out_unlock;
6354
6355
6356
6357
6358
6359
6360 if (unlikely(napi_disable_pending(n))) {
6361 napi_complete(n);
6362 goto out_unlock;
6363 }
6364
6365 if (n->gro_bitmask) {
6366
6367
6368
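 /* Flush GRO packets that are at least one jiffy old. If HZ < 1000,
  * jiffies resolution is too coarse for the age test, so flush all.
  */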
6369 napi_gro_flush(n, HZ >= 1000);
6370 }
6371
6372
6373
6374
6375 if (unlikely(!list_empty(&n->poll_list))) {
6376 pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
6377 n->dev ? n->dev->name : "backlog");
6378 goto out_unlock;
6379 }
6380
6381 list_add_tail(&n->poll_list, repoll);
6382
6383out_unlock:
6384 netpoll_poll_unlock(have);
6385
6386 return work;
6387}
6388
6389static __latent_entropy void net_rx_action(struct softirq_action *h)
6390{
6391 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
6392 unsigned long time_limit = jiffies +
6393 usecs_to_jiffies(netdev_budget_usecs);
6394 int budget = netdev_budget;
6395 LIST_HEAD(list);
6396 LIST_HEAD(repoll);
6397
6398 local_irq_disable();
6399 list_splice_init(&sd->poll_list, &list);
6400 local_irq_enable();
6401
6402 for (;;) {
6403 struct napi_struct *n;
6404
6405 if (list_empty(&list)) {
6406 if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
6407 goto out;
6408 break;
6409 }
6410
6411 n = list_first_entry(&list, struct napi_struct, poll_list);
6412 budget -= napi_poll(n, &repoll);
6413
6414
6415
6416
6417
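 /* If the softirq budget or the time allowance is exhausted, stop
  * here; anything still pending is rescheduled via the repoll list.
  */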
6418 if (unlikely(budget <= 0 ||
6419 time_after_eq(jiffies, time_limit))) {
6420 sd->time_squeeze++;
6421 break;
6422 }
6423 }
6424
6425 local_irq_disable();
6426
6427 list_splice_tail_init(&sd->poll_list, &list);
6428 list_splice_tail(&repoll, &list);
6429 list_splice(&list, &sd->poll_list);
6430 if (!list_empty(&sd->poll_list))
6431 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
6432
6433 net_rps_action_and_irq_enable(sd);
6434out:
6435 __kfree_skb_flush();
6436}
6437
6438struct netdev_adjacent {
6439 struct net_device *dev;
6440
6441
6442 bool master;
6443
6444
6445 u16 ref_nr;
6446
6447
6448 void *private;
6449
6450 struct list_head list;
6451 struct rcu_head rcu;
6452};
6453
6454static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
6455 struct list_head *adj_list)
6456{
6457 struct netdev_adjacent *adj;
6458
6459 list_for_each_entry(adj, adj_list, list) {
6460 if (adj->dev == adj_dev)
6461 return adj;
6462 }
6463 return NULL;
6464}
6465
6466static int __netdev_has_upper_dev(struct net_device *upper_dev, void *data)
6467{
6468 struct net_device *dev = data;
6469
6470 return upper_dev == dev;
6471}
6472
6473
6474
6475
6476
6477
6478
6479
6480
6481
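/**
 * netdev_has_upper_dev - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to the specified upper device and return
 * true in case it is. The check walks the whole upper device chain, not
 * just the immediate upper device. The caller must hold the RTNL lock.
 */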
6482bool netdev_has_upper_dev(struct net_device *dev,
6483 struct net_device *upper_dev)
6484{
6485 ASSERT_RTNL();
6486
6487 return netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
6488 upper_dev);
6489}
6490EXPORT_SYMBOL(netdev_has_upper_dev);
6491
6492
6493
6494
6495
6496
6497
6498
6499
6500
6501
6502bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
6503 struct net_device *upper_dev)
6504{
6505 return !!netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
6506 upper_dev);
6507}
6508EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
6509
6510
6511
6512
6513
6514
6515
6516
6517bool netdev_has_any_upper_dev(struct net_device *dev)
6518{
6519 ASSERT_RTNL();
6520
6521 return !list_empty(&dev->adj_list.upper);
6522}
6523EXPORT_SYMBOL(netdev_has_any_upper_dev);
6524
6525
6526
6527
6528
6529
6530
6531
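/**
 * netdev_master_upper_dev_get - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return a pointer to it, or NULL if there
 * is none. The caller must hold the RTNL lock.
 */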
6532struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
6533{
6534 struct netdev_adjacent *upper;
6535
6536 ASSERT_RTNL();
6537
6538 if (list_empty(&dev->adj_list.upper))
6539 return NULL;
6540
6541 upper = list_first_entry(&dev->adj_list.upper,
6542 struct netdev_adjacent, list);
6543 if (likely(upper->master))
6544 return upper->dev;
6545 return NULL;
6546}
6547EXPORT_SYMBOL(netdev_master_upper_dev_get);
6548
6549
6550
6551
6552
6553
6554
6555
6556static bool netdev_has_any_lower_dev(struct net_device *dev)
6557{
6558 ASSERT_RTNL();
6559
6560 return !list_empty(&dev->adj_list.lower);
6561}
6562
6563void *netdev_adjacent_get_private(struct list_head *adj_list)
6564{
6565 struct netdev_adjacent *adj;
6566
6567 adj = list_entry(adj_list, struct netdev_adjacent, list);
6568
6569 return adj->private;
6570}
6571EXPORT_SYMBOL(netdev_adjacent_get_private);
6572
6573
6574
6575
6576
6577
6578
6579
6580
6581struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
6582 struct list_head **iter)
6583{
6584 struct netdev_adjacent *upper;
6585
6586 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
6587
6588 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
6589
6590 if (&upper->list == &dev->adj_list.upper)
6591 return NULL;
6592
6593 *iter = &upper->list;
6594
6595 return upper->dev;
6596}
6597EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
6598
6599static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
6600 struct list_head **iter)
6601{
6602 struct netdev_adjacent *upper;
6603
6604 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
6605
6606 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
6607
6608 if (&upper->list == &dev->adj_list.upper)
6609 return NULL;
6610
6611 *iter = &upper->list;
6612
6613 return upper->dev;
6614}
6615
6616int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
6617 int (*fn)(struct net_device *dev,
6618 void *data),
6619 void *data)
6620{
6621 struct net_device *udev;
6622 struct list_head *iter;
6623 int ret;
6624
6625 for (iter = &dev->adj_list.upper,
6626 udev = netdev_next_upper_dev_rcu(dev, &iter);
6627 udev;
6628 udev = netdev_next_upper_dev_rcu(dev, &iter)) {
6629
6630 ret = fn(udev, data);
6631 if (ret)
6632 return ret;
6633
6634
6635 ret = netdev_walk_all_upper_dev_rcu(udev, fn, data);
6636 if (ret)
6637 return ret;
6638 }
6639
6640 return 0;
6641}
6642EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
6643
6644
6645
6646
6647
6648
6649
6650
6651
6652
6653
6654
6655void *netdev_lower_get_next_private(struct net_device *dev,
6656 struct list_head **iter)
6657{
6658 struct netdev_adjacent *lower;
6659
6660 lower = list_entry(*iter, struct netdev_adjacent, list);
6661
6662 if (&lower->list == &dev->adj_list.lower)
6663 return NULL;
6664
6665 *iter = lower->list.next;
6666
6667 return lower->private;
6668}
6669EXPORT_SYMBOL(netdev_lower_get_next_private);
6670
6671
6672
6673
6674
6675
6676
6677
6678
6679
6680
6681void *netdev_lower_get_next_private_rcu(struct net_device *dev,
6682 struct list_head **iter)
6683{
6684 struct netdev_adjacent *lower;
6685
6686 WARN_ON_ONCE(!rcu_read_lock_held());
6687
6688 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
6689
6690 if (&lower->list == &dev->adj_list.lower)
6691 return NULL;
6692
6693 *iter = &lower->list;
6694
6695 return lower->private;
6696}
6697EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
6698
6699
6700
6701
6702
6703
6704
6705
6706
6707
6708
6709
6710void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
6711{
6712 struct netdev_adjacent *lower;
6713
6714 lower = list_entry(*iter, struct netdev_adjacent, list);
6715
6716 if (&lower->list == &dev->adj_list.lower)
6717 return NULL;
6718
6719 *iter = lower->list.next;
6720
6721 return lower->dev;
6722}
6723EXPORT_SYMBOL(netdev_lower_get_next);
6724
6725static struct net_device *netdev_next_lower_dev(struct net_device *dev,
6726 struct list_head **iter)
6727{
6728 struct netdev_adjacent *lower;
6729
6730 lower = list_entry((*iter)->next, struct netdev_adjacent, list);
6731
6732 if (&lower->list == &dev->adj_list.lower)
6733 return NULL;
6734
6735 *iter = &lower->list;
6736
6737 return lower->dev;
6738}
6739
6740int netdev_walk_all_lower_dev(struct net_device *dev,
6741 int (*fn)(struct net_device *dev,
6742 void *data),
6743 void *data)
6744{
6745 struct net_device *ldev;
6746 struct list_head *iter;
6747 int ret;
6748
6749 for (iter = &dev->adj_list.lower,
6750 ldev = netdev_next_lower_dev(dev, &iter);
6751 ldev;
6752 ldev = netdev_next_lower_dev(dev, &iter)) {
6753
6754 ret = fn(ldev, data);
6755 if (ret)
6756 return ret;
6757
6758
6759 ret = netdev_walk_all_lower_dev(ldev, fn, data);
6760 if (ret)
6761 return ret;
6762 }
6763
6764 return 0;
6765}
6766EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);
6767
6768static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
6769 struct list_head **iter)
6770{
6771 struct netdev_adjacent *lower;
6772
6773 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
6774 if (&lower->list == &dev->adj_list.lower)
6775 return NULL;
6776
6777 *iter = &lower->list;
6778
6779 return lower->dev;
6780}
6781
6782int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
6783 int (*fn)(struct net_device *dev,
6784 void *data),
6785 void *data)
6786{
6787 struct net_device *ldev;
6788 struct list_head *iter;
6789 int ret;
6790
6791 for (iter = &dev->adj_list.lower,
6792 ldev = netdev_next_lower_dev_rcu(dev, &iter);
6793 ldev;
6794 ldev = netdev_next_lower_dev_rcu(dev, &iter)) {
6795
6796 ret = fn(ldev, data);
6797 if (ret)
6798 return ret;
6799
6800
6801 ret = netdev_walk_all_lower_dev_rcu(ldev, fn, data);
6802 if (ret)
6803 return ret;
6804 }
6805
6806 return 0;
6807}
6808EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);
6809
6810
6811
6812
6813
6814
6815
6816
6817
6818
6819void *netdev_lower_get_first_private_rcu(struct net_device *dev)
6820{
6821 struct netdev_adjacent *lower;
6822
6823 lower = list_first_or_null_rcu(&dev->adj_list.lower,
6824 struct netdev_adjacent, list);
6825 if (lower)
6826 return lower->private;
6827 return NULL;
6828}
6829EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
6830
6831
6832
6833
6834
6835
6836
6837
6838struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
6839{
6840 struct netdev_adjacent *upper;
6841
6842 upper = list_first_or_null_rcu(&dev->adj_list.upper,
6843 struct netdev_adjacent, list);
6844 if (upper && likely(upper->master))
6845 return upper->dev;
6846 return NULL;
6847}
6848EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
6849
6850static int netdev_adjacent_sysfs_add(struct net_device *dev,
6851 struct net_device *adj_dev,
6852 struct list_head *dev_list)
6853{
6854 char linkname[IFNAMSIZ+7];
6855
6856 sprintf(linkname, dev_list == &dev->adj_list.upper ?
6857 "upper_%s" : "lower_%s", adj_dev->name);
6858 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
6859 linkname);
6860}
6861static void netdev_adjacent_sysfs_del(struct net_device *dev,
6862 char *name,
6863 struct list_head *dev_list)
6864{
6865 char linkname[IFNAMSIZ+7];
6866
6867 sprintf(linkname, dev_list == &dev->adj_list.upper ?
6868 "upper_%s" : "lower_%s", name);
6869 sysfs_remove_link(&(dev->dev.kobj), linkname);
6870}
6871
6872static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
6873 struct net_device *adj_dev,
6874 struct list_head *dev_list)
6875{
6876 return (dev_list == &dev->adj_list.upper ||
6877 dev_list == &dev->adj_list.lower) &&
6878 net_eq(dev_net(dev), dev_net(adj_dev));
6879}
6880
6881static int __netdev_adjacent_dev_insert(struct net_device *dev,
6882 struct net_device *adj_dev,
6883 struct list_head *dev_list,
6884 void *private, bool master)
6885{
6886 struct netdev_adjacent *adj;
6887 int ret;
6888
6889 adj = __netdev_find_adj(adj_dev, dev_list);
6890
6891 if (adj) {
6892 adj->ref_nr += 1;
6893 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
6894 dev->name, adj_dev->name, adj->ref_nr);
6895
6896 return 0;
6897 }
6898
6899 adj = kmalloc(sizeof(*adj), GFP_KERNEL);
6900 if (!adj)
6901 return -ENOMEM;
6902
6903 adj->dev = adj_dev;
6904 adj->master = master;
6905 adj->ref_nr = 1;
6906 adj->private = private;
6907 dev_hold(adj_dev);
6908
6909 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
6910 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);
6911
6912 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
6913 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
6914 if (ret)
6915 goto free_adj;
6916 }
6917
6918
6919 if (master) {
6920 ret = sysfs_create_link(&(dev->dev.kobj),
6921 &(adj_dev->dev.kobj), "master");
6922 if (ret)
6923 goto remove_symlinks;
6924
6925 list_add_rcu(&adj->list, dev_list);
6926 } else {
6927 list_add_tail_rcu(&adj->list, dev_list);
6928 }
6929
6930 return 0;
6931
6932remove_symlinks:
6933 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
6934 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
6935free_adj:
6936 kfree(adj);
6937 dev_put(adj_dev);
6938
6939 return ret;
6940}
6941
6942static void __netdev_adjacent_dev_remove(struct net_device *dev,
6943 struct net_device *adj_dev,
6944 u16 ref_nr,
6945 struct list_head *dev_list)
6946{
6947 struct netdev_adjacent *adj;
6948
6949 pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
6950 dev->name, adj_dev->name, ref_nr);
6951
6952 adj = __netdev_find_adj(adj_dev, dev_list);
6953
6954 if (!adj) {
6955 pr_err("Adjacency does not exist for device %s from %s\n",
6956 dev->name, adj_dev->name);
6957 WARN_ON(1);
6958 return;
6959 }
6960
6961 if (adj->ref_nr > ref_nr) {
6962 pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
6963 dev->name, adj_dev->name, ref_nr,
6964 adj->ref_nr - ref_nr);
6965 adj->ref_nr -= ref_nr;
6966 return;
6967 }
6968
6969 if (adj->master)
6970 sysfs_remove_link(&(dev->dev.kobj), "master");
6971
6972 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
6973 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
6974
6975 list_del_rcu(&adj->list);
6976 pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
6977 adj_dev->name, dev->name, adj_dev->name);
6978 dev_put(adj_dev);
6979 kfree_rcu(adj, rcu);
6980}
6981
6982static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
6983 struct net_device *upper_dev,
6984 struct list_head *up_list,
6985 struct list_head *down_list,
6986 void *private, bool master)
6987{
6988 int ret;
6989
6990 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
6991 private, master);
6992 if (ret)
6993 return ret;
6994
6995 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
6996 private, false);
6997 if (ret) {
6998 __netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
6999 return ret;
7000 }
7001
7002 return 0;
7003}
7004
7005static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
7006 struct net_device *upper_dev,
7007 u16 ref_nr,
7008 struct list_head *up_list,
7009 struct list_head *down_list)
7010{
7011 __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
7012 __netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
7013}
7014
7015static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
7016 struct net_device *upper_dev,
7017 void *private, bool master)
7018{
7019 return __netdev_adjacent_dev_link_lists(dev, upper_dev,
7020 &dev->adj_list.upper,
7021 &upper_dev->adj_list.lower,
7022 private, master);
7023}
7024
7025static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
7026 struct net_device *upper_dev)
7027{
7028 __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
7029 &dev->adj_list.upper,
7030 &upper_dev->adj_list.lower);
7031}
7032
7033static int __netdev_upper_dev_link(struct net_device *dev,
7034 struct net_device *upper_dev, bool master,
7035 void *upper_priv, void *upper_info,
7036 struct netlink_ext_ack *extack)
7037{
7038 struct netdev_notifier_changeupper_info changeupper_info = {
7039 .info = {
7040 .dev = dev,
7041 .extack = extack,
7042 },
7043 .upper_dev = upper_dev,
7044 .master = master,
7045 .linking = true,
7046 .upper_info = upper_info,
7047 };
7048 struct net_device *master_dev;
7049 int ret = 0;
7050
7051 ASSERT_RTNL();
7052
7053 if (dev == upper_dev)
7054 return -EBUSY;
7055
7056
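 /* To prevent loops, check that dev is not already an upper device of
  * upper_dev.
  */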
7057 if (netdev_has_upper_dev(upper_dev, dev))
7058 return -EBUSY;
7059
7060 if (!master) {
7061 if (netdev_has_upper_dev(dev, upper_dev))
7062 return -EEXIST;
7063 } else {
7064 master_dev = netdev_master_upper_dev_get(dev);
7065 if (master_dev)
7066 return master_dev == upper_dev ? -EEXIST : -EBUSY;
7067 }
7068
7069 ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
7070 &changeupper_info.info);
7071 ret = notifier_to_errno(ret);
7072 if (ret)
7073 return ret;
7074
7075 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
7076 master);
7077 if (ret)
7078 return ret;
7079
7080 ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
7081 &changeupper_info.info);
7082 ret = notifier_to_errno(ret);
7083 if (ret)
7084 goto rollback;
7085
7086 return 0;
7087
7088rollback:
7089 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
7090
7091 return ret;
7092}
7093
7104
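/**
 * netdev_upper_dev_link - Add a link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 * @extack: netlink extended ack
 *
 * Adds a link to a device which is upper to this one. The caller must hold
 * the RTNL lock. On a failure a negative errno code is returned. On success
 * the reference counts are adjusted and the function returns zero.
 */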
7105int netdev_upper_dev_link(struct net_device *dev,
7106 struct net_device *upper_dev,
7107 struct netlink_ext_ack *extack)
7108{
7109 return __netdev_upper_dev_link(dev, upper_dev, false,
7110 NULL, NULL, extack);
7111}
7112EXPORT_SYMBOL(netdev_upper_dev_link);
7113
7114
7115
7116
7117
7118
7119
7120
7121
7122
7123
7124
7125
7126
7127
7128int netdev_master_upper_dev_link(struct net_device *dev,
7129 struct net_device *upper_dev,
7130 void *upper_priv, void *upper_info,
7131 struct netlink_ext_ack *extack)
7132{
7133 return __netdev_upper_dev_link(dev, upper_dev, true,
7134 upper_priv, upper_info, extack);
7135}
7136EXPORT_SYMBOL(netdev_master_upper_dev_link);
7137
7138
7139
7140
7141
7142
7143
7144
7145
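/**
 * netdev_upper_dev_unlink - Removes a link to upper device
 * @dev: device
 * @upper_dev: upper device to unlink
 *
 * Removes a link to a device which is upper to this one. The caller must
 * hold the RTNL lock.
 */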
7146void netdev_upper_dev_unlink(struct net_device *dev,
7147 struct net_device *upper_dev)
7148{
7149 struct netdev_notifier_changeupper_info changeupper_info = {
7150 .info = {
7151 .dev = dev,
7152 },
7153 .upper_dev = upper_dev,
7154 .linking = false,
7155 };
7156
7157 ASSERT_RTNL();
7158
7159 changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
7160
7161 call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
7162 &changeupper_info.info);
7163
7164 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
7165
7166 call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
7167 &changeupper_info.info);
7168}
7169EXPORT_SYMBOL(netdev_upper_dev_unlink);
7170
7171
7172
7173
7174
7175
7176
7177
7178
7179void netdev_bonding_info_change(struct net_device *dev,
7180 struct netdev_bonding_info *bonding_info)
7181{
7182 struct netdev_notifier_bonding_info info = {
7183 .info.dev = dev,
7184 };
7185
7186 memcpy(&info.bonding_info, bonding_info,
7187 sizeof(struct netdev_bonding_info));
7188 call_netdevice_notifiers_info(NETDEV_BONDING_INFO,
7189 &info.info);
7190}
7191EXPORT_SYMBOL(netdev_bonding_info_change);
7192
7193static void netdev_adjacent_add_links(struct net_device *dev)
7194{
7195 struct netdev_adjacent *iter;
7196
7197 struct net *net = dev_net(dev);
7198
7199 list_for_each_entry(iter, &dev->adj_list.upper, list) {
7200 if (!net_eq(net, dev_net(iter->dev)))
7201 continue;
7202 netdev_adjacent_sysfs_add(iter->dev, dev,
7203 &iter->dev->adj_list.lower);
7204 netdev_adjacent_sysfs_add(dev, iter->dev,
7205 &dev->adj_list.upper);
7206 }
7207
7208 list_for_each_entry(iter, &dev->adj_list.lower, list) {
7209 if (!net_eq(net, dev_net(iter->dev)))
7210 continue;
7211 netdev_adjacent_sysfs_add(iter->dev, dev,
7212 &iter->dev->adj_list.upper);
7213 netdev_adjacent_sysfs_add(dev, iter->dev,
7214 &dev->adj_list.lower);
7215 }
7216}
7217
7218static void netdev_adjacent_del_links(struct net_device *dev)
7219{
7220 struct netdev_adjacent *iter;
7221
7222 struct net *net = dev_net(dev);
7223
7224 list_for_each_entry(iter, &dev->adj_list.upper, list) {
7225 if (!net_eq(net, dev_net(iter->dev)))
7226 continue;
7227 netdev_adjacent_sysfs_del(iter->dev, dev->name,
7228 &iter->dev->adj_list.lower);
7229 netdev_adjacent_sysfs_del(dev, iter->dev->name,
7230 &dev->adj_list.upper);
7231 }
7232
7233 list_for_each_entry(iter, &dev->adj_list.lower, list) {
7234 if (!net_eq(net, dev_net(iter->dev)))
7235 continue;
7236 netdev_adjacent_sysfs_del(iter->dev, dev->name,
7237 &iter->dev->adj_list.upper);
7238 netdev_adjacent_sysfs_del(dev, iter->dev->name,
7239 &dev->adj_list.lower);
7240 }
7241}
7242
7243void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
7244{
7245 struct netdev_adjacent *iter;
7246
7247 struct net *net = dev_net(dev);
7248
7249 list_for_each_entry(iter, &dev->adj_list.upper, list) {
7250 if (!net_eq(net, dev_net(iter->dev)))
7251 continue;
7252 netdev_adjacent_sysfs_del(iter->dev, oldname,
7253 &iter->dev->adj_list.lower);
7254 netdev_adjacent_sysfs_add(iter->dev, dev,
7255 &iter->dev->adj_list.lower);
7256 }
7257
7258 list_for_each_entry(iter, &dev->adj_list.lower, list) {
7259 if (!net_eq(net, dev_net(iter->dev)))
7260 continue;
7261 netdev_adjacent_sysfs_del(iter->dev, oldname,
7262 &iter->dev->adj_list.upper);
7263 netdev_adjacent_sysfs_add(iter->dev, dev,
7264 &iter->dev->adj_list.upper);
7265 }
7266}
7267
7268void *netdev_lower_dev_get_private(struct net_device *dev,
7269 struct net_device *lower_dev)
7270{
7271 struct netdev_adjacent *lower;
7272
7273 if (!lower_dev)
7274 return NULL;
7275 lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
7276 if (!lower)
7277 return NULL;
7278
7279 return lower->private;
7280}
7281EXPORT_SYMBOL(netdev_lower_dev_get_private);
7282
7283
7284int dev_get_nest_level(struct net_device *dev)
7285{
7286 struct net_device *lower = NULL;
7287 struct list_head *iter;
7288 int max_nest = -1;
7289 int nest;
7290
7291 ASSERT_RTNL();
7292
7293 netdev_for_each_lower_dev(dev, lower, iter) {
7294 nest = dev_get_nest_level(lower);
7295 if (max_nest < nest)
7296 max_nest = nest;
7297 }
7298
7299 return max_nest + 1;
7300}
7301EXPORT_SYMBOL(dev_get_nest_level);
7302
7303
7304
7305
7306
7307
7308
7309
7310
7311void netdev_lower_state_changed(struct net_device *lower_dev,
7312 void *lower_state_info)
7313{
7314 struct netdev_notifier_changelowerstate_info changelowerstate_info = {
7315 .info.dev = lower_dev,
7316 };
7317
7318 ASSERT_RTNL();
7319 changelowerstate_info.lower_state_info = lower_state_info;
7320 call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE,
7321 &changelowerstate_info.info);
7322}
7323EXPORT_SYMBOL(netdev_lower_state_changed);
7324
7325static void dev_change_rx_flags(struct net_device *dev, int flags)
7326{
7327 const struct net_device_ops *ops = dev->netdev_ops;
7328
7329 if (ops->ndo_change_rx_flags)
7330 ops->ndo_change_rx_flags(dev, flags);
7331}
7332
7333static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
7334{
7335 unsigned int old_flags = dev->flags;
7336 kuid_t uid;
7337 kgid_t gid;
7338
7339 ASSERT_RTNL();
7340
7341 dev->flags |= IFF_PROMISC;
7342 dev->promiscuity += inc;
7343 if (dev->promiscuity == 0) {
7344
7345
7346
7347
7348 if (inc < 0)
7349 dev->flags &= ~IFF_PROMISC;
7350 else {
7351 dev->promiscuity -= inc;
7352 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
7353 dev->name);
7354 return -EOVERFLOW;
7355 }
7356 }
7357 if (dev->flags != old_flags) {
7358 pr_info("device %s %s promiscuous mode\n",
7359 dev->name,
7360 dev->flags & IFF_PROMISC ? "entered" : "left");
7361 if (audit_enabled) {
7362 current_uid_gid(&uid, &gid);
7363 audit_log(audit_context(), GFP_ATOMIC,
7364 AUDIT_ANOM_PROMISCUOUS,
7365 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
7366 dev->name, (dev->flags & IFF_PROMISC),
7367 (old_flags & IFF_PROMISC),
7368 from_kuid(&init_user_ns, audit_get_loginuid(current)),
7369 from_kuid(&init_user_ns, uid),
7370 from_kgid(&init_user_ns, gid),
7371 audit_get_sessionid(current));
7372 }
7373
7374 dev_change_rx_flags(dev, IFF_PROMISC);
7375 }
7376 if (notify)
7377 __dev_notify_flags(dev, old_flags, IFF_PROMISC);
7378 return 0;
7379}
7380
7391
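/**
 * dev_set_promiscuity - update promiscuity count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove promiscuity from a device. While the count in the device
 * remains above zero the interface remains promiscuous. Once it hits zero
 * the device reverts back to normal filtering operation. A negative @inc
 * value is used to drop promiscuity on the device.
 * Return 0 if successful or a negative errno code on error.
 */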
7392int dev_set_promiscuity(struct net_device *dev, int inc)
7393{
7394 unsigned int old_flags = dev->flags;
7395 int err;
7396
7397 err = __dev_set_promiscuity(dev, inc, true);
7398 if (err < 0)
7399 return err;
7400 if (dev->flags != old_flags)
7401 dev_set_rx_mode(dev);
7402 return err;
7403}
7404EXPORT_SYMBOL(dev_set_promiscuity);
7405
7406static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
7407{
7408 unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
7409
7410 ASSERT_RTNL();
7411
7412 dev->flags |= IFF_ALLMULTI;
7413 dev->allmulti += inc;
7414 if (dev->allmulti == 0) {
7415
7416
7417
7418
7419 if (inc < 0)
7420 dev->flags &= ~IFF_ALLMULTI;
7421 else {
7422 dev->allmulti -= inc;
7423 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
7424 dev->name);
7425 return -EOVERFLOW;
7426 }
7427 }
7428 if (dev->flags ^ old_flags) {
7429 dev_change_rx_flags(dev, IFF_ALLMULTI);
7430 dev_set_rx_mode(dev);
7431 if (notify)
7432 __dev_notify_flags(dev, old_flags,
7433 dev->gflags ^ old_gflags);
7434 }
7435 return 0;
7436}
7437
7450
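/**
 * dev_set_allmulti - update allmulti count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove reception of all multicast frames to a device. While the
 * count in the device remains above zero the interface remains listening
 * to all multicast frames. Once it hits zero the device reverts back to
 * normal filtering operation. A negative @inc value is used to drop the
 * counter when releasing a resource needing all multicasts.
 * Return 0 if successful or a negative errno code on error.
 */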
7451int dev_set_allmulti(struct net_device *dev, int inc)
7452{
7453 return __dev_set_allmulti(dev, inc, true);
7454}
7455EXPORT_SYMBOL(dev_set_allmulti);
7456
7457
7458
7459
7460
7461
7462
7463void __dev_set_rx_mode(struct net_device *dev)
7464{
7465 const struct net_device_ops *ops = dev->netdev_ops;
7466
7467
7468 if (!(dev->flags&IFF_UP))
7469 return;
7470
7471 if (!netif_device_present(dev))
7472 return;
7473
7474 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
7475
7476
7477
7478 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
7479 __dev_set_promiscuity(dev, 1, false);
7480 dev->uc_promisc = true;
7481 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
7482 __dev_set_promiscuity(dev, -1, false);
7483 dev->uc_promisc = false;
7484 }
7485 }
7486
7487 if (ops->ndo_set_rx_mode)
7488 ops->ndo_set_rx_mode(dev);
7489}
7490
7491void dev_set_rx_mode(struct net_device *dev)
7492{
7493 netif_addr_lock_bh(dev);
7494 __dev_set_rx_mode(dev);
7495 netif_addr_unlock_bh(dev);
7496}
7497
7498
7499
7500
7501
7502
7503
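/**
 * dev_get_flags - get flags reported to userspace
 * @dev: device
 *
 * Get the combination of flag bits exported through APIs to userspace:
 * dev->flags with the volatile bits (promisc/allmulti from gflags and
 * RUNNING/LOWER_UP/DORMANT from the operational state) filled in.
 */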
7504unsigned int dev_get_flags(const struct net_device *dev)
7505{
7506 unsigned int flags;
7507
7508 flags = (dev->flags & ~(IFF_PROMISC |
7509 IFF_ALLMULTI |
7510 IFF_RUNNING |
7511 IFF_LOWER_UP |
7512 IFF_DORMANT)) |
7513 (dev->gflags & (IFF_PROMISC |
7514 IFF_ALLMULTI));
7515
7516 if (netif_running(dev)) {
7517 if (netif_oper_up(dev))
7518 flags |= IFF_RUNNING;
7519 if (netif_carrier_ok(dev))
7520 flags |= IFF_LOWER_UP;
7521 if (netif_dormant(dev))
7522 flags |= IFF_DORMANT;
7523 }
7524
7525 return flags;
7526}
7527EXPORT_SYMBOL(dev_get_flags);
7528
7529int __dev_change_flags(struct net_device *dev, unsigned int flags,
7530 struct netlink_ext_ack *extack)
7531{
7532 unsigned int old_flags = dev->flags;
7533 int ret;
7534
7535 ASSERT_RTNL();
7536
7537
7538
7539
7540
7541 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
7542 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
7543 IFF_AUTOMEDIA)) |
7544 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
7545 IFF_ALLMULTI));
7546
7547
7548
7549
7550
7551 if ((old_flags ^ flags) & IFF_MULTICAST)
7552 dev_change_rx_flags(dev, IFF_MULTICAST);
7553
7554 dev_set_rx_mode(dev);
7555
7556
7557
7558
7559
7560
7561
7562 ret = 0;
7563 if ((old_flags ^ flags) & IFF_UP) {
7564 if (old_flags & IFF_UP)
7565 __dev_close(dev);
7566 else
7567 ret = __dev_open(dev, extack);
7568 }
7569
7570 if ((flags ^ dev->gflags) & IFF_PROMISC) {
7571 int inc = (flags & IFF_PROMISC) ? 1 : -1;
7572 unsigned int old_flags = dev->flags;
7573
7574 dev->gflags ^= IFF_PROMISC;
7575
7576 if (__dev_set_promiscuity(dev, inc, false) >= 0)
7577 if (dev->flags != old_flags)
7578 dev_set_rx_mode(dev);
7579 }
7580
7581
7582
7583
7584
7585 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
7586 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
7587
7588 dev->gflags ^= IFF_ALLMULTI;
7589 __dev_set_allmulti(dev, inc, false);
7590 }
7591
7592 return ret;
7593}
7594
7595void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
7596 unsigned int gchanges)
7597{
7598 unsigned int changes = dev->flags ^ old_flags;
7599
7600 if (gchanges)
7601 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
7602
7603 if (changes & IFF_UP) {
7604 if (dev->flags & IFF_UP)
7605 call_netdevice_notifiers(NETDEV_UP, dev);
7606 else
7607 call_netdevice_notifiers(NETDEV_DOWN, dev);
7608 }
7609
7610 if (dev->flags & IFF_UP &&
7611 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
7612 struct netdev_notifier_change_info change_info = {
7613 .info = {
7614 .dev = dev,
7615 },
7616 .flags_changed = changes,
7617 };
7618
7619 call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info);
7620 }
7621}
7622
7623
7624
7625
7626
7627
7628
7629
7630
7631
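/**
 * dev_change_flags - change device settings
 * @dev: device
 * @flags: device state flags
 * @extack: netlink extended ack
 *
 * Change settings on device based on state flags. The flags are in the
 * userspace exported format; notifiers are called for any resulting
 * changes.
 */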
7632int dev_change_flags(struct net_device *dev, unsigned int flags,
7633 struct netlink_ext_ack *extack)
7634{
7635 int ret;
7636 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
7637
7638 ret = __dev_change_flags(dev, flags, extack);
7639 if (ret < 0)
7640 return ret;
7641
7642 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
7643 __dev_notify_flags(dev, old_flags, changes);
7644 return ret;
7645}
7646EXPORT_SYMBOL(dev_change_flags);
7647
7648int __dev_set_mtu(struct net_device *dev, int new_mtu)
7649{
7650 const struct net_device_ops *ops = dev->netdev_ops;
7651
7652 if (ops->ndo_change_mtu)
7653 return ops->ndo_change_mtu(dev, new_mtu);
7654
7655 dev->mtu = new_mtu;
7656 return 0;
7657}
7658EXPORT_SYMBOL(__dev_set_mtu);
7659
7660
7661
7662
7663
7664
7665
7666
7667
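/**
 * dev_set_mtu_ext - Change maximum transfer unit
 * @dev: device
 * @new_mtu: new transfer unit
 * @extack: netlink extended ack
 *
 * Change the maximum transfer size of the network device, reporting
 * errors via @extack.
 */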
7668int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
7669 struct netlink_ext_ack *extack)
7670{
7671 int err, orig_mtu;
7672
7673 if (new_mtu == dev->mtu)
7674 return 0;
7675
7676
7677 if (new_mtu < 0 || new_mtu < dev->min_mtu) {
7678 NL_SET_ERR_MSG(extack, "mtu less than device minimum");
7679 return -EINVAL;
7680 }
7681
7682 if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
7683 NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
7684 return -EINVAL;
7685 }
7686
7687 if (!netif_device_present(dev))
7688 return -ENODEV;
7689
7690 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
7691 err = notifier_to_errno(err);
7692 if (err)
7693 return err;
7694
7695 orig_mtu = dev->mtu;
7696 err = __dev_set_mtu(dev, new_mtu);
7697
7698 if (!err) {
7699 err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
7700 orig_mtu);
7701 err = notifier_to_errno(err);
7702 if (err) {
7703
7704
7705
7706 __dev_set_mtu(dev, orig_mtu);
7707 call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
7708 new_mtu);
7709 }
7710 }
7711 return err;
7712}
7713
7714int dev_set_mtu(struct net_device *dev, int new_mtu)
7715{
7716 struct netlink_ext_ack extack;
7717 int err;
7718
7719 memset(&extack, 0, sizeof(extack));
7720 err = dev_set_mtu_ext(dev, new_mtu, &extack);
7721 if (err && extack._msg)
7722 net_err_ratelimited("%s: %s\n", dev->name, extack._msg);
7723 return err;
7724}
7725EXPORT_SYMBOL(dev_set_mtu);
7726
7727
7728
7729
7730
7731
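/**
 *	dev_change_tx_queue_len - Change TX queue length of a netdevice
 *	@dev: device
 *	@new_len: new tx queue length
 *
 *	Returns 0 on success; on failure the old length is restored and a
 *	negative errno code is returned.
 */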
7732int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
7733{
7734 unsigned int orig_len = dev->tx_queue_len;
7735 int res;
7736
7737 if (new_len != (unsigned int)new_len)
7738 return -ERANGE;
7739
7740 if (new_len != orig_len) {
7741 dev->tx_queue_len = new_len;
7742 res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
7743 res = notifier_to_errno(res);
7744 if (res)
7745 goto err_rollback;
7746 res = dev_qdisc_change_tx_queue_len(dev);
7747 if (res)
7748 goto err_rollback;
7749 }
7750
7751 return 0;
7752
7753err_rollback:
7754 netdev_err(dev, "refused to change device tx_queue_len\n");
7755 dev->tx_queue_len = orig_len;
7756 return res;
7757}
7758
7759
7760
7761
7762
7763
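/**
 *	dev_set_group - Change group this device belongs to
 *	@dev: device
 *	@new_group: group this device should belong to
 */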
7764void dev_set_group(struct net_device *dev, int new_group)
7765{
7766 dev->group = new_group;
7767}
7768EXPORT_SYMBOL(dev_set_group);
7769
7770
7771
7772
7773
7774
7775
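/**
 *	dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR.
 *	@dev: device
 *	@addr: new address
 *	@extack: netlink extended ack
 *
 *	Lets notifier listeners veto a hardware address change before it
 *	is applied; returns 0 or the notifier's error code.
 */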
7776int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
7777 struct netlink_ext_ack *extack)
7778{
7779 struct netdev_notifier_pre_changeaddr_info info = {
7780 .info.dev = dev,
7781 .info.extack = extack,
7782 .dev_addr = addr,
7783 };
7784 int rc;
7785
7786 rc = call_netdevice_notifiers_info(NETDEV_PRE_CHANGEADDR, &info.info);
7787 return notifier_to_errno(rc);
7788}
7789EXPORT_SYMBOL(dev_pre_changeaddr_notify);
7790
7791
7792
7793
7794
7795
7796
7797
7798
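/**
 *	dev_set_mac_address - Change Media Access Control Address
 *	@dev: device
 *	@sa: new address
 *	@extack: netlink extended ack
 *
 *	Change the hardware (MAC) address of the device. Returns 0 on
 *	success or a negative errno code on failure.
 */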
7799int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
7800 struct netlink_ext_ack *extack)
7801{
7802 const struct net_device_ops *ops = dev->netdev_ops;
7803 int err;
7804
7805 if (!ops->ndo_set_mac_address)
7806 return -EOPNOTSUPP;
7807 if (sa->sa_family != dev->type)
7808 return -EINVAL;
7809 if (!netif_device_present(dev))
7810 return -ENODEV;
7811 err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack);
7812 if (err)
7813 return err;
7814 err = ops->ndo_set_mac_address(dev, sa);
7815 if (err)
7816 return err;
7817 dev->addr_assign_type = NET_ADDR_SET;
7818 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
7819 add_device_randomness(dev->dev_addr, dev->addr_len);
7820 return 0;
7821}
7822EXPORT_SYMBOL(dev_set_mac_address);
7823
7824
7825
7826
7827
7828
7829
7830
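/**
 *	dev_change_carrier - Change device carrier
 *	@dev: device
 *	@new_carrier: new value
 *
 *	Change device carrier state via the driver's ndo_change_carrier
 *	callback.
 */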
7831int dev_change_carrier(struct net_device *dev, bool new_carrier)
7832{
7833 const struct net_device_ops *ops = dev->netdev_ops;
7834
7835 if (!ops->ndo_change_carrier)
7836 return -EOPNOTSUPP;
7837 if (!netif_device_present(dev))
7838 return -ENODEV;
7839 return ops->ndo_change_carrier(dev, new_carrier);
7840}
7841EXPORT_SYMBOL(dev_change_carrier);
7842
7843
7844
7845
7846
7847
7848
7849
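/**
 *	dev_get_phys_port_id - Get device physical port ID
 *	@dev: device
 *	@ppid: port ID
 *
 *	Get device physical port ID; -EOPNOTSUPP if the driver does not
 *	implement ndo_get_phys_port_id.
 */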
7850int dev_get_phys_port_id(struct net_device *dev,
7851 struct netdev_phys_item_id *ppid)
7852{
7853 const struct net_device_ops *ops = dev->netdev_ops;
7854
7855 if (!ops->ndo_get_phys_port_id)
7856 return -EOPNOTSUPP;
7857 return ops->ndo_get_phys_port_id(dev, ppid);
7858}
7859EXPORT_SYMBOL(dev_get_phys_port_id);
7860
7861
7862
7863
7864
7865
7866
7867
7868
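/**
 *	dev_get_phys_port_name - Get device physical port name
 *	@dev: device
 *	@name: port name
 *	@len: limit of bytes to copy to name
 *
 *	Get device physical port name from the driver.
 */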
7869int dev_get_phys_port_name(struct net_device *dev,
7870 char *name, size_t len)
7871{
7872 const struct net_device_ops *ops = dev->netdev_ops;
7873
7874 if (!ops->ndo_get_phys_port_name)
7875 return -EOPNOTSUPP;
7876 return ops->ndo_get_phys_port_name(dev, name, len);
7877}
7878EXPORT_SYMBOL(dev_get_phys_port_name);
7879
7880
7881
7882
7883
7884
7885
7886
7887
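/**
 *	dev_change_proto_down - update protocol port state information
 *	@dev: device
 *	@proto_down: new value
 *
 *	Asks the driver to change the port's proto_down state; returns
 *	-EOPNOTSUPP when ndo_change_proto_down is not implemented.
 */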
7888int dev_change_proto_down(struct net_device *dev, bool proto_down)
7889{
7890 const struct net_device_ops *ops = dev->netdev_ops;
7891
7892 if (!ops->ndo_change_proto_down)
7893 return -EOPNOTSUPP;
7894 if (!netif_device_present(dev))
7895 return -ENODEV;
7896 return ops->ndo_change_proto_down(dev, proto_down);
7897}
7898EXPORT_SYMBOL(dev_change_proto_down);
7899
7900u32 __dev_xdp_query(struct net_device *dev, bpf_op_t bpf_op,
7901 enum bpf_netdev_command cmd)
7902{
7903 struct netdev_bpf xdp;
7904
7905 if (!bpf_op)
7906 return 0;
7907
7908 memset(&xdp, 0, sizeof(xdp));
7909 xdp.command = cmd;
7910
7911
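	/* An XDP_QUERY_PROG query must always succeed; warn if it does not. */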
7912 WARN_ON(bpf_op(dev, &xdp) < 0 && cmd == XDP_QUERY_PROG);
7913
7914 return xdp.prog_id;
7915}
7916
7917static int dev_xdp_install(struct net_device *dev, bpf_op_t bpf_op,
7918 struct netlink_ext_ack *extack, u32 flags,
7919 struct bpf_prog *prog)
7920{
7921 struct netdev_bpf xdp;
7922
7923 memset(&xdp, 0, sizeof(xdp));
7924 if (flags & XDP_FLAGS_HW_MODE)
7925 xdp.command = XDP_SETUP_PROG_HW;
7926 else
7927 xdp.command = XDP_SETUP_PROG;
7928 xdp.extack = extack;
7929 xdp.flags = flags;
7930 xdp.prog = prog;
7931
7932 return bpf_op(dev, &xdp);
7933}
7934
7935static void dev_xdp_uninstall(struct net_device *dev)
7936{
7937 struct netdev_bpf xdp;
7938 bpf_op_t ndo_bpf;
7939
7940
7941 WARN_ON(dev_xdp_install(dev, generic_xdp_install, NULL, 0, NULL));
7942
7943
7944 ndo_bpf = dev->netdev_ops->ndo_bpf;
7945 if (!ndo_bpf)
7946 return;
7947
7948 memset(&xdp, 0, sizeof(xdp));
7949 xdp.command = XDP_QUERY_PROG;
7950 WARN_ON(ndo_bpf(dev, &xdp));
7951 if (xdp.prog_id)
7952 WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags,
7953 NULL));
7954
7955
7956 memset(&xdp, 0, sizeof(xdp));
7957 xdp.command = XDP_QUERY_PROG_HW;
7958 if (!ndo_bpf(dev, &xdp) && xdp.prog_id)
7959 WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags,
7960 NULL));
7961}
7962
7963
7964
7965
7966
7967
7968
7969
7970
7971
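/**
 *	dev_change_xdp_fd - set or clear a bpf program for a device rx path
 *	@dev: device
 *	@extack: netlink extended ack
 *	@fd: new program fd or negative value to clear
 *	@flags: xdp-related flags
 *
 *	Set or clear a bpf program for a device.
 */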
7972int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
7973 int fd, u32 flags)
7974{
7975 const struct net_device_ops *ops = dev->netdev_ops;
7976 enum bpf_netdev_command query;
7977 struct bpf_prog *prog = NULL;
7978 bpf_op_t bpf_op, bpf_chk;
7979 int err;
7980
7981 ASSERT_RTNL();
7982
7983 query = flags & XDP_FLAGS_HW_MODE ? XDP_QUERY_PROG_HW : XDP_QUERY_PROG;
7984
7985 bpf_op = bpf_chk = ops->ndo_bpf;
7986 if (!bpf_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE)))
7987 return -EOPNOTSUPP;
7988 if (!bpf_op || (flags & XDP_FLAGS_SKB_MODE))
7989 bpf_op = generic_xdp_install;
7990 if (bpf_op == bpf_chk)
7991 bpf_chk = generic_xdp_install;
7992
7993 if (fd >= 0) {
7994 if (__dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG) ||
7995 __dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG_HW))
7996 return -EEXIST;
7997 if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) &&
7998 __dev_xdp_query(dev, bpf_op, query))
7999 return -EBUSY;
8000
8001 prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
8002 bpf_op == ops->ndo_bpf);
8003 if (IS_ERR(prog))
8004 return PTR_ERR(prog);
8005
8006 if (!(flags & XDP_FLAGS_HW_MODE) &&
8007 bpf_prog_is_dev_bound(prog->aux)) {
8008 NL_SET_ERR_MSG(extack, "using device-bound program without HW_MODE flag is not supported");
8009 bpf_prog_put(prog);
8010 return -EINVAL;
8011 }
8012 }
8013
8014 err = dev_xdp_install(dev, bpf_op, extack, flags, prog);
8015 if (err < 0 && prog)
8016 bpf_prog_put(prog);
8017
8018 return err;
8019}
8020
8021
8022
8023
8024
8025
8026
8027
8028
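/**
 *	dev_new_index - allocate an ifindex
 *	@net: the applicable net namespace
 *
 *	Returns a suitable unique value for a new device interface number,
 *	scanning upward from net->ifindex until an unused index is found.
 */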
8029static int dev_new_index(struct net *net)
8030{
8031 int ifindex = net->ifindex;
8032
8033 for (;;) {
8034 if (++ifindex <= 0)
8035 ifindex = 1;
8036 if (!__dev_get_by_index(net, ifindex))
8037 return net->ifindex = ifindex;
8038 }
8039}
8040
8041
8042static LIST_HEAD(net_todo_list);
8043DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
8044
8045static void net_set_todo(struct net_device *dev)
8046{
8047 list_add_tail(&dev->todo_list, &net_todo_list);
8048 dev_net(dev)->dev_unreg_count++;
8049}
8050
8051static void rollback_registered_many(struct list_head *head)
8052{
8053 struct net_device *dev, *tmp;
8054 LIST_HEAD(close_head);
8055
8056 BUG_ON(dev_boot_phase);
8057 ASSERT_RTNL();
8058
8059 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
8060
8061
8062
8063
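		/* Some devices call without ever having been registered,
		 * e.g. for initialization unwind; warn, drop them from the
		 * list and carry on with the remaining devices.
		 */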
8064 if (dev->reg_state == NETREG_UNINITIALIZED) {
8065 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
8066 dev->name, dev);
8067
8068 WARN_ON(1);
8069 list_del(&dev->unreg_list);
8070 continue;
8071 }
8072 dev->dismantle = true;
8073 BUG_ON(dev->reg_state != NETREG_REGISTERED);
8074 }
8075
8076
8077 list_for_each_entry(dev, head, unreg_list)
8078 list_add_tail(&dev->close_list, &close_head);
8079 dev_close_many(&close_head, true);
8080
8081 list_for_each_entry(dev, head, unreg_list) {
8082
8083 unlist_netdevice(dev);
8084
8085 dev->reg_state = NETREG_UNREGISTERING;
8086 }
8087 flush_all_backlogs();
8088
8089 synchronize_net();
8090
8091 list_for_each_entry(dev, head, unreg_list) {
8092 struct sk_buff *skb = NULL;
8093
8094
8095 dev_shutdown(dev);
8096
8097 dev_xdp_uninstall(dev);
8098
8099
8100
8101
8102 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
8103
8104 if (!dev->rtnl_link_ops ||
8105 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
8106 skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
8107 GFP_KERNEL, NULL, 0);
8108
8109
8110
8111
8112 dev_uc_flush(dev);
8113 dev_mc_flush(dev);
8114
8115 if (dev->netdev_ops->ndo_uninit)
8116 dev->netdev_ops->ndo_uninit(dev);
8117
8118 if (skb)
8119 rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
8120
8121
8122 WARN_ON(netdev_has_any_upper_dev(dev));
8123 WARN_ON(netdev_has_any_lower_dev(dev));
8124
8125
8126 netdev_unregister_kobject(dev);
8127#ifdef CONFIG_XPS
8128
8129 netif_reset_xps_queues_gt(dev, 0);
8130#endif
8131 }
8132
8133 synchronize_net();
8134
8135 list_for_each_entry(dev, head, unreg_list)
8136 dev_put(dev);
8137}
8138
8139static void rollback_registered(struct net_device *dev)
8140{
8141 LIST_HEAD(single);
8142
8143 list_add(&dev->unreg_list, &single);
8144 rollback_registered_many(&single);
8145 list_del(&single);
8146}
8147
8148static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
8149 struct net_device *upper, netdev_features_t features)
8150{
8151 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
8152 netdev_features_t feature;
8153 int feature_bit;
8154
8155 for_each_netdev_feature(upper_disables, feature_bit) {
8156 feature = __NETIF_F_BIT(feature_bit);
8157 if (!(upper->wanted_features & feature)
8158 && (features & feature)) {
8159 netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
8160 &feature, upper->name);
8161 features &= ~feature;
8162 }
8163 }
8164
8165 return features;
8166}
8167
8168static void netdev_sync_lower_features(struct net_device *upper,
8169 struct net_device *lower, netdev_features_t features)
8170{
8171 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
8172 netdev_features_t feature;
8173 int feature_bit;
8174
8175 for_each_netdev_feature(upper_disables, feature_bit) {
8176 feature = __NETIF_F_BIT(feature_bit);
8177 if (!(features & feature) && (lower->features & feature)) {
8178 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
8179 &feature, lower->name);
8180 lower->wanted_features &= ~feature;
8181 netdev_update_features(lower);
8182
8183 if (unlikely(lower->features & feature))
8184 netdev_WARN(upper, "failed to disable %pNF on %s!\n",
8185 &feature, lower->name);
8186 }
8187 }
8188}
8189
8190static netdev_features_t netdev_fix_features(struct net_device *dev,
8191 netdev_features_t features)
8192{
8193
8194 if ((features & NETIF_F_HW_CSUM) &&
8195 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
8196 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
8197 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
8198 }
8199
8200
8201 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
8202 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
8203 features &= ~NETIF_F_ALL_TSO;
8204 }
8205
8206 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
8207 !(features & NETIF_F_IP_CSUM)) {
8208 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
8209 features &= ~NETIF_F_TSO;
8210 features &= ~NETIF_F_TSO_ECN;
8211 }
8212
8213 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
8214 !(features & NETIF_F_IPV6_CSUM)) {
8215 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
8216 features &= ~NETIF_F_TSO6;
8217 }
8218
8219
8220 if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
8221 features &= ~NETIF_F_TSO_MANGLEID;
8222
8223
8224 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
8225 features &= ~NETIF_F_TSO_ECN;
8226
8227
8228 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
8229 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
8230 features &= ~NETIF_F_GSO;
8231 }
8232
8233
8234 if ((features & dev->gso_partial_features) &&
8235 !(features & NETIF_F_GSO_PARTIAL)) {
8236 netdev_dbg(dev,
8237 "Dropping partially supported GSO features since no GSO partial.\n");
8238 features &= ~dev->gso_partial_features;
8239 }
8240
8241 if (!(features & NETIF_F_RXCSUM)) {
8242
8243
8244
8245
8246
8247 if (features & NETIF_F_GRO_HW) {
8248 netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n");
8249 features &= ~NETIF_F_GRO_HW;
8250 }
8251 }
8252
8253
8254 if (features & NETIF_F_RXFCS) {
8255 if (features & NETIF_F_LRO) {
8256 netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n");
8257 features &= ~NETIF_F_LRO;
8258 }
8259
8260 if (features & NETIF_F_GRO_HW) {
8261 netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n");
8262 features &= ~NETIF_F_GRO_HW;
8263 }
8264 }
8265
8266 return features;
8267}
8268
8269int __netdev_update_features(struct net_device *dev)
8270{
8271 struct net_device *upper, *lower;
8272 netdev_features_t features;
8273 struct list_head *iter;
8274 int err = -1;
8275
8276 ASSERT_RTNL();
8277
8278 features = netdev_get_wanted_features(dev);
8279
8280 if (dev->netdev_ops->ndo_fix_features)
8281 features = dev->netdev_ops->ndo_fix_features(dev, features);
8282
8283
8284 features = netdev_fix_features(dev, features);
8285
8286
8287 netdev_for_each_upper_dev_rcu(dev, upper, iter)
8288 features = netdev_sync_upper_features(dev, upper, features);
8289
8290 if (dev->features == features)
8291 goto sync_lower;
8292
8293 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
8294 &dev->features, &features);
8295
8296 if (dev->netdev_ops->ndo_set_features)
8297 err = dev->netdev_ops->ndo_set_features(dev, features);
8298 else
8299 err = 0;
8300
8301 if (unlikely(err < 0)) {
8302 netdev_err(dev,
8303 "set_features() failed (%d); wanted %pNF, left %pNF\n",
8304 err, &features, &dev->features);
8305
8306
8307
8308 return -1;
8309 }
8310
8311sync_lower:
8312
8313
8314
8315 netdev_for_each_lower_dev(dev, lower, iter)
8316 netdev_sync_lower_features(dev, lower, features);
8317
8318 if (!err) {
8319 netdev_features_t diff = features ^ dev->features;
8320
8321 if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) {
8322
8323
8324
8325
8326
8327
8328
8329 if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
8330 dev->features = features;
8331 udp_tunnel_get_rx_info(dev);
8332 } else {
8333 udp_tunnel_drop_rx_info(dev);
8334 }
8335 }
8336
8337 if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) {
8338 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
8339 dev->features = features;
8340 err |= vlan_get_rx_ctag_filter_info(dev);
8341 } else {
8342 vlan_drop_rx_ctag_filter_info(dev);
8343 }
8344 }
8345
8346 if (diff & NETIF_F_HW_VLAN_STAG_FILTER) {
8347 if (features & NETIF_F_HW_VLAN_STAG_FILTER) {
8348 dev->features = features;
8349 err |= vlan_get_rx_stag_filter_info(dev);
8350 } else {
8351 vlan_drop_rx_stag_filter_info(dev);
8352 }
8353 }
8354
8355 dev->features = features;
8356 }
8357
8358 return err < 0 ? 0 : 1;
8359}
8360
8361
8362
8363
8364
8365
8366
8367
8368
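/**
 *	netdev_update_features - recalculate device features
 *	@dev: the device to check
 *
 *	Recalculate dev->features set and send notifications if it
 *	has changed. Must be called with the rtnl lock held.
 */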
8369void netdev_update_features(struct net_device *dev)
8370{
8371 if (__netdev_update_features(dev))
8372 netdev_features_change(dev);
8373}
8374EXPORT_SYMBOL(netdev_update_features);
8375
8376
8377
8378
8379
8380
8381
8382
8383
8384
8385
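/**
 *	netdev_change_features - recalculate device features
 *	@dev: the device to check
 *
 *	Recalculate dev->features set and send notifications even if
 *	they have not changed. Must be called with the rtnl lock held.
 */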
8386void netdev_change_features(struct net_device *dev)
8387{
8388 __netdev_update_features(dev);
8389 netdev_features_change(dev);
8390}
8391EXPORT_SYMBOL(netdev_change_features);
8392
8393
8394
8395
8396
8397
8398
8399
8400
8401
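/**
 *	netif_stacked_transfer_operstate - transfer operstate
 *	@rootdev: the root or lower level device to transfer state from
 *	@dev: the device to transfer operstate to
 *
 *	Transfer operational state (dormant flag and carrier) from root to
 *	device, for use when a stacking relationship exists between the
 *	two devices.
 */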
8402void netif_stacked_transfer_operstate(const struct net_device *rootdev,
8403 struct net_device *dev)
8404{
8405 if (rootdev->operstate == IF_OPER_DORMANT)
8406 netif_dormant_on(dev);
8407 else
8408 netif_dormant_off(dev);
8409
8410 if (netif_carrier_ok(rootdev))
8411 netif_carrier_on(dev);
8412 else
8413 netif_carrier_off(dev);
8414}
8415EXPORT_SYMBOL(netif_stacked_transfer_operstate);
8416
8417static int netif_alloc_rx_queues(struct net_device *dev)
8418{
8419 unsigned int i, count = dev->num_rx_queues;
8420 struct netdev_rx_queue *rx;
8421 size_t sz = count * sizeof(*rx);
8422 int err = 0;
8423
8424 BUG_ON(count < 1);
8425
8426 rx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
8427 if (!rx)
8428 return -ENOMEM;
8429
8430 dev->_rx = rx;
8431
8432 for (i = 0; i < count; i++) {
8433 rx[i].dev = dev;
8434
8435
8436 err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i);
8437 if (err < 0)
8438 goto err_rxq_info;
8439 }
8440 return 0;
8441
8442err_rxq_info:
8443
8444 while (i--)
8445 xdp_rxq_info_unreg(&rx[i].xdp_rxq);
8446 kvfree(dev->_rx);
8447 dev->_rx = NULL;
8448 return err;
8449}
8450
8451static void netif_free_rx_queues(struct net_device *dev)
8452{
8453 unsigned int i, count = dev->num_rx_queues;
8454
8455
8456 if (!dev->_rx)
8457 return;
8458
8459 for (i = 0; i < count; i++)
8460 xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq);
8461
8462 kvfree(dev->_rx);
8463}
8464
8465static void netdev_init_one_queue(struct net_device *dev,
8466 struct netdev_queue *queue, void *_unused)
8467{
8468
8469 spin_lock_init(&queue->_xmit_lock);
8470 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
8471 queue->xmit_lock_owner = -1;
8472 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
8473 queue->dev = dev;
8474#ifdef CONFIG_BQL
8475 dql_init(&queue->dql, HZ);
8476#endif
8477}
8478
8479static void netif_free_tx_queues(struct net_device *dev)
8480{
8481 kvfree(dev->_tx);
8482}
8483
8484static int netif_alloc_netdev_queues(struct net_device *dev)
8485{
8486 unsigned int count = dev->num_tx_queues;
8487 struct netdev_queue *tx;
8488 size_t sz = count * sizeof(*tx);
8489
8490 if (count < 1 || count > 0xffff)
8491 return -EINVAL;
8492
8493 tx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
8494 if (!tx)
8495 return -ENOMEM;
8496
8497 dev->_tx = tx;
8498
8499 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
8500 spin_lock_init(&dev->tx_global_lock);
8501
8502 return 0;
8503}
8504
8505void netif_tx_stop_all_queues(struct net_device *dev)
8506{
8507 unsigned int i;
8508
8509 for (i = 0; i < dev->num_tx_queues; i++) {
8510 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
8511
8512 netif_tx_stop_queue(txq);
8513 }
8514}
8515EXPORT_SYMBOL(netif_tx_stop_all_queues);
8516
8517
8518
8519
8520
8521
8522
8523
8524
8525
8526
8527
8528
8529
8530
8531
8532
8533
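/**
 *	register_netdevice - register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	Callers must hold the rtnl semaphore. You may want
 *	register_netdev() instead of this.
 */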
8534int register_netdevice(struct net_device *dev)
8535{
8536 int ret;
8537 struct net *net = dev_net(dev);
8538
8539 BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE <
8540 NETDEV_FEATURE_COUNT);
8541 BUG_ON(dev_boot_phase);
8542 ASSERT_RTNL();
8543
8544 might_sleep();
8545
8546
8547 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
8548 BUG_ON(!net);
8549
8550 spin_lock_init(&dev->addr_list_lock);
8551 netdev_set_addr_lockdep_class(dev);
8552
8553 ret = dev_get_valid_name(net, dev, dev->name);
8554 if (ret < 0)
8555 goto out;
8556
8557
8558 if (dev->netdev_ops->ndo_init) {
8559 ret = dev->netdev_ops->ndo_init(dev);
8560 if (ret) {
8561 if (ret > 0)
8562 ret = -EIO;
8563 goto out;
8564 }
8565 }
8566
8567 if (((dev->hw_features | dev->features) &
8568 NETIF_F_HW_VLAN_CTAG_FILTER) &&
8569 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
8570 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
8571 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
8572 ret = -EINVAL;
8573 goto err_uninit;
8574 }
8575
8576 ret = -EBUSY;
8577 if (!dev->ifindex)
8578 dev->ifindex = dev_new_index(net);
8579 else if (__dev_get_by_index(net, dev->ifindex))
8580 goto err_uninit;
8581
8582
8583
8584
8585 dev->hw_features |= NETIF_F_SOFT_FEATURES;
8586 dev->features |= NETIF_F_SOFT_FEATURES;
8587
8588 if (dev->netdev_ops->ndo_udp_tunnel_add) {
8589 dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
8590 dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
8591 }
8592
8593 dev->wanted_features = dev->features & dev->hw_features;
8594
8595 if (!(dev->flags & IFF_LOOPBACK))
8596 dev->hw_features |= NETIF_F_NOCACHE_COPY;
8597
8598
8599
8600
8601
8602
8603 if (dev->hw_features & NETIF_F_TSO)
8604 dev->hw_features |= NETIF_F_TSO_MANGLEID;
8605 if (dev->vlan_features & NETIF_F_TSO)
8606 dev->vlan_features |= NETIF_F_TSO_MANGLEID;
8607 if (dev->mpls_features & NETIF_F_TSO)
8608 dev->mpls_features |= NETIF_F_TSO_MANGLEID;
8609 if (dev->hw_enc_features & NETIF_F_TSO)
8610 dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
8611
8612
8613
8614 dev->vlan_features |= NETIF_F_HIGHDMA;
8615
8616
8617
8618 dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;
8619
8620
8621
8622 dev->mpls_features |= NETIF_F_SG;
8623
8624 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
8625 ret = notifier_to_errno(ret);
8626 if (ret)
8627 goto err_uninit;
8628
8629 ret = netdev_register_kobject(dev);
8630 if (ret)
8631 goto err_uninit;
8632 dev->reg_state = NETREG_REGISTERED;
8633
8634 __netdev_update_features(dev);
8635
8636
8637
8638
8639
8640
8641 set_bit(__LINK_STATE_PRESENT, &dev->state);
8642
8643 linkwatch_init_dev(dev);
8644
8645 dev_init_scheduler(dev);
8646 dev_hold(dev);
8647 list_netdevice(dev);
8648 add_device_randomness(dev->dev_addr, dev->addr_len);
8649
8650
8651
8652
8653
8654 if (dev->addr_assign_type == NET_ADDR_PERM)
8655 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
8656
8657
8658 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
8659 ret = notifier_to_errno(ret);
8660 if (ret) {
8661 rollback_registered(dev);
8662 dev->reg_state = NETREG_UNREGISTERED;
8663 }
8664
8665
8666
8667
8668 if (!dev->rtnl_link_ops ||
8669 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
8670 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
8671
8672out:
8673 return ret;
8674
8675err_uninit:
8676 if (dev->netdev_ops->ndo_uninit)
8677 dev->netdev_ops->ndo_uninit(dev);
8678 if (dev->priv_destructor)
8679 dev->priv_destructor(dev);
8680 goto out;
8681}
8682EXPORT_SYMBOL(register_netdevice);
8683
8684
8685
8686
8687
8688
8689
8690
8691
8692
8693
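/**
 *	init_dummy_netdev - init a dummy network device for NAPI
 *	@dev: device to init
 *
 *	Initializes the minimum amount of a network device structure so it
 *	can be used to schedule NAPI polls without registering a full
 *	blown interface.
 */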
8694int init_dummy_netdev(struct net_device *dev)
8695{
8696
8697
8698
8699
8700
8701 memset(dev, 0, sizeof(struct net_device));
8702
8703
8704
8705
8706 dev->reg_state = NETREG_DUMMY;
8707
8708
8709 INIT_LIST_HEAD(&dev->napi_list);
8710
8711
8712 set_bit(__LINK_STATE_PRESENT, &dev->state);
8713 set_bit(__LINK_STATE_START, &dev->state);
8714
8715
8716 dev_net_set(dev, &init_net);
8717
8718
8719
8720
8721
8722
8723 return 0;
8724}
8725EXPORT_SYMBOL_GPL(init_dummy_netdev);
8726
8727
8728
8729
8730
8731
8732
8733
8734
8735
8736
8737
8738
8739
8740
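/**
 *	register_netdev - register a network device
 *	@dev: device to register
 *
 *	This is a wrapper around register_netdevice() that takes the rtnl
 *	semaphore (interruptibly) and releases it again afterwards.
 */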
8741int register_netdev(struct net_device *dev)
8742{
8743 int err;
8744
8745 if (rtnl_lock_killable())
8746 return -EINTR;
8747 err = register_netdevice(dev);
8748 rtnl_unlock();
8749 return err;
8750}
8751EXPORT_SYMBOL(register_netdev);
8752
8753int netdev_refcnt_read(const struct net_device *dev)
8754{
8755 int i, refcnt = 0;
8756
8757 for_each_possible_cpu(i)
8758 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
8759 return refcnt;
8760}
8761EXPORT_SYMBOL(netdev_refcnt_read);
8762
8763
8764
8765
8766
8767
8768
8769
8770
8771
8772
8773
8774
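/**
 * netdev_wait_allrefs - wait until all references are gone.
 * @dev: target net_device
 *
 * This is called when unregistering network devices. It loops until the
 * refcount drops to zero, re-broadcasting NETDEV_UNREGISTER roughly once
 * a second and warning every ten seconds while references remain.
 */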
8775static void netdev_wait_allrefs(struct net_device *dev)
8776{
8777 unsigned long rebroadcast_time, warning_time;
8778 int refcnt;
8779
8780 linkwatch_forget_dev(dev);
8781
8782 rebroadcast_time = warning_time = jiffies;
8783 refcnt = netdev_refcnt_read(dev);
8784
8785 while (refcnt != 0) {
8786 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
8787 rtnl_lock();
8788
8789
8790 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
8791
8792 __rtnl_unlock();
8793 rcu_barrier();
8794 rtnl_lock();
8795
8796 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
8797 &dev->state)) {
8798
8799
8800
8801
8802
8803
8804 linkwatch_run_queue();
8805 }
8806
8807 __rtnl_unlock();
8808
8809 rebroadcast_time = jiffies;
8810 }
8811
8812 msleep(250);
8813
8814 refcnt = netdev_refcnt_read(dev);
8815
8816 if (time_after(jiffies, warning_time + 10 * HZ)) {
8817 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
8818 dev->name, refcnt);
8819 warning_time = jiffies;
8820 }
8821 }
8822}
8823
8824
8825
8826
8827
8828
8829
8830
8831
8832
8833
8834
8835
8836
8837
8838
8839
8840
8841
8842
8843
8844
8845
8846
8847
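/* netdev_run_todo - finish unregistering devices queued via net_set_todo().
 *
 * Invoked from rtnl_unlock(): releases the RTNL, waits for every remaining
 * reference on each queued device to go away, performs the final sanity
 * checks and then releases the device.
 */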
8848void netdev_run_todo(void)
8849{
8850 struct list_head list;
8851
8852
8853 list_replace_init(&net_todo_list, &list);
8854
8855 __rtnl_unlock();
8856
8857
8858
8859 if (!list_empty(&list))
8860 rcu_barrier();
8861
8862 while (!list_empty(&list)) {
8863 struct net_device *dev
8864 = list_first_entry(&list, struct net_device, todo_list);
8865 list_del(&dev->todo_list);
8866
8867 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
8868 pr_err("network todo '%s' but state %d\n",
8869 dev->name, dev->reg_state);
8870 dump_stack();
8871 continue;
8872 }
8873
8874 dev->reg_state = NETREG_UNREGISTERED;
8875
8876 netdev_wait_allrefs(dev);
8877
8878
8879 BUG_ON(netdev_refcnt_read(dev));
8880 BUG_ON(!list_empty(&dev->ptype_all));
8881 BUG_ON(!list_empty(&dev->ptype_specific));
8882 WARN_ON(rcu_access_pointer(dev->ip_ptr));
8883 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
8884#if IS_ENABLED(CONFIG_DECNET)
8885 WARN_ON(dev->dn_ptr);
8886#endif
8887 if (dev->priv_destructor)
8888 dev->priv_destructor(dev);
8889 if (dev->needs_free_netdev)
8890 free_netdev(dev);
8891
8892
8893 rtnl_lock();
8894 dev_net(dev)->dev_unreg_count--;
8895 __rtnl_unlock();
8896 wake_up(&netdev_unregistering_wq);
8897
8898
8899 kobject_put(&dev->dev.kobj);
8900 }
8901}
8902
8903
8904
8905
8906
8907
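/* Copy the unsigned long counters of struct net_device_stats into the u64
 * fields of struct rtnl_link_stats64, zero-filling any trailing fields that
 * net_device_stats does not provide.
 */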
8908void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
8909 const struct net_device_stats *netdev_stats)
8910{
8911#if BITS_PER_LONG == 64
8912 BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
8913 memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
8914
8915 memset((char *)stats64 + sizeof(*netdev_stats), 0,
8916 sizeof(*stats64) - sizeof(*netdev_stats));
8917#else
8918 size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
8919 const unsigned long *src = (const unsigned long *)netdev_stats;
8920 u64 *dst = (u64 *)stats64;
8921
8922 BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
8923 for (i = 0; i < n; i++)
8924 dst[i] = src[i];
8925
8926 memset((char *)stats64 + n * sizeof(u64), 0,
8927 sizeof(*stats64) - n * sizeof(u64));
8928#endif
8929}
8930EXPORT_SYMBOL(netdev_stats_to_stats64);
8931
8932
8933
8934
8935
8936
8937
8938
8939
8940
8941
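/**
 *	dev_get_stats - get network device statistics
 *	@dev: device to get statistics from
 *	@storage: place to store stats
 *
 *	Get network statistics from device. Return @storage.
 *	The device driver may provide its own method by setting
 *	dev->netdev_ops->ndo_get_stats64 or ndo_get_stats; otherwise
 *	the internal statistics structure is used.
 */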
8942struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
8943 struct rtnl_link_stats64 *storage)
8944{
8945 const struct net_device_ops *ops = dev->netdev_ops;
8946
8947 if (ops->ndo_get_stats64) {
8948 memset(storage, 0, sizeof(*storage));
8949 ops->ndo_get_stats64(dev, storage);
8950 } else if (ops->ndo_get_stats) {
8951 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
8952 } else {
8953 netdev_stats_to_stats64(storage, &dev->stats);
8954 }
8955 storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
8956 storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
8957 storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
8958 return storage;
8959}
8960EXPORT_SYMBOL(dev_get_stats);
8961
8962struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
8963{
8964 struct netdev_queue *queue = dev_ingress_queue(dev);
8965
8966#ifdef CONFIG_NET_CLS_ACT
8967 if (queue)
8968 return queue;
8969 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
8970 if (!queue)
8971 return NULL;
8972 netdev_init_one_queue(dev, queue, NULL);
8973 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
8974 queue->qdisc_sleeping = &noop_qdisc;
8975 rcu_assign_pointer(dev->ingress_queue, queue);
8976#endif
8977 return queue;
8978}
8979
8980static const struct ethtool_ops default_ethtool_ops;
8981
8982void netdev_set_default_ethtool_ops(struct net_device *dev,
8983 const struct ethtool_ops *ops)
8984{
8985 if (dev->ethtool_ops == &default_ethtool_ops)
8986 dev->ethtool_ops = ops;
8987}
8988EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
8989
8990void netdev_freemem(struct net_device *dev)
8991{
8992 char *addr = (char *)dev - dev->padded;
8993
8994 kvfree(addr);
8995}
8996
8997
8998
8999
9000
9001
9002
9003
9004
9005
9006
9007
9008
9009
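/**
 * alloc_netdev_mqs - allocate network device
 * @sizeof_priv: size of private data to allocate space for
 * @name: device name format string
 * @name_assign_type: origin of device name
 * @setup: callback to initialize device
 * @txqs: the number of TX subqueues to allocate
 * @rxqs: the number of RX subqueues to allocate
 *
 * Allocates a struct net_device with a private data area for driver use
 * and performs basic initialization, including allocation of the TX and
 * RX queue structures.
 */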
9010struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
9011 unsigned char name_assign_type,
9012 void (*setup)(struct net_device *),
9013 unsigned int txqs, unsigned int rxqs)
9014{
9015 struct net_device *dev;
9016 unsigned int alloc_size;
9017 struct net_device *p;
9018
9019 BUG_ON(strlen(name) >= sizeof(dev->name));
9020
9021 if (txqs < 1) {
9022 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
9023 return NULL;
9024 }
9025
9026 if (rxqs < 1) {
9027 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
9028 return NULL;
9029 }
9030
9031 alloc_size = sizeof(struct net_device);
9032 if (sizeof_priv) {
9033
9034 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
9035 alloc_size += sizeof_priv;
9036 }
9037
9038 alloc_size += NETDEV_ALIGN - 1;
9039
9040 p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
9041 if (!p)
9042 return NULL;
9043
9044 dev = PTR_ALIGN(p, NETDEV_ALIGN);
9045 dev->padded = (char *)dev - (char *)p;
9046
9047 dev->pcpu_refcnt = alloc_percpu(int);
9048 if (!dev->pcpu_refcnt)
9049 goto free_dev;
9050
9051 if (dev_addr_init(dev))
9052 goto free_pcpu;
9053
9054 dev_mc_init(dev);
9055 dev_uc_init(dev);
9056
9057 dev_net_set(dev, &init_net);
9058
9059 dev->gso_max_size = GSO_MAX_SIZE;
9060 dev->gso_max_segs = GSO_MAX_SEGS;
9061
9062 INIT_LIST_HEAD(&dev->napi_list);
9063 INIT_LIST_HEAD(&dev->unreg_list);
9064 INIT_LIST_HEAD(&dev->close_list);
9065 INIT_LIST_HEAD(&dev->link_watch_list);
9066 INIT_LIST_HEAD(&dev->adj_list.upper);
9067 INIT_LIST_HEAD(&dev->adj_list.lower);
9068 INIT_LIST_HEAD(&dev->ptype_all);
9069 INIT_LIST_HEAD(&dev->ptype_specific);
9070#ifdef CONFIG_NET_SCHED
9071 hash_init(dev->qdisc_hash);
9072#endif
9073 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
9074 setup(dev);
9075
9076 if (!dev->tx_queue_len) {
9077 dev->priv_flags |= IFF_NO_QUEUE;
9078 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
9079 }
9080
9081 dev->num_tx_queues = txqs;
9082 dev->real_num_tx_queues = txqs;
9083 if (netif_alloc_netdev_queues(dev))
9084 goto free_all;
9085
9086 dev->num_rx_queues = rxqs;
9087 dev->real_num_rx_queues = rxqs;
9088 if (netif_alloc_rx_queues(dev))
9089 goto free_all;
9090
9091 strcpy(dev->name, name);
9092 dev->name_assign_type = name_assign_type;
9093 dev->group = INIT_NETDEV_GROUP;
9094 if (!dev->ethtool_ops)
9095 dev->ethtool_ops = &default_ethtool_ops;
9096
9097 nf_hook_ingress_init(dev);
9098
9099 return dev;
9100
9101free_all:
9102 free_netdev(dev);
9103 return NULL;
9104
9105free_pcpu:
9106 free_percpu(dev->pcpu_refcnt);
9107free_dev:
9108 netdev_freemem(dev);
9109 return NULL;
9110}
9111EXPORT_SYMBOL(alloc_netdev_mqs);
9112
9113
9114
9115
9116
9117
9118
9119
9120
9121
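/**
 * free_netdev - free network device
 * @dev: device
 *
 * This function does the last stage of destroying an allocated device
 * interface. The reference to the device object is released. If this
 * is the last reference then it will be freed. Must be called in process
 * context.
 */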
9122void free_netdev(struct net_device *dev)
9123{
9124 struct napi_struct *p, *n;
9125
9126 might_sleep();
9127 netif_free_tx_queues(dev);
9128 netif_free_rx_queues(dev);
9129
9130 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
9131
9132
9133 dev_addr_flush(dev);
9134
9135 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
9136 netif_napi_del(p);
9137
9138 free_percpu(dev->pcpu_refcnt);
9139 dev->pcpu_refcnt = NULL;
9140
9141
9142 if (dev->reg_state == NETREG_UNINITIALIZED) {
9143 netdev_freemem(dev);
9144 return;
9145 }
9146
9147 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
9148 dev->reg_state = NETREG_RELEASED;
9149
9150
9151 put_device(&dev->dev);
9152}
9153EXPORT_SYMBOL(free_netdev);
9154
9155
9156
9157
9158
9159
9160
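/**
 *	synchronize_net - Synchronize with packet receive processing
 *
 *	Wait for packets currently being received to be done.
 *	Does not block later packets from starting.
 */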
9161void synchronize_net(void)
9162{
9163 might_sleep();
9164 if (rtnl_is_locked())
9165 synchronize_rcu_expedited();
9166 else
9167 synchronize_rcu();
9168}
9169EXPORT_SYMBOL(synchronize_net);
9170
9171
9172
9173
9174
9175
9176
9177
9178
9179
9180
9181
9182
9183
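/**
 *	unregister_netdevice_queue - remove device from the kernel
 *	@dev: device
 *	@head: list
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *	If head not NULL, device is queued to be unregistered later.
 *
 *	Callers must hold the rtnl semaphore. You may want
 *	unregister_netdev() instead of this.
 */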
9184void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
9185{
9186 ASSERT_RTNL();
9187
9188 if (head) {
9189 list_move_tail(&dev->unreg_list, head);
9190 } else {
9191 rollback_registered(dev);
9192
9193 net_set_todo(dev);
9194 }
9195}
9196EXPORT_SYMBOL(unregister_netdevice_queue);
9197
9198
9199
9200
9201
9202
9203
9204
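/**
 *	unregister_netdevice_many - unregister many devices
 *	@head: list of devices
 *
 *	Note: as most callers use a stack allocated list_head, the final
 *	list_del() keeps that head from pointing at freed entries.
 */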
9205void unregister_netdevice_many(struct list_head *head)
9206{
9207 struct net_device *dev;
9208
9209 if (!list_empty(head)) {
9210 rollback_registered_many(head);
9211 list_for_each_entry(dev, head, unreg_list)
9212 net_set_todo(dev);
9213 list_del(head);
9214 }
9215}
9216EXPORT_SYMBOL(unregister_netdevice_many);
9217
9218
9219
9220
9221
9222
9223
9224
9225
9226
9227
9228
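/**
 *	unregister_netdev - remove device from the kernel
 *	@dev: device
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables. It is a wrapper around
 *	unregister_netdevice() that takes the rtnl semaphore itself.
 */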
9229void unregister_netdev(struct net_device *dev)
9230{
9231 rtnl_lock();
9232 unregister_netdevice(dev);
9233 rtnl_unlock();
9234}
9235EXPORT_SYMBOL(unregister_netdev);
9236
9237
9238
9239
9240
9241
9242
9243
9244
9245
9246
9247
9248
9249
9250
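/**
 *	dev_change_net_namespace - move device to different nethost namespace
 *	@dev: device
 *	@net: network namespace
 *	@pat: if not NULL, name pattern to try if the current device name
 *	      is already taken in the destination network namespace
 *
 *	This function shuts down a device interface and moves it to a new
 *	network namespace. On success 0 is returned, on a failure a
 *	negative errno code is returned.
 *
 *	Callers must hold the rtnl semaphore.
 */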
9251int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
9252{
9253 int err, new_nsid, new_ifindex;
9254
9255 ASSERT_RTNL();
9256
9257
9258 err = -EINVAL;
9259 if (dev->features & NETIF_F_NETNS_LOCAL)
9260 goto out;
9261
9262
9263 if (dev->reg_state != NETREG_REGISTERED)
9264 goto out;
9265
9266
9267 err = 0;
9268 if (net_eq(dev_net(dev), net))
9269 goto out;
9270
9271
9272
9273
9274 err = -EEXIST;
9275 if (__dev_get_by_name(net, dev->name)) {
9276
9277 if (!pat)
9278 goto out;
9279 err = dev_get_valid_name(net, dev, pat);
9280 if (err < 0)
9281 goto out;
9282 }
9283
9284
9285
9286
9287
9288
9289 dev_close(dev);
9290
9291
9292 unlist_netdevice(dev);
9293
9294 synchronize_net();
9295
9296
9297 dev_shutdown(dev);
9298
9299
9300
9301
9302
9303
9304
9305
9306 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
9307 rcu_barrier();
9308
9309 new_nsid = peernet2id_alloc(dev_net(dev), net);
9310
9311 if (__dev_get_by_index(net, dev->ifindex))
9312 new_ifindex = dev_new_index(net);
9313 else
9314 new_ifindex = dev->ifindex;
9315
9316 rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid,
9317 new_ifindex);
9318
9319
9320
9321
9322 dev_uc_flush(dev);
9323 dev_mc_flush(dev);
9324
9325
9326 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
9327 netdev_adjacent_del_links(dev);
9328
9329
9330 dev_net_set(dev, net);
9331 dev->ifindex = new_ifindex;
9332
9333
9334 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
9335 netdev_adjacent_add_links(dev);
9336
9337
9338 err = device_rename(&dev->dev, dev->name);
9339 WARN_ON(err);
9340
9341
9342 list_netdevice(dev);
9343
9344
9345 call_netdevice_notifiers(NETDEV_REGISTER, dev);
9346
9347
9348
9349
9350
9351 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
9352
9353 synchronize_net();
9354 err = 0;
9355out:
9356 return err;
9357}
9358EXPORT_SYMBOL_GPL(dev_change_net_namespace);
9359
9360static int dev_cpu_dead(unsigned int oldcpu)
9361{
9362 struct sk_buff **list_skb;
9363 struct sk_buff *skb;
9364 unsigned int cpu;
9365 struct softnet_data *sd, *oldsd, *remsd = NULL;
9366
9367 local_irq_disable();
9368 cpu = smp_processor_id();
9369 sd = &per_cpu(softnet_data, cpu);
9370 oldsd = &per_cpu(softnet_data, oldcpu);
9371
9372
9373 list_skb = &sd->completion_queue;
9374 while (*list_skb)
9375 list_skb = &(*list_skb)->next;
9376
9377 *list_skb = oldsd->completion_queue;
9378 oldsd->completion_queue = NULL;
9379
9380
9381 if (oldsd->output_queue) {
9382 *sd->output_queue_tailp = oldsd->output_queue;
9383 sd->output_queue_tailp = oldsd->output_queue_tailp;
9384 oldsd->output_queue = NULL;
9385 oldsd->output_queue_tailp = &oldsd->output_queue;
9386 }
9387
9388
9389
9390
9391 while (!list_empty(&oldsd->poll_list)) {
9392 struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
9393 struct napi_struct,
9394 poll_list);
9395
9396 list_del_init(&napi->poll_list);
9397 if (napi->poll == process_backlog)
9398 napi->state = 0;
9399 else
9400 ____napi_schedule(sd, napi);
9401 }
9402
9403 raise_softirq_irqoff(NET_TX_SOFTIRQ);
9404 local_irq_enable();
9405
9406#ifdef CONFIG_RPS
9407 remsd = oldsd->rps_ipi_list;
9408 oldsd->rps_ipi_list = NULL;
9409#endif
9410
9411 net_rps_send_ipi(remsd);
9412
9413
9414 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
9415 netif_rx_ni(skb);
9416 input_queue_head_incr(oldsd);
9417 }
9418 while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
9419 netif_rx_ni(skb);
9420 input_queue_head_incr(oldsd);
9421 }
9422
9423 return 0;
9424}
9425
9426
9427
9428
9429
9430
9431
9432
9433
9434
9435
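/**
 *	netdev_increment_features - increment feature set by one
 *	@all: current feature set
 *	@one: new feature set
 *	@mask: mask feature set
 *
 *	Computes a new feature set after adding a device with feature set
 *	@one to the master device with current feature set @all. Will not
 *	enable anything that is off in @mask.
 */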
9436netdev_features_t netdev_increment_features(netdev_features_t all,
9437 netdev_features_t one, netdev_features_t mask)
9438{
9439 if (mask & NETIF_F_HW_CSUM)
9440 mask |= NETIF_F_CSUM_MASK;
9441 mask |= NETIF_F_VLAN_CHALLENGED;
9442
9443 all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
9444 all &= one | ~NETIF_F_ALL_FOR_ALL;
9445
9446
9447 if (all & NETIF_F_HW_CSUM)
9448 all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);
9449
9450 return all;
9451}
9452EXPORT_SYMBOL(netdev_increment_features);
9453
9454static struct hlist_head * __net_init netdev_create_hash(void)
9455{
9456 int i;
9457 struct hlist_head *hash;
9458
9459 hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL);
9460 if (hash != NULL)
9461 for (i = 0; i < NETDEV_HASHENTRIES; i++)
9462 INIT_HLIST_HEAD(&hash[i]);
9463
9464 return hash;
9465}
9466
9467
9468static int __net_init netdev_init(struct net *net)
9469{
9470 BUILD_BUG_ON(GRO_HASH_BUCKETS >
9471 8 * FIELD_SIZEOF(struct napi_struct, gro_bitmask));
9472
9473 if (net != &init_net)
9474 INIT_LIST_HEAD(&net->dev_base_head);
9475
9476 net->dev_name_head = netdev_create_hash();
9477 if (net->dev_name_head == NULL)
9478 goto err_name;
9479
9480 net->dev_index_head = netdev_create_hash();
9481 if (net->dev_index_head == NULL)
9482 goto err_idx;
9483
9484 return 0;
9485
9486err_idx:
9487 kfree(net->dev_name_head);
9488err_name:
9489 return -ENOMEM;
9490}
9491
9492
9493
9494
9495
9496
9497
9498const char *netdev_drivername(const struct net_device *dev)
9499{
9500 const struct device_driver *driver;
9501 const struct device *parent;
9502 const char *empty = "";
9503
9504 parent = dev->dev.parent;
9505 if (!parent)
9506 return empty;
9507
9508 driver = parent->driver;
9509 if (driver && driver->name)
9510 return driver->name;
9511 return empty;
9512}
9513
9514static void __netdev_printk(const char *level, const struct net_device *dev,
9515 struct va_format *vaf)
9516{
9517 if (dev && dev->dev.parent) {
9518 dev_printk_emit(level[1] - '0',
9519 dev->dev.parent,
9520 "%s %s %s%s: %pV",
9521 dev_driver_string(dev->dev.parent),
9522 dev_name(dev->dev.parent),
9523 netdev_name(dev), netdev_reg_state(dev),
9524 vaf);
9525 } else if (dev) {
9526 printk("%s%s%s: %pV",
9527 level, netdev_name(dev), netdev_reg_state(dev), vaf);
9528 } else {
9529 printk("%s(NULL net_device): %pV", level, vaf);
9530 }
9531}
9532
9533void netdev_printk(const char *level, const struct net_device *dev,
9534 const char *format, ...)
9535{
9536 struct va_format vaf;
9537 va_list args;
9538
9539 va_start(args, format);
9540
9541 vaf.fmt = format;
9542 vaf.va = &args;
9543
9544 __netdev_printk(level, dev, &vaf);
9545
9546 va_end(args);
9547}
9548EXPORT_SYMBOL(netdev_printk);
9549
9550#define define_netdev_printk_level(func, level) \
9551void func(const struct net_device *dev, const char *fmt, ...) \
9552{ \
9553 struct va_format vaf; \
9554 va_list args; \
9555 \
9556 va_start(args, fmt); \
9557 \
9558 vaf.fmt = fmt; \
9559 vaf.va = &args; \
9560 \
9561 __netdev_printk(level, dev, &vaf); \
9562 \
9563 va_end(args); \
9564} \
9565EXPORT_SYMBOL(func);
9566
9567define_netdev_printk_level(netdev_emerg, KERN_EMERG);
9568define_netdev_printk_level(netdev_alert, KERN_ALERT);
9569define_netdev_printk_level(netdev_crit, KERN_CRIT);
9570define_netdev_printk_level(netdev_err, KERN_ERR);
9571define_netdev_printk_level(netdev_warn, KERN_WARNING);
9572define_netdev_printk_level(netdev_notice, KERN_NOTICE);
9573define_netdev_printk_level(netdev_info, KERN_INFO);
9574
9575static void __net_exit netdev_exit(struct net *net)
9576{
9577 kfree(net->dev_name_head);
9578 kfree(net->dev_index_head);
9579 if (net != &init_net)
9580 WARN_ON_ONCE(!list_empty(&net->dev_base_head));
9581}
9582
9583static struct pernet_operations __net_initdata netdev_net_ops = {
9584 .init = netdev_init,
9585 .exit = netdev_exit,
9586};
9587
9588static void __net_exit default_device_exit(struct net *net)
9589{
9590 struct net_device *dev, *aux;
9591
9592
9593
9594
9595 rtnl_lock();
9596 for_each_netdev_safe(net, dev, aux) {
9597 int err;
9598 char fb_name[IFNAMSIZ];
9599
9600
9601 if (dev->features & NETIF_F_NETNS_LOCAL)
9602 continue;
9603
9604
9605 if (dev->rtnl_link_ops)
9606 continue;
9607
9608
9609 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
9610 err = dev_change_net_namespace(dev, &init_net, fb_name);
9611 if (err) {
9612 pr_emerg("%s: failed to move %s to init_net: %d\n",
9613 __func__, dev->name, err);
9614 BUG();
9615 }
9616 }
9617 rtnl_unlock();
9618}
9619
9620static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
9621{
9622
9623
9624
9625 struct net *net;
9626 bool unregistering;
9627 DEFINE_WAIT_FUNC(wait, woken_wake_function);
9628
9629 add_wait_queue(&netdev_unregistering_wq, &wait);
9630 for (;;) {
9631 unregistering = false;
9632 rtnl_lock();
9633 list_for_each_entry(net, net_list, exit_list) {
9634 if (net->dev_unreg_count > 0) {
9635 unregistering = true;
9636 break;
9637 }
9638 }
9639 if (!unregistering)
9640 break;
9641 __rtnl_unlock();
9642
9643 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
9644 }
9645 remove_wait_queue(&netdev_unregistering_wq, &wait);
9646}
9647
9648static void __net_exit default_device_exit_batch(struct list_head *net_list)
9649{
9650
9651
9652
9653
9654
9655 struct net_device *dev;
9656 struct net *net;
9657 LIST_HEAD(dev_kill_list);
9658
9659
9660
9661
9662
9663
9664
9665
9666
9667
9668
9669
9670 rtnl_lock_unregistering(net_list);
9671 list_for_each_entry(net, net_list, exit_list) {
9672 for_each_netdev_reverse(net, dev) {
9673 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
9674 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
9675 else
9676 unregister_netdevice_queue(dev, &dev_kill_list);
9677 }
9678 }
9679 unregister_netdevice_many(&dev_kill_list);
9680 rtnl_unlock();
9681}
9682
9683static struct pernet_operations __net_initdata default_device_ops = {
9684 .exit = default_device_exit,
9685 .exit_batch = default_device_exit_batch,
9686};
9687
9688
9689
9690
9691
9692
9693
9694
9695
9696
9697
9698
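/*
 *	net_dev_init - core network initialization, run once at boot via
 *	subsys_initcall(). Sets up procfs and sysfs hooks, the per-CPU
 *	softnet data, the pernet operations, the NET_TX/NET_RX softirqs
 *	and the CPU-dead hotplug callback.
 */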
9699static int __init net_dev_init(void)
9700{
9701 int i, rc = -ENOMEM;
9702
9703 BUG_ON(!dev_boot_phase);
9704
9705 if (dev_proc_init())
9706 goto out;
9707
9708 if (netdev_kobject_init())
9709 goto out;
9710
9711 INIT_LIST_HEAD(&ptype_all);
9712 for (i = 0; i < PTYPE_HASH_SIZE; i++)
9713 INIT_LIST_HEAD(&ptype_base[i]);
9714
9715 INIT_LIST_HEAD(&offload_base);
9716
9717 if (register_pernet_subsys(&netdev_net_ops))
9718 goto out;
9719
9720
9721
9722
9723
9724 for_each_possible_cpu(i) {
9725 struct work_struct *flush = per_cpu_ptr(&flush_works, i);
9726 struct softnet_data *sd = &per_cpu(softnet_data, i);
9727
9728 INIT_WORK(flush, flush_backlog);
9729
9730 skb_queue_head_init(&sd->input_pkt_queue);
9731 skb_queue_head_init(&sd->process_queue);
9732#ifdef CONFIG_XFRM_OFFLOAD
9733 skb_queue_head_init(&sd->xfrm_backlog);
9734#endif
9735 INIT_LIST_HEAD(&sd->poll_list);
9736 sd->output_queue_tailp = &sd->output_queue;
9737#ifdef CONFIG_RPS
9738 sd->csd.func = rps_trigger_softirq;
9739 sd->csd.info = sd;
9740 sd->cpu = i;
9741#endif
9742
9743 init_gro_hash(&sd->backlog);
9744 sd->backlog.poll = process_backlog;
9745 sd->backlog.weight = weight_p;
9746 }
9747
9748 dev_boot_phase = 0;
9749
9750
9751
9752
9753
9754
9755
9756
9757
9758
9759 if (register_pernet_device(&loopback_net_ops))
9760 goto out;
9761
9762 if (register_pernet_device(&default_device_ops))
9763 goto out;
9764
9765 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
9766 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
9767
9768 rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
9769 NULL, dev_cpu_dead);
9770 WARN_ON(rc < 0);
9771 rc = 0;
9772out:
9773 return rc;
9774}
9775
9776subsys_initcall(net_dev_init);
9777