1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71#include <linux/uaccess.h>
72#include <linux/bitops.h>
73#include <linux/capability.h>
74#include <linux/cpu.h>
75#include <linux/types.h>
76#include <linux/kernel.h>
77#include <linux/hash.h>
78#include <linux/slab.h>
79#include <linux/sched.h>
80#include <linux/sched/mm.h>
81#include <linux/mutex.h>
82#include <linux/rwsem.h>
83#include <linux/string.h>
84#include <linux/mm.h>
85#include <linux/socket.h>
86#include <linux/sockios.h>
87#include <linux/errno.h>
88#include <linux/interrupt.h>
89#include <linux/if_ether.h>
90#include <linux/netdevice.h>
91#include <linux/etherdevice.h>
92#include <linux/ethtool.h>
93#include <linux/skbuff.h>
94#include <linux/kthread.h>
95#include <linux/bpf.h>
96#include <linux/bpf_trace.h>
97#include <net/net_namespace.h>
98#include <net/sock.h>
99#include <net/busy_poll.h>
100#include <linux/rtnetlink.h>
101#include <linux/stat.h>
102#include <net/dsa.h>
103#include <net/dst.h>
104#include <net/dst_metadata.h>
105#include <net/gro.h>
106#include <net/pkt_sched.h>
107#include <net/pkt_cls.h>
108#include <net/checksum.h>
109#include <net/xfrm.h>
110#include <linux/highmem.h>
111#include <linux/init.h>
112#include <linux/module.h>
113#include <linux/netpoll.h>
114#include <linux/rcupdate.h>
115#include <linux/delay.h>
116#include <net/iw_handler.h>
117#include <asm/current.h>
118#include <linux/audit.h>
119#include <linux/dmaengine.h>
120#include <linux/err.h>
121#include <linux/ctype.h>
122#include <linux/if_arp.h>
123#include <linux/if_vlan.h>
124#include <linux/ip.h>
125#include <net/ip.h>
126#include <net/mpls.h>
127#include <linux/ipv6.h>
128#include <linux/in.h>
129#include <linux/jhash.h>
130#include <linux/random.h>
131#include <trace/events/napi.h>
132#include <trace/events/net.h>
133#include <trace/events/skb.h>
134#include <trace/events/qdisc.h>
135#include <linux/inetdevice.h>
136#include <linux/cpu_rmap.h>
137#include <linux/static_key.h>
138#include <linux/hashtable.h>
139#include <linux/vmalloc.h>
140#include <linux/if_macvlan.h>
141#include <linux/errqueue.h>
142#include <linux/hrtimer.h>
143#include <linux/netfilter_ingress.h>
144#include <linux/crash_dump.h>
145#include <linux/sctp.h>
146#include <net/udp_tunnel.h>
147#include <linux/net_namespace.h>
148#include <linux/indirect_call_wrapper.h>
149#include <net/devlink.h>
150#include <linux/pm_runtime.h>
151#include <linux/prandom.h>
152#include <linux/once_lite.h>
153
154#include "net-sysfs.h"
155
156#define MAX_GRO_SKBS 8
157
158
159#define GRO_MAX_HEAD (MAX_HEADER + 128)
160
161static DEFINE_SPINLOCK(ptype_lock);
162static DEFINE_SPINLOCK(offload_lock);
163struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
164struct list_head ptype_all __read_mostly;
165static struct list_head offload_base __read_mostly;
166
167static int netif_rx_internal(struct sk_buff *skb);
168static int call_netdevice_notifiers_info(unsigned long val,
169 struct netdev_notifier_info *info);
170static int call_netdevice_notifiers_extack(unsigned long val,
171 struct net_device *dev,
172 struct netlink_ext_ack *extack);
173static struct napi_struct *napi_by_id(unsigned int napi_id);
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194DEFINE_RWLOCK(dev_base_lock);
195EXPORT_SYMBOL(dev_base_lock);
196
197static DEFINE_MUTEX(ifalias_mutex);
198
199
200static DEFINE_SPINLOCK(napi_hash_lock);
201
202static unsigned int napi_gen_id = NR_CPUS;
203static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);
204
205static DECLARE_RWSEM(devnet_rename_sem);
206
207static inline void dev_base_seq_inc(struct net *net)
208{
209 while (++net->dev_base_seq == 0)
210 ;
211}
212
213static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
214{
215 unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));
216
217 return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
218}
219
220static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
221{
222 return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
223}
224
225static inline void rps_lock(struct softnet_data *sd)
226{
227#ifdef CONFIG_RPS
228 spin_lock(&sd->input_pkt_queue.lock);
229#endif
230}
231
232static inline void rps_unlock(struct softnet_data *sd)
233{
234#ifdef CONFIG_RPS
235 spin_unlock(&sd->input_pkt_queue.lock);
236#endif
237}
238
239static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev,
240 const char *name)
241{
242 struct netdev_name_node *name_node;
243
244 name_node = kmalloc(sizeof(*name_node), GFP_KERNEL);
245 if (!name_node)
246 return NULL;
247 INIT_HLIST_NODE(&name_node->hlist);
248 name_node->dev = dev;
249 name_node->name = name;
250 return name_node;
251}
252
253static struct netdev_name_node *
254netdev_name_node_head_alloc(struct net_device *dev)
255{
256 struct netdev_name_node *name_node;
257
258 name_node = netdev_name_node_alloc(dev, dev->name);
259 if (!name_node)
260 return NULL;
261 INIT_LIST_HEAD(&name_node->list);
262 return name_node;
263}
264
265static void netdev_name_node_free(struct netdev_name_node *name_node)
266{
267 kfree(name_node);
268}
269
270static void netdev_name_node_add(struct net *net,
271 struct netdev_name_node *name_node)
272{
273 hlist_add_head_rcu(&name_node->hlist,
274 dev_name_hash(net, name_node->name));
275}
276
277static void netdev_name_node_del(struct netdev_name_node *name_node)
278{
279 hlist_del_rcu(&name_node->hlist);
280}
281
282static struct netdev_name_node *netdev_name_node_lookup(struct net *net,
283 const char *name)
284{
285 struct hlist_head *head = dev_name_hash(net, name);
286 struct netdev_name_node *name_node;
287
288 hlist_for_each_entry(name_node, head, hlist)
289 if (!strcmp(name_node->name, name))
290 return name_node;
291 return NULL;
292}
293
294static struct netdev_name_node *netdev_name_node_lookup_rcu(struct net *net,
295 const char *name)
296{
297 struct hlist_head *head = dev_name_hash(net, name);
298 struct netdev_name_node *name_node;
299
300 hlist_for_each_entry_rcu(name_node, head, hlist)
301 if (!strcmp(name_node->name, name))
302 return name_node;
303 return NULL;
304}
305
306int netdev_name_node_alt_create(struct net_device *dev, const char *name)
307{
308 struct netdev_name_node *name_node;
309 struct net *net = dev_net(dev);
310
311 name_node = netdev_name_node_lookup(net, name);
312 if (name_node)
313 return -EEXIST;
314 name_node = netdev_name_node_alloc(dev, name);
315 if (!name_node)
316 return -ENOMEM;
317 netdev_name_node_add(net, name_node);
318
319 list_add_tail(&name_node->list, &dev->name_node->list);
320
321 return 0;
322}
323EXPORT_SYMBOL(netdev_name_node_alt_create);
324
325static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
326{
327 list_del(&name_node->list);
328 netdev_name_node_del(name_node);
329 kfree(name_node->name);
330 netdev_name_node_free(name_node);
331}
332
333int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
334{
335 struct netdev_name_node *name_node;
336 struct net *net = dev_net(dev);
337
338 name_node = netdev_name_node_lookup(net, name);
339 if (!name_node)
340 return -ENOENT;
341
342
343
344 if (name_node == dev->name_node || name_node->dev != dev)
345 return -EINVAL;
346
347 __netdev_name_node_alt_destroy(name_node);
348
349 return 0;
350}
351EXPORT_SYMBOL(netdev_name_node_alt_destroy);
352
353static void netdev_name_node_alt_flush(struct net_device *dev)
354{
355 struct netdev_name_node *name_node, *tmp;
356
357 list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list)
358 __netdev_name_node_alt_destroy(name_node);
359}
360
361
362static void list_netdevice(struct net_device *dev)
363{
364 struct net *net = dev_net(dev);
365
366 ASSERT_RTNL();
367
368 write_lock_bh(&dev_base_lock);
369 list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
370 netdev_name_node_add(net, dev->name_node);
371 hlist_add_head_rcu(&dev->index_hlist,
372 dev_index_hash(net, dev->ifindex));
373 write_unlock_bh(&dev_base_lock);
374
375 dev_base_seq_inc(net);
376}
377
378
379
380
381static void unlist_netdevice(struct net_device *dev)
382{
383 ASSERT_RTNL();
384
385
386 write_lock_bh(&dev_base_lock);
387 list_del_rcu(&dev->dev_list);
388 netdev_name_node_del(dev->name_node);
389 hlist_del_rcu(&dev->index_hlist);
390 write_unlock_bh(&dev_base_lock);
391
392 dev_base_seq_inc(dev_net(dev));
393}
394
395
396
397
398
399static RAW_NOTIFIER_HEAD(netdev_chain);
400
401
402
403
404
405
406DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
407EXPORT_PER_CPU_SYMBOL(softnet_data);
408
409#ifdef CONFIG_LOCKDEP
410
411
412
413
414static const unsigned short netdev_lock_type[] = {
415 ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
416 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
417 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
418 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
419 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
420 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
421 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
422 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
423 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
424 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
425 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
426 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
427 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
428 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
429 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
430
431static const char *const netdev_lock_name[] = {
432 "_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
433 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
434 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
435 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
436 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
437 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
438 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
439 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
440 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
441 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
442 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
443 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
444 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
445 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
446 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
447
448static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
449static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
450
451static inline unsigned short netdev_lock_pos(unsigned short dev_type)
452{
453 int i;
454
455 for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
456 if (netdev_lock_type[i] == dev_type)
457 return i;
458
459 return ARRAY_SIZE(netdev_lock_type) - 1;
460}
461
462static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
463 unsigned short dev_type)
464{
465 int i;
466
467 i = netdev_lock_pos(dev_type);
468 lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
469 netdev_lock_name[i]);
470}
471
472static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
473{
474 int i;
475
476 i = netdev_lock_pos(dev->type);
477 lockdep_set_class_and_name(&dev->addr_list_lock,
478 &netdev_addr_lock_key[i],
479 netdev_lock_name[i]);
480}
481#else
482static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
483 unsigned short dev_type)
484{
485}
486
487static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
488{
489}
490#endif
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515static inline struct list_head *ptype_head(const struct packet_type *pt)
516{
517 if (pt->type == htons(ETH_P_ALL))
518 return pt->dev ? &pt->dev->ptype_all : &ptype_all;
519 else
520 return pt->dev ? &pt->dev->ptype_specific :
521 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
522}
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537void dev_add_pack(struct packet_type *pt)
538{
539 struct list_head *head = ptype_head(pt);
540
541 spin_lock(&ptype_lock);
542 list_add_rcu(&pt->list, head);
543 spin_unlock(&ptype_lock);
544}
545EXPORT_SYMBOL(dev_add_pack);
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560void __dev_remove_pack(struct packet_type *pt)
561{
562 struct list_head *head = ptype_head(pt);
563 struct packet_type *pt1;
564
565 spin_lock(&ptype_lock);
566
567 list_for_each_entry(pt1, head, list) {
568 if (pt == pt1) {
569 list_del_rcu(&pt->list);
570 goto out;
571 }
572 }
573
574 pr_warn("dev_remove_pack: %p not found\n", pt);
575out:
576 spin_unlock(&ptype_lock);
577}
578EXPORT_SYMBOL(__dev_remove_pack);
579
580
581
582
583
584
585
586
587
588
589
590
591
592void dev_remove_pack(struct packet_type *pt)
593{
594 __dev_remove_pack(pt);
595
596 synchronize_net();
597}
598EXPORT_SYMBOL(dev_remove_pack);
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613void dev_add_offload(struct packet_offload *po)
614{
615 struct packet_offload *elem;
616
617 spin_lock(&offload_lock);
618 list_for_each_entry(elem, &offload_base, list) {
619 if (po->priority < elem->priority)
620 break;
621 }
622 list_add_rcu(&po->list, elem->list.prev);
623 spin_unlock(&offload_lock);
624}
625EXPORT_SYMBOL(dev_add_offload);
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640static void __dev_remove_offload(struct packet_offload *po)
641{
642 struct list_head *head = &offload_base;
643 struct packet_offload *po1;
644
645 spin_lock(&offload_lock);
646
647 list_for_each_entry(po1, head, list) {
648 if (po == po1) {
649 list_del_rcu(&po->list);
650 goto out;
651 }
652 }
653
654 pr_warn("dev_remove_offload: %p not found\n", po);
655out:
656 spin_unlock(&offload_lock);
657}
658
659
660
661
662
663
664
665
666
667
668
669
670
671void dev_remove_offload(struct packet_offload *po)
672{
673 __dev_remove_offload(po);
674
675 synchronize_net();
676}
677EXPORT_SYMBOL(dev_remove_offload);
678
679
680
681
682
683
684
685
686static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
687
688
689
690
691
692
693
694
695
696
697static int netdev_boot_setup_add(char *name, struct ifmap *map)
698{
699 struct netdev_boot_setup *s;
700 int i;
701
702 s = dev_boot_setup;
703 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
704 if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
705 memset(s[i].name, 0, sizeof(s[i].name));
706 strlcpy(s[i].name, name, IFNAMSIZ);
707 memcpy(&s[i].map, map, sizeof(s[i].map));
708 break;
709 }
710 }
711
712 return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
713}
714
715
716
717
718
719
720
721
722
723
724int netdev_boot_setup_check(struct net_device *dev)
725{
726 struct netdev_boot_setup *s = dev_boot_setup;
727 int i;
728
729 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
730 if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
731 !strcmp(dev->name, s[i].name)) {
732 dev->irq = s[i].map.irq;
733 dev->base_addr = s[i].map.base_addr;
734 dev->mem_start = s[i].map.mem_start;
735 dev->mem_end = s[i].map.mem_end;
736 return 1;
737 }
738 }
739 return 0;
740}
741EXPORT_SYMBOL(netdev_boot_setup_check);
742
743
744
745
746
747
748
749
750
751
752
753
754unsigned long netdev_boot_base(const char *prefix, int unit)
755{
756 const struct netdev_boot_setup *s = dev_boot_setup;
757 char name[IFNAMSIZ];
758 int i;
759
760 sprintf(name, "%s%d", prefix, unit);
761
762
763
764
765
766 if (__dev_get_by_name(&init_net, name))
767 return 1;
768
769 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
770 if (!strcmp(name, s[i].name))
771 return s[i].map.base_addr;
772 return 0;
773}
774
775
776
777
778int __init netdev_boot_setup(char *str)
779{
780 int ints[5];
781 struct ifmap map;
782
783 str = get_options(str, ARRAY_SIZE(ints), ints);
784 if (!str || !*str)
785 return 0;
786
787
788 memset(&map, 0, sizeof(map));
789 if (ints[0] > 0)
790 map.irq = ints[1];
791 if (ints[0] > 1)
792 map.base_addr = ints[2];
793 if (ints[0] > 2)
794 map.mem_start = ints[3];
795 if (ints[0] > 3)
796 map.mem_end = ints[4];
797
798
799 return netdev_boot_setup_add(str, &map);
800}
801
802__setup("netdev=", netdev_boot_setup);
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818int dev_get_iflink(const struct net_device *dev)
819{
820 if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
821 return dev->netdev_ops->ndo_get_iflink(dev);
822
823 return dev->ifindex;
824}
825EXPORT_SYMBOL(dev_get_iflink);
826
827
828
829
830
831
832
833
834
835
836int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
837{
838 struct ip_tunnel_info *info;
839
840 if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
841 return -EINVAL;
842
843 info = skb_tunnel_info_unclone(skb);
844 if (!info)
845 return -ENOMEM;
846 if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
847 return -EINVAL;
848
849 return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
850}
851EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
852
853static struct net_device_path *dev_fwd_path(struct net_device_path_stack *stack)
854{
855 int k = stack->num_paths++;
856
857 if (WARN_ON_ONCE(k >= NET_DEVICE_PATH_STACK_MAX))
858 return NULL;
859
860 return &stack->path[k];
861}
862
863int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
864 struct net_device_path_stack *stack)
865{
866 const struct net_device *last_dev;
867 struct net_device_path_ctx ctx = {
868 .dev = dev,
869 .daddr = daddr,
870 };
871 struct net_device_path *path;
872 int ret = 0;
873
874 stack->num_paths = 0;
875 while (ctx.dev && ctx.dev->netdev_ops->ndo_fill_forward_path) {
876 last_dev = ctx.dev;
877 path = dev_fwd_path(stack);
878 if (!path)
879 return -1;
880
881 memset(path, 0, sizeof(struct net_device_path));
882 ret = ctx.dev->netdev_ops->ndo_fill_forward_path(&ctx, path);
883 if (ret < 0)
884 return -1;
885
886 if (WARN_ON_ONCE(last_dev == ctx.dev))
887 return -1;
888 }
889 path = dev_fwd_path(stack);
890 if (!path)
891 return -1;
892 path->type = DEV_PATH_ETHERNET;
893 path->dev = ctx.dev;
894
895 return ret;
896}
897EXPORT_SYMBOL_GPL(dev_fill_forward_path);
898
899
900
901
902
903
904
905
906
907
908
909
910
911struct net_device *__dev_get_by_name(struct net *net, const char *name)
912{
913 struct netdev_name_node *node_name;
914
915 node_name = netdev_name_node_lookup(net, name);
916 return node_name ? node_name->dev : NULL;
917}
918EXPORT_SYMBOL(__dev_get_by_name);
919
920
921
922
923
924
925
926
927
928
929
930
931
932struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
933{
934 struct netdev_name_node *node_name;
935
936 node_name = netdev_name_node_lookup_rcu(net, name);
937 return node_name ? node_name->dev : NULL;
938}
939EXPORT_SYMBOL(dev_get_by_name_rcu);
940
941
942
943
944
945
946
947
948
949
950
951
952
953struct net_device *dev_get_by_name(struct net *net, const char *name)
954{
955 struct net_device *dev;
956
957 rcu_read_lock();
958 dev = dev_get_by_name_rcu(net, name);
959 if (dev)
960 dev_hold(dev);
961 rcu_read_unlock();
962 return dev;
963}
964EXPORT_SYMBOL(dev_get_by_name);
965
966
967
968
969
970
971
972
973
974
975
976
977
978struct net_device *__dev_get_by_index(struct net *net, int ifindex)
979{
980 struct net_device *dev;
981 struct hlist_head *head = dev_index_hash(net, ifindex);
982
983 hlist_for_each_entry(dev, head, index_hlist)
984 if (dev->ifindex == ifindex)
985 return dev;
986
987 return NULL;
988}
989EXPORT_SYMBOL(__dev_get_by_index);
990
991
992
993
994
995
996
997
998
999
1000
1001
1002struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
1003{
1004 struct net_device *dev;
1005 struct hlist_head *head = dev_index_hash(net, ifindex);
1006
1007 hlist_for_each_entry_rcu(dev, head, index_hlist)
1008 if (dev->ifindex == ifindex)
1009 return dev;
1010
1011 return NULL;
1012}
1013EXPORT_SYMBOL(dev_get_by_index_rcu);
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027struct net_device *dev_get_by_index(struct net *net, int ifindex)
1028{
1029 struct net_device *dev;
1030
1031 rcu_read_lock();
1032 dev = dev_get_by_index_rcu(net, ifindex);
1033 if (dev)
1034 dev_hold(dev);
1035 rcu_read_unlock();
1036 return dev;
1037}
1038EXPORT_SYMBOL(dev_get_by_index);
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050struct net_device *dev_get_by_napi_id(unsigned int napi_id)
1051{
1052 struct napi_struct *napi;
1053
1054 WARN_ON_ONCE(!rcu_read_lock_held());
1055
1056 if (napi_id < MIN_NAPI_ID)
1057 return NULL;
1058
1059 napi = napi_by_id(napi_id);
1060
1061 return napi ? napi->dev : NULL;
1062}
1063EXPORT_SYMBOL(dev_get_by_napi_id);
1064
1065
1066
1067
1068
1069
1070
1071int netdev_get_name(struct net *net, char *name, int ifindex)
1072{
1073 struct net_device *dev;
1074 int ret;
1075
1076 down_read(&devnet_rename_sem);
1077 rcu_read_lock();
1078
1079 dev = dev_get_by_index_rcu(net, ifindex);
1080 if (!dev) {
1081 ret = -ENODEV;
1082 goto out;
1083 }
1084
1085 strcpy(name, dev->name);
1086
1087 ret = 0;
1088out:
1089 rcu_read_unlock();
1090 up_read(&devnet_rename_sem);
1091 return ret;
1092}
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
1109 const char *ha)
1110{
1111 struct net_device *dev;
1112
1113 for_each_netdev_rcu(net, dev)
1114 if (dev->type == type &&
1115 !memcmp(dev->dev_addr, ha, dev->addr_len))
1116 return dev;
1117
1118 return NULL;
1119}
1120EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
1121
1122struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
1123{
1124 struct net_device *dev, *ret = NULL;
1125
1126 rcu_read_lock();
1127 for_each_netdev_rcu(net, dev)
1128 if (dev->type == type) {
1129 dev_hold(dev);
1130 ret = dev;
1131 break;
1132 }
1133 rcu_read_unlock();
1134 return ret;
1135}
1136EXPORT_SYMBOL(dev_getfirstbyhwtype);
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
1150 unsigned short mask)
1151{
1152 struct net_device *dev, *ret;
1153
1154 ASSERT_RTNL();
1155
1156 ret = NULL;
1157 for_each_netdev(net, dev) {
1158 if (((dev->flags ^ if_flags) & mask) == 0) {
1159 ret = dev;
1160 break;
1161 }
1162 }
1163 return ret;
1164}
1165EXPORT_SYMBOL(__dev_get_by_flags);
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175bool dev_valid_name(const char *name)
1176{
1177 if (*name == '\0')
1178 return false;
1179 if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
1180 return false;
1181 if (!strcmp(name, ".") || !strcmp(name, ".."))
1182 return false;
1183
1184 while (*name) {
1185 if (*name == '/' || *name == ':' || isspace(*name))
1186 return false;
1187 name++;
1188 }
1189 return true;
1190}
1191EXPORT_SYMBOL(dev_valid_name);
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208static int __dev_alloc_name(struct net *net, const char *name, char *buf)
1209{
1210 int i = 0;
1211 const char *p;
1212 const int max_netdevices = 8*PAGE_SIZE;
1213 unsigned long *inuse;
1214 struct net_device *d;
1215
1216 if (!dev_valid_name(name))
1217 return -EINVAL;
1218
1219 p = strchr(name, '%');
1220 if (p) {
1221
1222
1223
1224
1225
1226 if (p[1] != 'd' || strchr(p + 2, '%'))
1227 return -EINVAL;
1228
1229
1230 inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
1231 if (!inuse)
1232 return -ENOMEM;
1233
1234 for_each_netdev(net, d) {
1235 struct netdev_name_node *name_node;
1236 list_for_each_entry(name_node, &d->name_node->list, list) {
1237 if (!sscanf(name_node->name, name, &i))
1238 continue;
1239 if (i < 0 || i >= max_netdevices)
1240 continue;
1241
1242
1243 snprintf(buf, IFNAMSIZ, name, i);
1244 if (!strncmp(buf, name_node->name, IFNAMSIZ))
1245 set_bit(i, inuse);
1246 }
1247 if (!sscanf(d->name, name, &i))
1248 continue;
1249 if (i < 0 || i >= max_netdevices)
1250 continue;
1251
1252
1253 snprintf(buf, IFNAMSIZ, name, i);
1254 if (!strncmp(buf, d->name, IFNAMSIZ))
1255 set_bit(i, inuse);
1256 }
1257
1258 i = find_first_zero_bit(inuse, max_netdevices);
1259 free_page((unsigned long) inuse);
1260 }
1261
1262 snprintf(buf, IFNAMSIZ, name, i);
1263 if (!__dev_get_by_name(net, buf))
1264 return i;
1265
1266
1267
1268
1269
1270 return -ENFILE;
1271}
1272
1273static int dev_alloc_name_ns(struct net *net,
1274 struct net_device *dev,
1275 const char *name)
1276{
1277 char buf[IFNAMSIZ];
1278 int ret;
1279
1280 BUG_ON(!net);
1281 ret = __dev_alloc_name(net, name, buf);
1282 if (ret >= 0)
1283 strlcpy(dev->name, buf, IFNAMSIZ);
1284 return ret;
1285}
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301int dev_alloc_name(struct net_device *dev, const char *name)
1302{
1303 return dev_alloc_name_ns(dev_net(dev), dev, name);
1304}
1305EXPORT_SYMBOL(dev_alloc_name);
1306
1307static int dev_get_valid_name(struct net *net, struct net_device *dev,
1308 const char *name)
1309{
1310 BUG_ON(!net);
1311
1312 if (!dev_valid_name(name))
1313 return -EINVAL;
1314
1315 if (strchr(name, '%'))
1316 return dev_alloc_name_ns(net, dev, name);
1317 else if (__dev_get_by_name(net, name))
1318 return -EEXIST;
1319 else if (dev->name != name)
1320 strlcpy(dev->name, name, IFNAMSIZ);
1321
1322 return 0;
1323}
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333int dev_change_name(struct net_device *dev, const char *newname)
1334{
1335 unsigned char old_assign_type;
1336 char oldname[IFNAMSIZ];
1337 int err = 0;
1338 int ret;
1339 struct net *net;
1340
1341 ASSERT_RTNL();
1342 BUG_ON(!dev_net(dev));
1343
1344 net = dev_net(dev);
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358 if (dev->flags & IFF_UP &&
1359 likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK)))
1360 return -EBUSY;
1361
1362 down_write(&devnet_rename_sem);
1363
1364 if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
1365 up_write(&devnet_rename_sem);
1366 return 0;
1367 }
1368
1369 memcpy(oldname, dev->name, IFNAMSIZ);
1370
1371 err = dev_get_valid_name(net, dev, newname);
1372 if (err < 0) {
1373 up_write(&devnet_rename_sem);
1374 return err;
1375 }
1376
1377 if (oldname[0] && !strchr(oldname, '%'))
1378 netdev_info(dev, "renamed from %s\n", oldname);
1379
1380 old_assign_type = dev->name_assign_type;
1381 dev->name_assign_type = NET_NAME_RENAMED;
1382
1383rollback:
1384 ret = device_rename(&dev->dev, dev->name);
1385 if (ret) {
1386 memcpy(dev->name, oldname, IFNAMSIZ);
1387 dev->name_assign_type = old_assign_type;
1388 up_write(&devnet_rename_sem);
1389 return ret;
1390 }
1391
1392 up_write(&devnet_rename_sem);
1393
1394 netdev_adjacent_rename_links(dev, oldname);
1395
1396 write_lock_bh(&dev_base_lock);
1397 netdev_name_node_del(dev->name_node);
1398 write_unlock_bh(&dev_base_lock);
1399
1400 synchronize_rcu();
1401
1402 write_lock_bh(&dev_base_lock);
1403 netdev_name_node_add(net, dev->name_node);
1404 write_unlock_bh(&dev_base_lock);
1405
1406 ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
1407 ret = notifier_to_errno(ret);
1408
1409 if (ret) {
1410
1411 if (err >= 0) {
1412 err = ret;
1413 down_write(&devnet_rename_sem);
1414 memcpy(dev->name, oldname, IFNAMSIZ);
1415 memcpy(oldname, newname, IFNAMSIZ);
1416 dev->name_assign_type = old_assign_type;
1417 old_assign_type = NET_NAME_RENAMED;
1418 goto rollback;
1419 } else {
1420 pr_err("%s: name change rollback failed: %d\n",
1421 dev->name, ret);
1422 }
1423 }
1424
1425 return err;
1426}
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1437{
1438 struct dev_ifalias *new_alias = NULL;
1439
1440 if (len >= IFALIASZ)
1441 return -EINVAL;
1442
1443 if (len) {
1444 new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
1445 if (!new_alias)
1446 return -ENOMEM;
1447
1448 memcpy(new_alias->ifalias, alias, len);
1449 new_alias->ifalias[len] = 0;
1450 }
1451
1452 mutex_lock(&ifalias_mutex);
1453 new_alias = rcu_replace_pointer(dev->ifalias, new_alias,
1454 mutex_is_locked(&ifalias_mutex));
1455 mutex_unlock(&ifalias_mutex);
1456
1457 if (new_alias)
1458 kfree_rcu(new_alias, rcuhead);
1459
1460 return len;
1461}
1462EXPORT_SYMBOL(dev_set_alias);
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473int dev_get_alias(const struct net_device *dev, char *name, size_t len)
1474{
1475 const struct dev_ifalias *alias;
1476 int ret = 0;
1477
1478 rcu_read_lock();
1479 alias = rcu_dereference(dev->ifalias);
1480 if (alias)
1481 ret = snprintf(name, len, "%s", alias->ifalias);
1482 rcu_read_unlock();
1483
1484 return ret;
1485}
1486
1487
1488
1489
1490
1491
1492
1493void netdev_features_change(struct net_device *dev)
1494{
1495 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
1496}
1497EXPORT_SYMBOL(netdev_features_change);
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507void netdev_state_change(struct net_device *dev)
1508{
1509 if (dev->flags & IFF_UP) {
1510 struct netdev_notifier_change_info change_info = {
1511 .info.dev = dev,
1512 };
1513
1514 call_netdevice_notifiers_info(NETDEV_CHANGE,
1515 &change_info.info);
1516 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
1517 }
1518}
1519EXPORT_SYMBOL(netdev_state_change);
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532void __netdev_notify_peers(struct net_device *dev)
1533{
1534 ASSERT_RTNL();
1535 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1536 call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
1537}
1538EXPORT_SYMBOL(__netdev_notify_peers);
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550void netdev_notify_peers(struct net_device *dev)
1551{
1552 rtnl_lock();
1553 __netdev_notify_peers(dev);
1554 rtnl_unlock();
1555}
1556EXPORT_SYMBOL(netdev_notify_peers);
1557
1558static int napi_threaded_poll(void *data);
1559
1560static int napi_kthread_create(struct napi_struct *n)
1561{
1562 int err = 0;
1563
1564
1565
1566
1567
1568 n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d",
1569 n->dev->name, n->napi_id);
1570 if (IS_ERR(n->thread)) {
1571 err = PTR_ERR(n->thread);
1572 pr_err("kthread_run failed with err %d\n", err);
1573 n->thread = NULL;
1574 }
1575
1576 return err;
1577}
1578
1579static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
1580{
1581 const struct net_device_ops *ops = dev->netdev_ops;
1582 int ret;
1583
1584 ASSERT_RTNL();
1585
1586 if (!netif_device_present(dev)) {
1587
1588 if (dev->dev.parent)
1589 pm_runtime_resume(dev->dev.parent);
1590 if (!netif_device_present(dev))
1591 return -ENODEV;
1592 }
1593
1594
1595
1596
1597
1598 netpoll_poll_disable(dev);
1599
1600 ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack);
1601 ret = notifier_to_errno(ret);
1602 if (ret)
1603 return ret;
1604
1605 set_bit(__LINK_STATE_START, &dev->state);
1606
1607 if (ops->ndo_validate_addr)
1608 ret = ops->ndo_validate_addr(dev);
1609
1610 if (!ret && ops->ndo_open)
1611 ret = ops->ndo_open(dev);
1612
1613 netpoll_poll_enable(dev);
1614
1615 if (ret)
1616 clear_bit(__LINK_STATE_START, &dev->state);
1617 else {
1618 dev->flags |= IFF_UP;
1619 dev_set_rx_mode(dev);
1620 dev_activate(dev);
1621 add_device_randomness(dev->dev_addr, dev->addr_len);
1622 }
1623
1624 return ret;
1625}
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640int dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
1641{
1642 int ret;
1643
1644 if (dev->flags & IFF_UP)
1645 return 0;
1646
1647 ret = __dev_open(dev, extack);
1648 if (ret < 0)
1649 return ret;
1650
1651 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
1652 call_netdevice_notifiers(NETDEV_UP, dev);
1653
1654 return ret;
1655}
1656EXPORT_SYMBOL(dev_open);
1657
1658static void __dev_close_many(struct list_head *head)
1659{
1660 struct net_device *dev;
1661
1662 ASSERT_RTNL();
1663 might_sleep();
1664
1665 list_for_each_entry(dev, head, close_list) {
1666
1667 netpoll_poll_disable(dev);
1668
1669 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1670
1671 clear_bit(__LINK_STATE_START, &dev->state);
1672
1673
1674
1675
1676
1677
1678
1679 smp_mb__after_atomic();
1680 }
1681
1682 dev_deactivate_many(head);
1683
1684 list_for_each_entry(dev, head, close_list) {
1685 const struct net_device_ops *ops = dev->netdev_ops;
1686
1687
1688
1689
1690
1691
1692
1693
1694 if (ops->ndo_stop)
1695 ops->ndo_stop(dev);
1696
1697 dev->flags &= ~IFF_UP;
1698 netpoll_poll_enable(dev);
1699 }
1700}
1701
1702static void __dev_close(struct net_device *dev)
1703{
1704 LIST_HEAD(single);
1705
1706 list_add(&dev->close_list, &single);
1707 __dev_close_many(&single);
1708 list_del(&single);
1709}
1710
1711void dev_close_many(struct list_head *head, bool unlink)
1712{
1713 struct net_device *dev, *tmp;
1714
1715
1716 list_for_each_entry_safe(dev, tmp, head, close_list)
1717 if (!(dev->flags & IFF_UP))
1718 list_del_init(&dev->close_list);
1719
1720 __dev_close_many(head);
1721
1722 list_for_each_entry_safe(dev, tmp, head, close_list) {
1723 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
1724 call_netdevice_notifiers(NETDEV_DOWN, dev);
1725 if (unlink)
1726 list_del_init(&dev->close_list);
1727 }
1728}
1729EXPORT_SYMBOL(dev_close_many);
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740void dev_close(struct net_device *dev)
1741{
1742 if (dev->flags & IFF_UP) {
1743 LIST_HEAD(single);
1744
1745 list_add(&dev->close_list, &single);
1746 dev_close_many(&single, true);
1747 list_del(&single);
1748 }
1749}
1750EXPORT_SYMBOL(dev_close);
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761void dev_disable_lro(struct net_device *dev)
1762{
1763 struct net_device *lower_dev;
1764 struct list_head *iter;
1765
1766 dev->wanted_features &= ~NETIF_F_LRO;
1767 netdev_update_features(dev);
1768
1769 if (unlikely(dev->features & NETIF_F_LRO))
1770 netdev_WARN(dev, "failed to disable LRO!\n");
1771
1772 netdev_for_each_lower_dev(dev, lower_dev, iter)
1773 dev_disable_lro(lower_dev);
1774}
1775EXPORT_SYMBOL(dev_disable_lro);
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785static void dev_disable_gro_hw(struct net_device *dev)
1786{
1787 dev->wanted_features &= ~NETIF_F_GRO_HW;
1788 netdev_update_features(dev);
1789
1790 if (unlikely(dev->features & NETIF_F_GRO_HW))
1791 netdev_WARN(dev, "failed to disable GRO_HW!\n");
1792}
1793
1794const char *netdev_cmd_to_name(enum netdev_cmd cmd)
1795{
1796#define N(val) \
1797 case NETDEV_##val: \
1798 return "NETDEV_" __stringify(val);
1799 switch (cmd) {
1800 N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
1801 N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
1802 N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
1803 N(POST_INIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN) N(CHANGEUPPER)
1804 N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA) N(BONDING_INFO)
1805 N(PRECHANGEUPPER) N(CHANGELOWERSTATE) N(UDP_TUNNEL_PUSH_INFO)
1806 N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
1807 N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
1808 N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
1809 N(PRE_CHANGEADDR)
1810 }
1811#undef N
1812 return "UNKNOWN_NETDEV_EVENT";
1813}
1814EXPORT_SYMBOL_GPL(netdev_cmd_to_name);
1815
1816static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1817 struct net_device *dev)
1818{
1819 struct netdev_notifier_info info = {
1820 .dev = dev,
1821 };
1822
1823 return nb->notifier_call(nb, val, &info);
1824}
1825
1826static int call_netdevice_register_notifiers(struct notifier_block *nb,
1827 struct net_device *dev)
1828{
1829 int err;
1830
1831 err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
1832 err = notifier_to_errno(err);
1833 if (err)
1834 return err;
1835
1836 if (!(dev->flags & IFF_UP))
1837 return 0;
1838
1839 call_netdevice_notifier(nb, NETDEV_UP, dev);
1840 return 0;
1841}
1842
1843static void call_netdevice_unregister_notifiers(struct notifier_block *nb,
1844 struct net_device *dev)
1845{
1846 if (dev->flags & IFF_UP) {
1847 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1848 dev);
1849 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1850 }
1851 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1852}
1853
1854static int call_netdevice_register_net_notifiers(struct notifier_block *nb,
1855 struct net *net)
1856{
1857 struct net_device *dev;
1858 int err;
1859
1860 for_each_netdev(net, dev) {
1861 err = call_netdevice_register_notifiers(nb, dev);
1862 if (err)
1863 goto rollback;
1864 }
1865 return 0;
1866
1867rollback:
1868 for_each_netdev_continue_reverse(net, dev)
1869 call_netdevice_unregister_notifiers(nb, dev);
1870 return err;
1871}
1872
1873static void call_netdevice_unregister_net_notifiers(struct notifier_block *nb,
1874 struct net *net)
1875{
1876 struct net_device *dev;
1877
1878 for_each_netdev(net, dev)
1879 call_netdevice_unregister_notifiers(nb, dev);
1880}
1881
1882static int dev_boot_phase = 1;
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898int register_netdevice_notifier(struct notifier_block *nb)
1899{
1900 struct net *net;
1901 int err;
1902
1903
1904 down_write(&pernet_ops_rwsem);
1905 rtnl_lock();
1906 err = raw_notifier_chain_register(&netdev_chain, nb);
1907 if (err)
1908 goto unlock;
1909 if (dev_boot_phase)
1910 goto unlock;
1911 for_each_net(net) {
1912 err = call_netdevice_register_net_notifiers(nb, net);
1913 if (err)
1914 goto rollback;
1915 }
1916
1917unlock:
1918 rtnl_unlock();
1919 up_write(&pernet_ops_rwsem);
1920 return err;
1921
1922rollback:
1923 for_each_net_continue_reverse(net)
1924 call_netdevice_unregister_net_notifiers(nb, net);
1925
1926 raw_notifier_chain_unregister(&netdev_chain, nb);
1927 goto unlock;
1928}
1929EXPORT_SYMBOL(register_netdevice_notifier);
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945int unregister_netdevice_notifier(struct notifier_block *nb)
1946{
1947 struct net *net;
1948 int err;
1949
1950
1951 down_write(&pernet_ops_rwsem);
1952 rtnl_lock();
1953 err = raw_notifier_chain_unregister(&netdev_chain, nb);
1954 if (err)
1955 goto unlock;
1956
1957 for_each_net(net)
1958 call_netdevice_unregister_net_notifiers(nb, net);
1959
1960unlock:
1961 rtnl_unlock();
1962 up_write(&pernet_ops_rwsem);
1963 return err;
1964}
1965EXPORT_SYMBOL(unregister_netdevice_notifier);
1966
1967static int __register_netdevice_notifier_net(struct net *net,
1968 struct notifier_block *nb,
1969 bool ignore_call_fail)
1970{
1971 int err;
1972
1973 err = raw_notifier_chain_register(&net->netdev_chain, nb);
1974 if (err)
1975 return err;
1976 if (dev_boot_phase)
1977 return 0;
1978
1979 err = call_netdevice_register_net_notifiers(nb, net);
1980 if (err && !ignore_call_fail)
1981 goto chain_unregister;
1982
1983 return 0;
1984
1985chain_unregister:
1986 raw_notifier_chain_unregister(&net->netdev_chain, nb);
1987 return err;
1988}
1989
1990static int __unregister_netdevice_notifier_net(struct net *net,
1991 struct notifier_block *nb)
1992{
1993 int err;
1994
1995 err = raw_notifier_chain_unregister(&net->netdev_chain, nb);
1996 if (err)
1997 return err;
1998
1999 call_netdevice_unregister_net_notifiers(nb, net);
2000 return 0;
2001}
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb)
2019{
2020 int err;
2021
2022 rtnl_lock();
2023 err = __register_netdevice_notifier_net(net, nb, false);
2024 rtnl_unlock();
2025 return err;
2026}
2027EXPORT_SYMBOL(register_netdevice_notifier_net);
2028
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045int unregister_netdevice_notifier_net(struct net *net,
2046 struct notifier_block *nb)
2047{
2048 int err;
2049
2050 rtnl_lock();
2051 err = __unregister_netdevice_notifier_net(net, nb);
2052 rtnl_unlock();
2053 return err;
2054}
2055EXPORT_SYMBOL(unregister_netdevice_notifier_net);
2056
2057int register_netdevice_notifier_dev_net(struct net_device *dev,
2058 struct notifier_block *nb,
2059 struct netdev_net_notifier *nn)
2060{
2061 int err;
2062
2063 rtnl_lock();
2064 err = __register_netdevice_notifier_net(dev_net(dev), nb, false);
2065 if (!err) {
2066 nn->nb = nb;
2067 list_add(&nn->list, &dev->net_notifier_list);
2068 }
2069 rtnl_unlock();
2070 return err;
2071}
2072EXPORT_SYMBOL(register_netdevice_notifier_dev_net);
2073
2074int unregister_netdevice_notifier_dev_net(struct net_device *dev,
2075 struct notifier_block *nb,
2076 struct netdev_net_notifier *nn)
2077{
2078 int err;
2079
2080 rtnl_lock();
2081 list_del(&nn->list);
2082 err = __unregister_netdevice_notifier_net(dev_net(dev), nb);
2083 rtnl_unlock();
2084 return err;
2085}
2086EXPORT_SYMBOL(unregister_netdevice_notifier_dev_net);
2087
2088static void move_netdevice_notifiers_dev_net(struct net_device *dev,
2089 struct net *net)
2090{
2091 struct netdev_net_notifier *nn;
2092
2093 list_for_each_entry(nn, &dev->net_notifier_list, list) {
2094 __unregister_netdevice_notifier_net(dev_net(dev), nn->nb);
2095 __register_netdevice_notifier_net(net, nn->nb, true);
2096 }
2097}
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108static int call_netdevice_notifiers_info(unsigned long val,
2109 struct netdev_notifier_info *info)
2110{
2111 struct net *net = dev_net(info->dev);
2112 int ret;
2113
2114 ASSERT_RTNL();
2115
2116
2117
2118
2119
2120 ret = raw_notifier_call_chain(&net->netdev_chain, val, info);
2121 if (ret & NOTIFY_STOP_MASK)
2122 return ret;
2123 return raw_notifier_call_chain(&netdev_chain, val, info);
2124}
2125
2126static int call_netdevice_notifiers_extack(unsigned long val,
2127 struct net_device *dev,
2128 struct netlink_ext_ack *extack)
2129{
2130 struct netdev_notifier_info info = {
2131 .dev = dev,
2132 .extack = extack,
2133 };
2134
2135 return call_netdevice_notifiers_info(val, &info);
2136}
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
2148{
2149 return call_netdevice_notifiers_extack(val, dev, NULL);
2150}
2151EXPORT_SYMBOL(call_netdevice_notifiers);
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162static int call_netdevice_notifiers_mtu(unsigned long val,
2163 struct net_device *dev, u32 arg)
2164{
2165 struct netdev_notifier_info_ext info = {
2166 .info.dev = dev,
2167 .ext.mtu = arg,
2168 };
2169
2170 BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);
2171
2172 return call_netdevice_notifiers_info(val, &info.info);
2173}
2174
2175#ifdef CONFIG_NET_INGRESS
2176static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);
2177
2178void net_inc_ingress_queue(void)
2179{
2180 static_branch_inc(&ingress_needed_key);
2181}
2182EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
2183
2184void net_dec_ingress_queue(void)
2185{
2186 static_branch_dec(&ingress_needed_key);
2187}
2188EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
2189#endif
2190
2191#ifdef CONFIG_NET_EGRESS
2192static DEFINE_STATIC_KEY_FALSE(egress_needed_key);
2193
2194void net_inc_egress_queue(void)
2195{
2196 static_branch_inc(&egress_needed_key);
2197}
2198EXPORT_SYMBOL_GPL(net_inc_egress_queue);
2199
2200void net_dec_egress_queue(void)
2201{
2202 static_branch_dec(&egress_needed_key);
2203}
2204EXPORT_SYMBOL_GPL(net_dec_egress_queue);
2205#endif
2206
2207static DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
2208#ifdef CONFIG_JUMP_LABEL
2209static atomic_t netstamp_needed_deferred;
2210static atomic_t netstamp_wanted;
2211static void netstamp_clear(struct work_struct *work)
2212{
2213 int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
2214 int wanted;
2215
2216 wanted = atomic_add_return(deferred, &netstamp_wanted);
2217 if (wanted > 0)
2218 static_branch_enable(&netstamp_needed_key);
2219 else
2220 static_branch_disable(&netstamp_needed_key);
2221}
2222static DECLARE_WORK(netstamp_work, netstamp_clear);
2223#endif
2224
2225void net_enable_timestamp(void)
2226{
2227#ifdef CONFIG_JUMP_LABEL
2228 int wanted;
2229
2230 while (1) {
2231 wanted = atomic_read(&netstamp_wanted);
2232 if (wanted <= 0)
2233 break;
2234 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
2235 return;
2236 }
2237 atomic_inc(&netstamp_needed_deferred);
2238 schedule_work(&netstamp_work);
2239#else
2240 static_branch_inc(&netstamp_needed_key);
2241#endif
2242}
2243EXPORT_SYMBOL(net_enable_timestamp);
2244
2245void net_disable_timestamp(void)
2246{
2247#ifdef CONFIG_JUMP_LABEL
2248 int wanted;
2249
2250 while (1) {
2251 wanted = atomic_read(&netstamp_wanted);
2252 if (wanted <= 1)
2253 break;
2254 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
2255 return;
2256 }
2257 atomic_dec(&netstamp_needed_deferred);
2258 schedule_work(&netstamp_work);
2259#else
2260 static_branch_dec(&netstamp_needed_key);
2261#endif
2262}
2263EXPORT_SYMBOL(net_disable_timestamp);
2264
2265static inline void net_timestamp_set(struct sk_buff *skb)
2266{
2267 skb->tstamp = 0;
2268 if (static_branch_unlikely(&netstamp_needed_key))
2269 __net_timestamp(skb);
2270}
2271
2272#define net_timestamp_check(COND, SKB) \
2273 if (static_branch_unlikely(&netstamp_needed_key)) { \
2274 if ((COND) && !(SKB)->tstamp) \
2275 __net_timestamp(SKB); \
2276 } \
2277
2278bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
2279{
2280 return __is_skb_forwardable(dev, skb, true);
2281}
2282EXPORT_SYMBOL_GPL(is_skb_forwardable);
2283
2284static int __dev_forward_skb2(struct net_device *dev, struct sk_buff *skb,
2285 bool check_mtu)
2286{
2287 int ret = ____dev_forward_skb(dev, skb, check_mtu);
2288
2289 if (likely(!ret)) {
2290 skb->protocol = eth_type_trans(skb, dev);
2291 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
2292 }
2293
2294 return ret;
2295}
2296
2297int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
2298{
2299 return __dev_forward_skb2(dev, skb, true);
2300}
2301EXPORT_SYMBOL_GPL(__dev_forward_skb);
2302
2303
2304
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315
2316
2317
2318
2319
2320
2321int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
2322{
2323 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
2324}
2325EXPORT_SYMBOL_GPL(dev_forward_skb);
2326
2327int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb)
2328{
2329 return __dev_forward_skb2(dev, skb, false) ?: netif_rx_internal(skb);
2330}
2331
2332static inline int deliver_skb(struct sk_buff *skb,
2333 struct packet_type *pt_prev,
2334 struct net_device *orig_dev)
2335{
2336 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
2337 return -ENOMEM;
2338 refcount_inc(&skb->users);
2339 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2340}
2341
2342static inline void deliver_ptype_list_skb(struct sk_buff *skb,
2343 struct packet_type **pt,
2344 struct net_device *orig_dev,
2345 __be16 type,
2346 struct list_head *ptype_list)
2347{
2348 struct packet_type *ptype, *pt_prev = *pt;
2349
2350 list_for_each_entry_rcu(ptype, ptype_list, list) {
2351 if (ptype->type != type)
2352 continue;
2353 if (pt_prev)
2354 deliver_skb(skb, pt_prev, orig_dev);
2355 pt_prev = ptype;
2356 }
2357 *pt = pt_prev;
2358}
2359
2360static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
2361{
2362 if (!ptype->af_packet_priv || !skb->sk)
2363 return false;
2364
2365 if (ptype->id_match)
2366 return ptype->id_match(ptype, skb->sk);
2367 else if ((struct sock *)ptype->af_packet_priv == skb->sk)
2368 return true;
2369
2370 return false;
2371}
2372
2373
2374
2375
2376
2377
2378bool dev_nit_active(struct net_device *dev)
2379{
2380 return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all);
2381}
2382EXPORT_SYMBOL_GPL(dev_nit_active);
2383
2384
2385
2386
2387
2388
2389void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
2390{
2391 struct packet_type *ptype;
2392 struct sk_buff *skb2 = NULL;
2393 struct packet_type *pt_prev = NULL;
2394 struct list_head *ptype_list = &ptype_all;
2395
2396 rcu_read_lock();
2397again:
2398 list_for_each_entry_rcu(ptype, ptype_list, list) {
2399 if (ptype->ignore_outgoing)
2400 continue;
2401
2402
2403
2404
2405 if (skb_loop_sk(ptype, skb))
2406 continue;
2407
2408 if (pt_prev) {
2409 deliver_skb(skb2, pt_prev, skb->dev);
2410 pt_prev = ptype;
2411 continue;
2412 }
2413
2414
2415 skb2 = skb_clone(skb, GFP_ATOMIC);
2416 if (!skb2)
2417 goto out_unlock;
2418
2419 net_timestamp_set(skb2);
2420
2421
2422
2423
2424
2425 skb_reset_mac_header(skb2);
2426
2427 if (skb_network_header(skb2) < skb2->data ||
2428 skb_network_header(skb2) > skb_tail_pointer(skb2)) {
2429 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
2430 ntohs(skb2->protocol),
2431 dev->name);
2432 skb_reset_network_header(skb2);
2433 }
2434
2435 skb2->transport_header = skb2->network_header;
2436 skb2->pkt_type = PACKET_OUTGOING;
2437 pt_prev = ptype;
2438 }
2439
2440 if (ptype_list == &ptype_all) {
2441 ptype_list = &dev->ptype_all;
2442 goto again;
2443 }
2444out_unlock:
2445 if (pt_prev) {
2446 if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
2447 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
2448 else
2449 kfree_skb(skb2);
2450 }
2451 rcu_read_unlock();
2452}
2453EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463
2464
2465
2466
2467
2468static void netif_setup_tc(struct net_device *dev, unsigned int txq)
2469{
2470 int i;
2471 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2472
2473
2474 if (tc->offset + tc->count > txq) {
2475 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
2476 dev->num_tc = 0;
2477 return;
2478 }
2479
2480
2481 for (i = 1; i < TC_BITMASK + 1; i++) {
2482 int q = netdev_get_prio_tc_map(dev, i);
2483
2484 tc = &dev->tc_to_txq[q];
2485 if (tc->offset + tc->count > txq) {
2486 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
2487 i, q);
2488 netdev_set_prio_tc_map(dev, i, 0);
2489 }
2490 }
2491}
2492
2493int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
2494{
2495 if (dev->num_tc) {
2496 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2497 int i;
2498
2499
2500 for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
2501 if ((txq - tc->offset) < tc->count)
2502 return i;
2503 }
2504
2505
2506 return -1;
2507 }
2508
2509 return 0;
2510}
2511EXPORT_SYMBOL(netdev_txq_to_tc);
2512
2513#ifdef CONFIG_XPS
2514static struct static_key xps_needed __read_mostly;
2515static struct static_key xps_rxqs_needed __read_mostly;
2516static DEFINE_MUTEX(xps_map_mutex);
2517#define xmap_dereference(P) \
2518 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
2519
2520static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
2521 struct xps_dev_maps *old_maps, int tci, u16 index)
2522{
2523 struct xps_map *map = NULL;
2524 int pos;
2525
2526 if (dev_maps)
2527 map = xmap_dereference(dev_maps->attr_map[tci]);
2528 if (!map)
2529 return false;
2530
2531 for (pos = map->len; pos--;) {
2532 if (map->queues[pos] != index)
2533 continue;
2534
2535 if (map->len > 1) {
2536 map->queues[pos] = map->queues[--map->len];
2537 break;
2538 }
2539
2540 if (old_maps)
2541 RCU_INIT_POINTER(old_maps->attr_map[tci], NULL);
2542 RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
2543 kfree_rcu(map, rcu);
2544 return false;
2545 }
2546
2547 return true;
2548}
2549
2550static bool remove_xps_queue_cpu(struct net_device *dev,
2551 struct xps_dev_maps *dev_maps,
2552 int cpu, u16 offset, u16 count)
2553{
2554 int num_tc = dev_maps->num_tc;
2555 bool active = false;
2556 int tci;
2557
2558 for (tci = cpu * num_tc; num_tc--; tci++) {
2559 int i, j;
2560
2561 for (i = count, j = offset; i--; j++) {
2562 if (!remove_xps_queue(dev_maps, NULL, tci, j))
2563 break;
2564 }
2565
2566 active |= i < 0;
2567 }
2568
2569 return active;
2570}
2571
2572static void reset_xps_maps(struct net_device *dev,
2573 struct xps_dev_maps *dev_maps,
2574 enum xps_map_type type)
2575{
2576 static_key_slow_dec_cpuslocked(&xps_needed);
2577 if (type == XPS_RXQS)
2578 static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
2579
2580 RCU_INIT_POINTER(dev->xps_maps[type], NULL);
2581
2582 kfree_rcu(dev_maps, rcu);
2583}
2584
2585static void clean_xps_maps(struct net_device *dev, enum xps_map_type type,
2586 u16 offset, u16 count)
2587{
2588 struct xps_dev_maps *dev_maps;
2589 bool active = false;
2590 int i, j;
2591
2592 dev_maps = xmap_dereference(dev->xps_maps[type]);
2593 if (!dev_maps)
2594 return;
2595
2596 for (j = 0; j < dev_maps->nr_ids; j++)
2597 active |= remove_xps_queue_cpu(dev, dev_maps, j, offset, count);
2598 if (!active)
2599 reset_xps_maps(dev, dev_maps, type);
2600
2601 if (type == XPS_CPUS) {
2602 for (i = offset + (count - 1); count--; i--)
2603 netdev_queue_numa_node_write(
2604 netdev_get_tx_queue(dev, i), NUMA_NO_NODE);
2605 }
2606}
2607
2608static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
2609 u16 count)
2610{
2611 if (!static_key_false(&xps_needed))
2612 return;
2613
2614 cpus_read_lock();
2615 mutex_lock(&xps_map_mutex);
2616
2617 if (static_key_false(&xps_rxqs_needed))
2618 clean_xps_maps(dev, XPS_RXQS, offset, count);
2619
2620 clean_xps_maps(dev, XPS_CPUS, offset, count);
2621
2622 mutex_unlock(&xps_map_mutex);
2623 cpus_read_unlock();
2624}
2625
2626static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
2627{
2628 netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
2629}
2630
2631static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index,
2632 u16 index, bool is_rxqs_map)
2633{
2634 struct xps_map *new_map;
2635 int alloc_len = XPS_MIN_MAP_ALLOC;
2636 int i, pos;
2637
2638 for (pos = 0; map && pos < map->len; pos++) {
2639 if (map->queues[pos] != index)
2640 continue;
2641 return map;
2642 }
2643
2644
2645 if (map) {
2646 if (pos < map->alloc_len)
2647 return map;
2648
2649 alloc_len = map->alloc_len * 2;
2650 }
2651
2652
2653
2654
2655 if (is_rxqs_map)
2656 new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL);
2657 else
2658 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
2659 cpu_to_node(attr_index));
2660 if (!new_map)
2661 return NULL;
2662
2663 for (i = 0; i < pos; i++)
2664 new_map->queues[i] = map->queues[i];
2665 new_map->alloc_len = alloc_len;
2666 new_map->len = pos;
2667
2668 return new_map;
2669}
2670
2671
2672static void xps_copy_dev_maps(struct xps_dev_maps *dev_maps,
2673 struct xps_dev_maps *new_dev_maps, int index,
2674 int tc, bool skip_tc)
2675{
2676 int i, tci = index * dev_maps->num_tc;
2677 struct xps_map *map;
2678
2679
2680 for (i = 0; i < dev_maps->num_tc; i++, tci++) {
2681 if (i == tc && skip_tc)
2682 continue;
2683
2684
2685 map = xmap_dereference(dev_maps->attr_map[tci]);
2686 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2687 }
2688}
2689
2690
2691int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
2692 u16 index, enum xps_map_type type)
2693{
2694 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL, *old_dev_maps = NULL;
2695 const unsigned long *online_mask = NULL;
2696 bool active = false, copy = false;
2697 int i, j, tci, numa_node_id = -2;
2698 int maps_sz, num_tc = 1, tc = 0;
2699 struct xps_map *map, *new_map;
2700 unsigned int nr_ids;
2701
2702 if (dev->num_tc) {
2703
2704 num_tc = dev->num_tc;
2705 if (num_tc < 0)
2706 return -EINVAL;
2707
2708
2709 dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
2710
2711 tc = netdev_txq_to_tc(dev, index);
2712 if (tc < 0)
2713 return -EINVAL;
2714 }
2715
2716 mutex_lock(&xps_map_mutex);
2717
2718 dev_maps = xmap_dereference(dev->xps_maps[type]);
2719 if (type == XPS_RXQS) {
2720 maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues);
2721 nr_ids = dev->num_rx_queues;
2722 } else {
2723 maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc);
2724 if (num_possible_cpus() > 1)
2725 online_mask = cpumask_bits(cpu_online_mask);
2726 nr_ids = nr_cpu_ids;
2727 }
2728
2729 if (maps_sz < L1_CACHE_BYTES)
2730 maps_sz = L1_CACHE_BYTES;
2731
2732
2733
2734
2735
2736
2737 if (dev_maps &&
2738 dev_maps->num_tc == num_tc && dev_maps->nr_ids == nr_ids)
2739 copy = true;
2740
2741
2742 for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids),
2743 j < nr_ids;) {
2744 if (!new_dev_maps) {
2745 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
2746 if (!new_dev_maps) {
2747 mutex_unlock(&xps_map_mutex);
2748 return -ENOMEM;
2749 }
2750
2751 new_dev_maps->nr_ids = nr_ids;
2752 new_dev_maps->num_tc = num_tc;
2753 }
2754
2755 tci = j * num_tc + tc;
2756 map = copy ? xmap_dereference(dev_maps->attr_map[tci]) : NULL;
2757
2758 map = expand_xps_map(map, j, index, type == XPS_RXQS);
2759 if (!map)
2760 goto error;
2761
2762 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2763 }
2764
2765 if (!new_dev_maps)
2766 goto out_no_new_maps;
2767
2768 if (!dev_maps) {
		/* Increment static keys at most once per type */
2770 static_key_slow_inc_cpuslocked(&xps_needed);
2771 if (type == XPS_RXQS)
2772 static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
2773 }
2774
2775 for (j = 0; j < nr_ids; j++) {
2776 bool skip_tc = false;
2777
2778 tci = j * num_tc + tc;
2779 if (netif_attr_test_mask(j, mask, nr_ids) &&
2780 netif_attr_test_online(j, online_mask, nr_ids)) {
			/* add tx-queue to CPU/rx-queue maps */
2782 int pos = 0;
2783
2784 skip_tc = true;
2785
2786 map = xmap_dereference(new_dev_maps->attr_map[tci]);
2787 while ((pos < map->len) && (map->queues[pos] != index))
2788 pos++;
2789
2790 if (pos == map->len)
2791 map->queues[map->len++] = index;
2792#ifdef CONFIG_NUMA
2793 if (type == XPS_CPUS) {
2794 if (numa_node_id == -2)
2795 numa_node_id = cpu_to_node(j);
2796 else if (numa_node_id != cpu_to_node(j))
2797 numa_node_id = -1;
2798 }
2799#endif
2800 }
2801
2802 if (copy)
2803 xps_copy_dev_maps(dev_maps, new_dev_maps, j, tc,
2804 skip_tc);
2805 }
2806
2807 rcu_assign_pointer(dev->xps_maps[type], new_dev_maps);
2808
	/* Cleanup old maps */
2810 if (!dev_maps)
2811 goto out_no_old_maps;
2812
2813 for (j = 0; j < dev_maps->nr_ids; j++) {
2814 for (i = num_tc, tci = j * dev_maps->num_tc; i--; tci++) {
2815 map = xmap_dereference(dev_maps->attr_map[tci]);
2816 if (!map)
2817 continue;
2818
2819 if (copy) {
2820 new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
2821 if (map == new_map)
2822 continue;
2823 }
2824
2825 RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
2826 kfree_rcu(map, rcu);
2827 }
2828 }
2829
2830 old_dev_maps = dev_maps;
2831
2832out_no_old_maps:
2833 dev_maps = new_dev_maps;
2834 active = true;
2835
2836out_no_new_maps:
2837 if (type == XPS_CPUS)
		/* update Tx queue numa node */
2839 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2840 (numa_node_id >= 0) ?
2841 numa_node_id : NUMA_NO_NODE);
2842
2843 if (!dev_maps)
2844 goto out_no_maps;
2845
	/* removes tx-queue from unused CPUs/rx-queues */
2847 for (j = 0; j < dev_maps->nr_ids; j++) {
2848 tci = j * dev_maps->num_tc;
2849
2850 for (i = 0; i < dev_maps->num_tc; i++, tci++) {
2851 if (i == tc &&
2852 netif_attr_test_mask(j, mask, dev_maps->nr_ids) &&
2853 netif_attr_test_online(j, online_mask, dev_maps->nr_ids))
2854 continue;
2855
2856 active |= remove_xps_queue(dev_maps,
2857 copy ? old_dev_maps : NULL,
2858 tci, index);
2859 }
2860 }
2861
2862 if (old_dev_maps)
2863 kfree_rcu(old_dev_maps, rcu);
2864
	/* free map if not active */
2866 if (!active)
2867 reset_xps_maps(dev, dev_maps, type);
2868
2869out_no_maps:
2870 mutex_unlock(&xps_map_mutex);
2871
2872 return 0;
2873error:
	/* remove any maps that we added */
2875 for (j = 0; j < nr_ids; j++) {
2876 for (i = num_tc, tci = j * num_tc; i--; tci++) {
2877 new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
2878 map = copy ?
2879 xmap_dereference(dev_maps->attr_map[tci]) :
2880 NULL;
2881 if (new_map && new_map != map)
2882 kfree(new_map);
2883 }
2884 }
2885
2886 mutex_unlock(&xps_map_mutex);
2887
2888 kfree(new_dev_maps);
2889 return -ENOMEM;
2890}
2891EXPORT_SYMBOL_GPL(__netif_set_xps_queue);
2892
2893int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2894 u16 index)
2895{
2896 int ret;
2897
2898 cpus_read_lock();
2899 ret = __netif_set_xps_queue(dev, cpumask_bits(mask), index, XPS_CPUS);
2900 cpus_read_unlock();
2901
2902 return ret;
2903}
2904EXPORT_SYMBOL(netif_set_xps_queue);
2905
2906#endif
2907static void netdev_unbind_all_sb_channels(struct net_device *dev)
2908{
2909 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2910
	/* Unbind any subordinate channels */
2912 while (txq-- != &dev->_tx[0]) {
2913 if (txq->sb_dev)
2914 netdev_unbind_sb_channel(dev, txq->sb_dev);
2915 }
2916}
2917
2918void netdev_reset_tc(struct net_device *dev)
2919{
2920#ifdef CONFIG_XPS
2921 netif_reset_xps_queues_gt(dev, 0);
2922#endif
2923 netdev_unbind_all_sb_channels(dev);
2924
	/* Reset TC configuration of device */
2926 dev->num_tc = 0;
2927 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
2928 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
2929}
2930EXPORT_SYMBOL(netdev_reset_tc);
2931
2932int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
2933{
2934 if (tc >= dev->num_tc)
2935 return -EINVAL;
2936
2937#ifdef CONFIG_XPS
2938 netif_reset_xps_queues(dev, offset, count);
2939#endif
2940 dev->tc_to_txq[tc].count = count;
2941 dev->tc_to_txq[tc].offset = offset;
2942 return 0;
2943}
2944EXPORT_SYMBOL(netdev_set_tc_queue);
2945
2946int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
2947{
2948 if (num_tc > TC_MAX_QUEUE)
2949 return -EINVAL;
2950
2951#ifdef CONFIG_XPS
2952 netif_reset_xps_queues_gt(dev, 0);
2953#endif
2954 netdev_unbind_all_sb_channels(dev);
2955
2956 dev->num_tc = num_tc;
2957 return 0;
2958}
2959EXPORT_SYMBOL(netdev_set_num_tc);
2960
2961void netdev_unbind_sb_channel(struct net_device *dev,
2962 struct net_device *sb_dev)
2963{
2964 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2965
2966#ifdef CONFIG_XPS
2967 netif_reset_xps_queues_gt(sb_dev, 0);
2968#endif
2969 memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq));
2970 memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map));
2971
2972 while (txq-- != &dev->_tx[0]) {
2973 if (txq->sb_dev == sb_dev)
2974 txq->sb_dev = NULL;
2975 }
2976}
2977EXPORT_SYMBOL(netdev_unbind_sb_channel);
2978
2979int netdev_bind_sb_channel_queue(struct net_device *dev,
2980 struct net_device *sb_dev,
2981 u8 tc, u16 count, u16 offset)
2982{
	/* Do not use a multiqueue device to represent a subordinate channel */
2984 if (sb_dev->num_tc >= 0 || tc >= dev->num_tc)
2985 return -EINVAL;
2986
	/* We cannot hand out queues we don't have */
2988 if ((offset + count) > dev->real_num_tx_queues)
2989 return -EINVAL;
2990
	/* Record the mapping */
2992 sb_dev->tc_to_txq[tc].count = count;
2993 sb_dev->tc_to_txq[tc].offset = offset;
2994
	/* Provide a way for Tx queue to find the tc_to_txq map or
	 * XPS map for itself.
	 */
2998 while (count--)
2999 netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev;
3000
3001 return 0;
3002}
3003EXPORT_SYMBOL(netdev_bind_sb_channel_queue);
3004
3005int netdev_set_sb_channel(struct net_device *dev, u16 channel)
3006{
	/* Do not use a multiqueue device to represent a subordinate channel */
3008 if (netif_is_multiqueue(dev))
3009 return -ENODEV;
3010
	/* We allow channels 1 - 32767 to be used for subordinate channels.
	 * Channel 0 is meant to be "native" mode and used only to represent
	 * the main root device. We allow writing 0 to reset the device back
	 * to normal mode after being used as a subordinate channel.
	 */
3016 if (channel > S16_MAX)
3017 return -EINVAL;
3018
3019 dev->num_tc = -channel;
3020
3021 return 0;
3022}
3023EXPORT_SYMBOL(netdev_set_sb_channel);
3024

/* Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
 */
3029int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
3030{
3031 bool disabling;
3032 int rc;
3033
3034 disabling = txq < dev->real_num_tx_queues;
3035
3036 if (txq < 1 || txq > dev->num_tx_queues)
3037 return -EINVAL;
3038
3039 if (dev->reg_state == NETREG_REGISTERED ||
3040 dev->reg_state == NETREG_UNREGISTERING) {
3041 ASSERT_RTNL();
3042
3043 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
3044 txq);
3045 if (rc)
3046 return rc;
3047
3048 if (dev->num_tc)
3049 netif_setup_tc(dev, txq);
3050
3051 dev->real_num_tx_queues = txq;
3052
3053 if (disabling) {
3054 synchronize_net();
3055 qdisc_reset_all_tx_gt(dev, txq);
3056#ifdef CONFIG_XPS
3057 netif_reset_xps_queues_gt(dev, txq);
3058#endif
3059 }
3060 } else {
3061 dev->real_num_tx_queues = txq;
3062 }
3063
3064 return 0;
3065}
3066EXPORT_SYMBOL(netif_set_real_num_tx_queues);
3067
3068#ifdef CONFIG_SYSFS
/**
 *	netif_set_real_num_rx_queues - set actual number of RX queues used
 *	@dev: Network device
 *	@rxq: Actual number of RX queues
 *
 *	This must be called either with the rtnl_lock held or before
 *	registration of the net device.  Returns 0 on success, or a
 *	negative error code.  If called before registration, it always
 *	succeeds.
 */
3079int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
3080{
3081 int rc;
3082
3083 if (rxq < 1 || rxq > dev->num_rx_queues)
3084 return -EINVAL;
3085
3086 if (dev->reg_state == NETREG_REGISTERED) {
3087 ASSERT_RTNL();
3088
3089 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
3090 rxq);
3091 if (rc)
3092 return rc;
3093 }
3094
3095 dev->real_num_rx_queues = rxq;
3096 return 0;
3097}
3098EXPORT_SYMBOL(netif_set_real_num_rx_queues);
3099#endif
3100
/**
 * netif_get_num_default_rss_queues - default number of RSS queues
 *
 * This routine should set an upper limit on the number of RSS queues
 * used by default by multiqueue devices.
 */
3107int netif_get_num_default_rss_queues(void)
3108{
3109 return is_kdump_kernel() ?
3110 1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
3111}
3112EXPORT_SYMBOL(netif_get_num_default_rss_queues);
3113
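/* Append the qdisc to this CPU's output queue and raise NET_TX_SOFTIRQ so
 * that net_tx_action() will run it once softirqs are processed.
 */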
3114static void __netif_reschedule(struct Qdisc *q)
3115{
3116 struct softnet_data *sd;
3117 unsigned long flags;
3118
3119 local_irq_save(flags);
3120 sd = this_cpu_ptr(&softnet_data);
3121 q->next_sched = NULL;
3122 *sd->output_queue_tailp = q;
3123 sd->output_queue_tailp = &q->next_sched;
3124 raise_softirq_irqoff(NET_TX_SOFTIRQ);
3125 local_irq_restore(flags);
3126}
3127
3128void __netif_schedule(struct Qdisc *q)
3129{
3130 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
3131 __netif_reschedule(q);
3132}
3133EXPORT_SYMBOL(__netif_schedule);
3134
3135struct dev_kfree_skb_cb {
3136 enum skb_free_reason reason;
3137};
3138
3139static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
3140{
3141 return (struct dev_kfree_skb_cb *)skb->cb;
3142}
3143
3144void netif_schedule_queue(struct netdev_queue *txq)
3145{
3146 rcu_read_lock();
3147 if (!netif_xmit_stopped(txq)) {
3148 struct Qdisc *q = rcu_dereference(txq->qdisc);
3149
3150 __netif_schedule(q);
3151 }
3152 rcu_read_unlock();
3153}
3154EXPORT_SYMBOL(netif_schedule_queue);
3155
3156void netif_tx_wake_queue(struct netdev_queue *dev_queue)
3157{
3158 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
3159 struct Qdisc *q;
3160
3161 rcu_read_lock();
3162 q = rcu_dereference(dev_queue->qdisc);
3163 __netif_schedule(q);
3164 rcu_read_unlock();
3165 }
3166}
3167EXPORT_SYMBOL(netif_tx_wake_queue);
3168
3169void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
3170{
3171 unsigned long flags;
3172
3173 if (unlikely(!skb))
3174 return;
3175
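	/* Fast path: if we hold the last reference, drop the count to zero
	 * without an atomic RMW; otherwise decrement and bail out unless we
	 * just released the final reference.
	 */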
3176 if (likely(refcount_read(&skb->users) == 1)) {
3177 smp_rmb();
3178 refcount_set(&skb->users, 0);
3179 } else if (likely(!refcount_dec_and_test(&skb->users))) {
3180 return;
3181 }
3182 get_kfree_skb_cb(skb)->reason = reason;
3183 local_irq_save(flags);
3184 skb->next = __this_cpu_read(softnet_data.completion_queue);
3185 __this_cpu_write(softnet_data.completion_queue, skb);
3186 raise_softirq_irqoff(NET_TX_SOFTIRQ);
3187 local_irq_restore(flags);
3188}
3189EXPORT_SYMBOL(__dev_kfree_skb_irq);
3190
3191void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
3192{
3193 if (in_irq() || irqs_disabled())
3194 __dev_kfree_skb_irq(skb, reason);
3195 else
3196 dev_kfree_skb(skb);
3197}
3198EXPORT_SYMBOL(__dev_kfree_skb_any);
3199

/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
3207void netif_device_detach(struct net_device *dev)
3208{
3209 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
3210 netif_running(dev)) {
3211 netif_tx_stop_all_queues(dev);
3212 }
3213}
3214EXPORT_SYMBOL(netif_device_detach);
3215

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached from system and restart if needed.
 */
3222void netif_device_attach(struct net_device *dev)
3223{
3224 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
3225 netif_running(dev)) {
3226 netif_tx_wake_all_queues(dev);
3227 __netdev_watchdog_up(dev);
3228 }
3229}
3230EXPORT_SYMBOL(netif_device_attach);
3231
/*
 * Returns a Tx hash for the given packet, using the number of Tx queues
 * of the (subordinate) device as the distribution range.
 */
3236static u16 skb_tx_hash(const struct net_device *dev,
3237 const struct net_device *sb_dev,
3238 struct sk_buff *skb)
3239{
3240 u32 hash;
3241 u16 qoffset = 0;
3242 u16 qcount = dev->real_num_tx_queues;
3243
3244 if (dev->num_tc) {
3245 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
3246
3247 qoffset = sb_dev->tc_to_txq[tc].offset;
3248 qcount = sb_dev->tc_to_txq[tc].count;
3249 }
3250
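	/* A recorded rx queue is used directly so replies tend to leave on a
	 * queue related to the one they arrived on; fold it into the
	 * [qoffset, qoffset + qcount) range of the selected traffic class.
	 */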
3251 if (skb_rx_queue_recorded(skb)) {
3252 hash = skb_get_rx_queue(skb);
3253 if (hash >= qoffset)
3254 hash -= qoffset;
3255 while (unlikely(hash >= qcount))
3256 hash -= qcount;
3257 return hash + qoffset;
3258 }
3259
3260 return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
3261}
3262
3263static void skb_warn_bad_offload(const struct sk_buff *skb)
3264{
3265 static const netdev_features_t null_features;
3266 struct net_device *dev = skb->dev;
3267 const char *name = "";
3268
3269 if (!net_ratelimit())
3270 return;
3271
3272 if (dev) {
3273 if (dev->dev.parent)
3274 name = dev_driver_string(dev->dev.parent);
3275 else
3276 name = netdev_name(dev);
3277 }
3278 skb_dump(KERN_WARNING, skb, false);
3279 WARN(1, "%s: caps=(%pNF, %pNF)\n",
3280 name, dev ? &dev->features : &null_features,
3281 skb->sk ? &skb->sk->sk_route_caps : &null_features);
3282}
3283
/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
3288int skb_checksum_help(struct sk_buff *skb)
3289{
3290 __wsum csum;
3291 int ret = 0, offset;
3292
3293 if (skb->ip_summed == CHECKSUM_COMPLETE)
3294 goto out_set_summed;
3295
3296 if (unlikely(skb_is_gso(skb))) {
3297 skb_warn_bad_offload(skb);
3298 return -EINVAL;
3299 }
3300
	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity : checksum could be wrong.
	 */
3304 if (skb_has_shared_frag(skb)) {
3305 ret = __skb_linearize(skb);
3306 if (ret)
3307 goto out;
3308 }
3309
3310 offset = skb_checksum_start_offset(skb);
3311 BUG_ON(offset >= skb_headlen(skb));
3312 csum = skb_checksum(skb, offset, skb->len - offset, 0);
3313
3314 offset += skb->csum_offset;
3315 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
3316
3317 ret = skb_ensure_writable(skb, offset + sizeof(__sum16));
3318 if (ret)
3319 goto out;
3320
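	/* Fold the 32-bit sum and store it at csum_start + csum_offset; a
	 * folded value of zero is replaced by CSUM_MANGLED_0 so it is not
	 * mistaken for "no checksum".
	 */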
3321 *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
3322out_set_summed:
3323 skb->ip_summed = CHECKSUM_NONE;
3324out:
3325 return ret;
3326}
3327EXPORT_SYMBOL(skb_checksum_help);
3328
3329int skb_crc32c_csum_help(struct sk_buff *skb)
3330{
3331 __le32 crc32c_csum;
3332 int ret = 0, offset, start;
3333
3334 if (skb->ip_summed != CHECKSUM_PARTIAL)
3335 goto out;
3336
3337 if (unlikely(skb_is_gso(skb)))
3338 goto out;
3339
	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity : checksum could be wrong.
	 */
3343 if (unlikely(skb_has_shared_frag(skb))) {
3344 ret = __skb_linearize(skb);
3345 if (ret)
3346 goto out;
3347 }
3348 start = skb_checksum_start_offset(skb);
3349 offset = start + offsetof(struct sctphdr, checksum);
3350 if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
3351 ret = -EINVAL;
3352 goto out;
3353 }
3354
3355 ret = skb_ensure_writable(skb, offset + sizeof(__le32));
3356 if (ret)
3357 goto out;
3358
3359 crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
3360 skb->len - start, ~(__u32)0,
3361 crc32c_csum_stub));
3362 *(__le32 *)(skb->data + offset) = crc32c_csum;
3363 skb->ip_summed = CHECKSUM_NONE;
3364 skb->csum_not_inet = 0;
3365out:
3366 return ret;
3367}
3368
3369__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
3370{
3371 __be16 type = skb->protocol;
3372
	/* Tunnel gso handlers can set protocol to ethernet. */
3374 if (type == htons(ETH_P_TEB)) {
3375 struct ethhdr *eth;
3376
3377 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
3378 return 0;
3379
3380 eth = (struct ethhdr *)skb->data;
3381 type = eth->h_proto;
3382 }
3383
3384 return __vlan_get_protocol(skb, type, depth);
3385}
3386
/**
 *	skb_mac_gso_segment - mac layer segmentation handler
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 */
3392struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
3393 netdev_features_t features)
3394{
3395 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
3396 struct packet_offload *ptype;
3397 int vlan_depth = skb->mac_len;
3398 __be16 type = skb_network_protocol(skb, &vlan_depth);
3399
3400 if (unlikely(!type))
3401 return ERR_PTR(-EINVAL);
3402
3403 __skb_pull(skb, vlan_depth);
3404
3405 rcu_read_lock();
3406 list_for_each_entry_rcu(ptype, &offload_base, list) {
3407 if (ptype->type == type && ptype->callbacks.gso_segment) {
3408 segs = ptype->callbacks.gso_segment(skb, features);
3409 break;
3410 }
3411 }
3412 rcu_read_unlock();
3413
3414 __skb_push(skb, skb->data - skb_mac_header(skb));
3415
3416 return segs;
3417}
3418EXPORT_SYMBOL(skb_mac_gso_segment);
3419
3420
/* openvswitch calls this on rx path, so we need a different check.
 */
3423static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
3424{
3425 if (tx_path)
3426 return skb->ip_summed != CHECKSUM_PARTIAL &&
3427 skb->ip_summed != CHECKSUM_UNNECESSARY;
3428
3429 return skb->ip_summed == CHECKSUM_NONE;
3430}
3431
/**
 *	__skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *	@tx_path: whether it is called in TX path
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 *
 *	Segmentation preserves SKB_GSO_CB_OFFSET bytes of previous skb cb.
 */
3445struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
3446 netdev_features_t features, bool tx_path)
3447{
3448 struct sk_buff *segs;
3449
3450 if (unlikely(skb_needs_check(skb, tx_path))) {
3451 int err;
3452
		/* We're going to init ->check field in TCP or UDP header */
3454 err = skb_cow_head(skb, 0);
3455 if (err < 0)
3456 return ERR_PTR(err);
3457 }
3458
	/* Only report GSO partial support if it will enable us to
	 * support segmentation on this frame without needing additional
	 * work.
	 */
3463 if (features & NETIF_F_GSO_PARTIAL) {
3464 netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
3465 struct net_device *dev = skb->dev;
3466
3467 partial_features |= dev->features & dev->gso_partial_features;
3468 if (!skb_gso_ok(skb, features | partial_features))
3469 features &= ~NETIF_F_GSO_PARTIAL;
3470 }
3471
3472 BUILD_BUG_ON(SKB_GSO_CB_OFFSET +
3473 sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
3474
3475 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
3476 SKB_GSO_CB(skb)->encap_level = 0;
3477
3478 skb_reset_mac_header(skb);
3479 skb_reset_mac_len(skb);
3480
3481 segs = skb_mac_gso_segment(skb, features);
3482
3483 if (segs != skb && unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
3484 skb_warn_bad_offload(skb);
3485
3486 return segs;
3487}
3488EXPORT_SYMBOL(__skb_gso_segment);
3489
/* Take action when hardware reception checksum errors are detected. */
3491#ifdef CONFIG_BUG
3492static void do_netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
3493{
3494 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
3495 skb_dump(KERN_ERR, skb, true);
3496 dump_stack();
3497}
3498
3499void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
3500{
3501 DO_ONCE_LITE(do_netdev_rx_csum_fault, dev, skb);
3502}
3503EXPORT_SYMBOL(netdev_rx_csum_fault);
3504#endif
3505
/* Returns 1 if the skb has highmem fragments the device cannot DMA to. */
3507static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
3508{
3509#ifdef CONFIG_HIGHMEM
3510 int i;
3511
3512 if (!(dev->features & NETIF_F_HIGHDMA)) {
3513 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3514 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3515
3516 if (PageHighMem(skb_frag_page(frag)))
3517 return 1;
3518 }
3519 }
3520#endif
3521 return 0;
3522}
3523
/* If MPLS offload request, verify we are testing hardware MPLS features
 * instead of standard features for the netdev.
 */
3527#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
3528static netdev_features_t net_mpls_features(struct sk_buff *skb,
3529 netdev_features_t features,
3530 __be16 type)
3531{
3532 if (eth_p_mpls(type))
3533 features &= skb->dev->mpls_features;
3534
3535 return features;
3536}
3537#else
3538static netdev_features_t net_mpls_features(struct sk_buff *skb,
3539 netdev_features_t features,
3540 __be16 type)
3541{
3542 return features;
3543}
3544#endif
3545
3546static netdev_features_t harmonize_features(struct sk_buff *skb,
3547 netdev_features_t features)
3548{
3549 __be16 type;
3550
3551 type = skb_network_protocol(skb, NULL);
3552 features = net_mpls_features(skb, features, type);
3553
3554 if (skb->ip_summed != CHECKSUM_NONE &&
3555 !can_checksum_protocol(features, type)) {
3556 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3557 }
3558 if (illegal_highdma(skb->dev, skb))
3559 features &= ~NETIF_F_SG;
3560
3561 return features;
3562}
3563
3564netdev_features_t passthru_features_check(struct sk_buff *skb,
3565 struct net_device *dev,
3566 netdev_features_t features)
3567{
3568 return features;
3569}
3570EXPORT_SYMBOL(passthru_features_check);
3571
3572static netdev_features_t dflt_features_check(struct sk_buff *skb,
3573 struct net_device *dev,
3574 netdev_features_t features)
3575{
3576 return vlan_features_check(skb, features);
3577}
3578
3579static netdev_features_t gso_features_check(const struct sk_buff *skb,
3580 struct net_device *dev,
3581 netdev_features_t features)
3582{
3583 u16 gso_segs = skb_shinfo(skb)->gso_segs;
3584
3585 if (gso_segs > dev->gso_max_segs)
3586 return features & ~NETIF_F_GSO_MASK;
3587
3588 if (!skb_shinfo(skb)->gso_type) {
3589 skb_warn_bad_offload(skb);
3590 return features & ~NETIF_F_GSO_MASK;
3591 }
3592
	/* Support for GSO partial features requires software
	 * intervention before we can actually process the packets
	 * so we need to strip support for any partial features now
	 * and we can pull them back in after we have partially
	 * segmented the frame.
	 */
3599 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
3600 features &= ~dev->gso_partial_features;
3601
	/* Make sure to clear the IPv4 ID mangling feature if the
	 * IPv4 header has the potential to be fragmented.
	 */
3605 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
3606 struct iphdr *iph = skb->encapsulation ?
3607 inner_ip_hdr(skb) : ip_hdr(skb);
3608
3609 if (!(iph->frag_off & htons(IP_DF)))
3610 features &= ~NETIF_F_TSO_MANGLEID;
3611 }
3612
3613 return features;
3614}
3615
3616netdev_features_t netif_skb_features(struct sk_buff *skb)
3617{
3618 struct net_device *dev = skb->dev;
3619 netdev_features_t features = dev->features;
3620
3621 if (skb_is_gso(skb))
3622 features = gso_features_check(skb, dev, features);
3623
	/* If encapsulation offload request, verify we are testing
	 * hardware encapsulation features instead of standard
	 * features for the netdev
	 */
3628 if (skb->encapsulation)
3629 features &= dev->hw_enc_features;
3630
3631 if (skb_vlan_tagged(skb))
3632 features = netdev_intersect_features(features,
3633 dev->vlan_features |
3634 NETIF_F_HW_VLAN_CTAG_TX |
3635 NETIF_F_HW_VLAN_STAG_TX);
3636
3637 if (dev->netdev_ops->ndo_features_check)
3638 features &= dev->netdev_ops->ndo_features_check(skb, dev,
3639 features);
3640 else
3641 features &= dflt_features_check(skb, dev, features);
3642
3643 return harmonize_features(skb, features);
3644}
3645EXPORT_SYMBOL(netif_skb_features);
3646
3647static int xmit_one(struct sk_buff *skb, struct net_device *dev,
3648 struct netdev_queue *txq, bool more)
3649{
3650 unsigned int len;
3651 int rc;
3652
3653 if (dev_nit_active(dev))
3654 dev_queue_xmit_nit(skb, dev);
3655
3656 len = skb->len;
3657 PRANDOM_ADD_NOISE(skb, dev, txq, len + jiffies);
3658 trace_net_dev_start_xmit(skb, dev);
3659 rc = netdev_start_xmit(skb, dev, txq, more);
3660 trace_net_dev_xmit(skb, rc, dev, len);
3661
3662 return rc;
3663}
3664
3665struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
3666 struct netdev_queue *txq, int *ret)
3667{
3668 struct sk_buff *skb = first;
3669 int rc = NETDEV_TX_OK;
3670
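	/* Walk the (possibly segmented) list, transmitting one skb at a time.
	 * On a hard error the remainder stays linked at skb->next so the
	 * caller can requeue it; stop early if the queue gets stopped.
	 */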
3671 while (skb) {
3672 struct sk_buff *next = skb->next;
3673
3674 skb_mark_not_on_list(skb);
3675 rc = xmit_one(skb, dev, txq, next != NULL);
3676 if (unlikely(!dev_xmit_complete(rc))) {
3677 skb->next = next;
3678 goto out;
3679 }
3680
3681 skb = next;
3682 if (netif_tx_queue_stopped(txq) && skb) {
3683 rc = NETDEV_TX_BUSY;
3684 break;
3685 }
3686 }
3687
3688out:
3689 *ret = rc;
3690 return skb;
3691}
3692
3693static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
3694 netdev_features_t features)
3695{
3696 if (skb_vlan_tag_present(skb) &&
3697 !vlan_hw_offload_capable(features, skb->vlan_proto))
3698 skb = __vlan_hwaccel_push_inside(skb);
3699 return skb;
3700}
3701
3702int skb_csum_hwoffload_help(struct sk_buff *skb,
3703 const netdev_features_t features)
3704{
3705 if (unlikely(skb_csum_is_sctp(skb)))
3706 return !!(features & NETIF_F_SCTP_CRC) ? 0 :
3707 skb_crc32c_csum_help(skb);
3708
3709 if (features & NETIF_F_HW_CSUM)
3710 return 0;
3711
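	/* Legacy NETIF_F_IP(V6)_CSUM devices can only offload the TCP/UDP
	 * checksum field; anything else falls back to software below.
	 */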
3712 if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
3713 switch (skb->csum_offset) {
3714 case offsetof(struct tcphdr, check):
3715 case offsetof(struct udphdr, check):
3716 return 0;
3717 }
3718 }
3719
3720 return skb_checksum_help(skb);
3721}
3722EXPORT_SYMBOL(skb_csum_hwoffload_help);
3723
3724static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
3725{
3726 netdev_features_t features;
3727
3728 features = netif_skb_features(skb);
3729 skb = validate_xmit_vlan(skb, features);
3730 if (unlikely(!skb))
3731 goto out_null;
3732
3733 skb = sk_validate_xmit_skb(skb, dev);
3734 if (unlikely(!skb))
3735 goto out_null;
3736
3737 if (netif_needs_gso(skb, features)) {
3738 struct sk_buff *segs;
3739
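		/* Segment in software; on success the original skb is
		 * consumed and replaced by the list of segments.
		 */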
3740 segs = skb_gso_segment(skb, features);
3741 if (IS_ERR(segs)) {
3742 goto out_kfree_skb;
3743 } else if (segs) {
3744 consume_skb(skb);
3745 skb = segs;
3746 }
3747 } else {
3748 if (skb_needs_linearize(skb, features) &&
3749 __skb_linearize(skb))
3750 goto out_kfree_skb;
3751
		/* If packet is not checksummed and device does not
		 * support checksumming for this protocol, complete
		 * checksumming here.
		 */
3756 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3757 if (skb->encapsulation)
3758 skb_set_inner_transport_header(skb,
3759 skb_checksum_start_offset(skb));
3760 else
3761 skb_set_transport_header(skb,
3762 skb_checksum_start_offset(skb));
3763 if (skb_csum_hwoffload_help(skb, features))
3764 goto out_kfree_skb;
3765 }
3766 }
3767
3768 skb = validate_xmit_xfrm(skb, features, again);
3769
3770 return skb;
3771
3772out_kfree_skb:
3773 kfree_skb(skb);
3774out_null:
3775 atomic_long_inc(&dev->tx_dropped);
3776 return NULL;
3777}
3778
3779struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
3780{
3781 struct sk_buff *next, *head = NULL, *tail;
3782
3783 for (; skb != NULL; skb = next) {
3784 next = skb->next;
3785 skb_mark_not_on_list(skb);
3786
		/* in case skb won't be segmented, point to itself */
3788 skb->prev = skb;
3789
3790 skb = validate_xmit_skb(skb, dev, again);
3791 if (!skb)
3792 continue;
3793
3794 if (!head)
3795 head = skb;
3796 else
3797 tail->next = skb;
		/* If skb was segmented, skb->prev points to
		 * the last segment. If not, it still contains skb.
		 */
3801 tail = skb->prev;
3802 }
3803 return head;
3804}
3805EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
3806
3807static void qdisc_pkt_len_init(struct sk_buff *skb)
3808{
3809 const struct skb_shared_info *shinfo = skb_shinfo(skb);
3810
3811 qdisc_skb_cb(skb)->pkt_len = skb->len;
3812
	/* To get more precise estimation of bytes sent on wire,
	 * we add to pkt_len the headers size of all segments
	 */
3816 if (shinfo->gso_size && skb_transport_header_was_set(skb)) {
3817 unsigned int hdr_len;
3818 u16 gso_segs = shinfo->gso_segs;
3819
		/* mac layer + network layer */
3821 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
3822
		/* + transport layer */
3824 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
3825 const struct tcphdr *th;
3826 struct tcphdr _tcphdr;
3827
3828 th = skb_header_pointer(skb, skb_transport_offset(skb),
3829 sizeof(_tcphdr), &_tcphdr);
3830 if (likely(th))
3831 hdr_len += __tcp_hdrlen(th);
3832 } else {
3833 struct udphdr _udphdr;
3834
3835 if (skb_header_pointer(skb, skb_transport_offset(skb),
3836 sizeof(_udphdr), &_udphdr))
3837 hdr_len += sizeof(struct udphdr);
3838 }
3839
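		/* gso_segs from an untrusted (SKB_GSO_DODGY) source may be
		 * wrong, so recompute it from the payload and gso_size.
		 */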
3840 if (shinfo->gso_type & SKB_GSO_DODGY)
3841 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
3842 shinfo->gso_size);
3843
3844 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
3845 }
3846}
3847
3848static int dev_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *q,
3849 struct sk_buff **to_free,
3850 struct netdev_queue *txq)
3851{
3852 int rc;
3853
3854 rc = q->enqueue(skb, q, to_free) & NET_XMIT_MASK;
3855 if (rc == NET_XMIT_SUCCESS)
3856 trace_qdisc_enqueue(q, txq, skb);
3857 return rc;
3858}
3859
3860static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
3861 struct net_device *dev,
3862 struct netdev_queue *txq)
3863{
3864 spinlock_t *root_lock = qdisc_lock(q);
3865 struct sk_buff *to_free = NULL;
3866 bool contended;
3867 int rc;
3868
3869 qdisc_calculate_pkt_len(skb, q);
3870
3871 if (q->flags & TCQ_F_NOLOCK) {
3872 if (q->flags & TCQ_F_CAN_BYPASS && nolock_qdisc_is_empty(q) &&
3873 qdisc_run_begin(q)) {
			/* Retest nolock_qdisc_is_empty() within the protection
			 * of q->seqlock to protect from racing with requeuing.
			 */
3877 if (unlikely(!nolock_qdisc_is_empty(q))) {
3878 rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
3879 __qdisc_run(q);
3880 qdisc_run_end(q);
3881
3882 goto no_lock_out;
3883 }
3884
3885 qdisc_bstats_cpu_update(q, skb);
3886 if (sch_direct_xmit(skb, q, dev, txq, NULL, true) &&
3887 !nolock_qdisc_is_empty(q))
3888 __qdisc_run(q);
3889
3890 qdisc_run_end(q);
3891 return NET_XMIT_SUCCESS;
3892 }
3893
3894 rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
3895 qdisc_run(q);
3896
3897no_lock_out:
3898 if (unlikely(to_free))
3899 kfree_skb_list(to_free);
3900 return rc;
3901 }
3902
	/*
	 * Heuristic to force contended enqueues to serialize on a
	 * separate lock before trying to get qdisc main lock.
	 * This permits qdisc->running owner to get the lock more
	 * often and dequeue packets faster.
	 */
3909 contended = qdisc_is_running(q);
3910 if (unlikely(contended))
3911 spin_lock(&q->busylock);
3912
3913 spin_lock(root_lock);
3914 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
3915 __qdisc_drop(skb, &to_free);
3916 rc = NET_XMIT_DROP;
3917 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
3918 qdisc_run_begin(q)) {
		/*
		 * This is a work-conserving queue; there are no old skbs
		 * waiting to be sent out; and the qdisc is not running -
		 * xmit the skb directly.
		 */

3925 qdisc_bstats_update(q, skb);
3926
3927 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
3928 if (unlikely(contended)) {
3929 spin_unlock(&q->busylock);
3930 contended = false;
3931 }
3932 __qdisc_run(q);
3933 }
3934
3935 qdisc_run_end(q);
3936 rc = NET_XMIT_SUCCESS;
3937 } else {
3938 rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
3939 if (qdisc_run_begin(q)) {
3940 if (unlikely(contended)) {
3941 spin_unlock(&q->busylock);
3942 contended = false;
3943 }
3944 __qdisc_run(q);
3945 qdisc_run_end(q);
3946 }
3947 }
3948 spin_unlock(root_lock);
3949 if (unlikely(to_free))
3950 kfree_skb_list(to_free);
3951 if (unlikely(contended))
3952 spin_unlock(&q->busylock);
3953 return rc;
3954}
3955
3956#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
3957static void skb_update_prio(struct sk_buff *skb)
3958{
3959 const struct netprio_map *map;
3960 const struct sock *sk;
3961 unsigned int prioidx;
3962
3963 if (skb->priority)
3964 return;
3965 map = rcu_dereference_bh(skb->dev->priomap);
3966 if (!map)
3967 return;
3968 sk = skb_to_full_sk(skb);
3969 if (!sk)
3970 return;
3971
3972 prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
3973
3974 if (prioidx < map->priomap_len)
3975 skb->priority = map->priomap[prioidx];
3976}
3977#else
3978#define skb_update_prio(skb)
3979#endif
3980
/**
 *	dev_loopback_xmit - loop back @skb
 *	@net: network namespace this loopback is happening in
 *	@sk:  sk needed to be a netfilter okfn
 *	@skb: buffer to transmit
 */
3987int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
3988{
3989 skb_reset_mac_header(skb);
3990 __skb_pull(skb, skb_network_offset(skb));
3991 skb->pkt_type = PACKET_LOOPBACK;
3992 skb->ip_summed = CHECKSUM_UNNECESSARY;
3993 WARN_ON(!skb_dst(skb));
3994 skb_dst_force(skb);
3995 netif_rx_ni(skb);
3996 return 0;
3997}
3998EXPORT_SYMBOL(dev_loopback_xmit);
3999
4000#ifdef CONFIG_NET_EGRESS
4001static struct sk_buff *
4002sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
4003{
4004 struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress);
4005 struct tcf_result cl_res;
4006
4007 if (!miniq)
4008 return skb;
4009
	/* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
4011 qdisc_skb_cb(skb)->mru = 0;
4012 qdisc_skb_cb(skb)->post_ct = false;
4013 mini_qdisc_bstats_cpu_update(miniq, skb);
4014
4015 switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
4016 case TC_ACT_OK:
4017 case TC_ACT_RECLASSIFY:
4018 skb->tc_index = TC_H_MIN(cl_res.classid);
4019 break;
4020 case TC_ACT_SHOT:
4021 mini_qdisc_qstats_cpu_drop(miniq);
4022 *ret = NET_XMIT_DROP;
4023 kfree_skb(skb);
4024 return NULL;
4025 case TC_ACT_STOLEN:
4026 case TC_ACT_QUEUED:
4027 case TC_ACT_TRAP:
4028 *ret = NET_XMIT_SUCCESS;
4029 consume_skb(skb);
4030 return NULL;
4031 case TC_ACT_REDIRECT:
		/* No need to push/pop skb's mac_header here on egress! */
4033 skb_do_redirect(skb);
4034 *ret = NET_XMIT_SUCCESS;
4035 return NULL;
4036 default:
4037 break;
4038 }
4039
4040 return skb;
4041}
4042#endif
4043
4044#ifdef CONFIG_XPS
4045static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
4046 struct xps_dev_maps *dev_maps, unsigned int tci)
4047{
4048 int tc = netdev_get_prio_tc_map(dev, skb->priority);
4049 struct xps_map *map;
4050 int queue_index = -1;
4051
4052 if (tc >= dev_maps->num_tc || tci >= dev_maps->nr_ids)
4053 return queue_index;
4054
4055 tci *= dev_maps->num_tc;
4056 tci += tc;
4057
4058 map = rcu_dereference(dev_maps->attr_map[tci]);
4059 if (map) {
4060 if (map->len == 1)
4061 queue_index = map->queues[0];
4062 else
4063 queue_index = map->queues[reciprocal_scale(
4064 skb_get_hash(skb), map->len)];
4065 if (unlikely(queue_index >= dev->real_num_tx_queues))
4066 queue_index = -1;
4067 }
4068 return queue_index;
4069}
4070#endif
4071
4072static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
4073 struct sk_buff *skb)
4074{
4075#ifdef CONFIG_XPS
4076 struct xps_dev_maps *dev_maps;
4077 struct sock *sk = skb->sk;
4078 int queue_index = -1;
4079
4080 if (!static_key_false(&xps_needed))
4081 return -1;
4082
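	/* Prefer the rx-queue based map (xps_rxqs) when the socket recorded
	 * an rx queue; otherwise fall back to the CPU based map.
	 */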
4083 rcu_read_lock();
4084 if (!static_key_false(&xps_rxqs_needed))
4085 goto get_cpus_map;
4086
4087 dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_RXQS]);
4088 if (dev_maps) {
4089 int tci = sk_rx_queue_get(sk);
4090
4091 if (tci >= 0)
4092 queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
4093 tci);
4094 }
4095
4096get_cpus_map:
4097 if (queue_index < 0) {
4098 dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_CPUS]);
4099 if (dev_maps) {
4100 unsigned int tci = skb->sender_cpu - 1;
4101
4102 queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
4103 tci);
4104 }
4105 }
4106 rcu_read_unlock();
4107
4108 return queue_index;
4109#else
4110 return -1;
4111#endif
4112}
4113
4114u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
4115 struct net_device *sb_dev)
4116{
4117 return 0;
4118}
4119EXPORT_SYMBOL(dev_pick_tx_zero);
4120
4121u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
4122 struct net_device *sb_dev)
4123{
4124 return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
4125}
4126EXPORT_SYMBOL(dev_pick_tx_cpu_id);
4127
4128u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
4129 struct net_device *sb_dev)
4130{
4131 struct sock *sk = skb->sk;
4132 int queue_index = sk_tx_queue_get(sk);
4133
4134 sb_dev = sb_dev ? : dev;
4135
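	/* Recompute the queue if none was cached on the socket, the flow
	 * allows reordering (ooo_okay), or the cached index became invalid;
	 * cache the result only on full sockets that hold a dst.
	 */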
4136 if (queue_index < 0 || skb->ooo_okay ||
4137 queue_index >= dev->real_num_tx_queues) {
4138 int new_index = get_xps_queue(dev, sb_dev, skb);
4139
4140 if (new_index < 0)
4141 new_index = skb_tx_hash(dev, sb_dev, skb);
4142
4143 if (queue_index != new_index && sk &&
4144 sk_fullsock(sk) &&
4145 rcu_access_pointer(sk->sk_dst_cache))
4146 sk_tx_queue_set(sk, new_index);
4147
4148 queue_index = new_index;
4149 }
4150
4151 return queue_index;
4152}
4153EXPORT_SYMBOL(netdev_pick_tx);
4154
4155struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
4156 struct sk_buff *skb,
4157 struct net_device *sb_dev)
4158{
4159 int queue_index = 0;
4160
4161#ifdef CONFIG_XPS
4162 u32 sender_cpu = skb->sender_cpu - 1;
4163
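	/* sender_cpu is stored biased by one so that zero means "unset";
	 * record the current CPU for XPS if it was never initialized.
	 */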
4164 if (sender_cpu >= (u32)NR_CPUS)
4165 skb->sender_cpu = raw_smp_processor_id() + 1;
4166#endif
4167
4168 if (dev->real_num_tx_queues != 1) {
4169 const struct net_device_ops *ops = dev->netdev_ops;
4170
4171 if (ops->ndo_select_queue)
4172 queue_index = ops->ndo_select_queue(dev, skb, sb_dev);
4173 else
4174 queue_index = netdev_pick_tx(dev, skb, sb_dev);
4175
4176 queue_index = netdev_cap_txqueue(dev, queue_index);
4177 }
4178
4179 skb_set_queue_mapping(skb, queue_index);
4180 return netdev_get_tx_queue(dev, queue_index);
4181}
4182
/**
 *	__dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *	@sb_dev: suffix device
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------
 *	I notice this method can also return errors from the queue
 *	disciplines, including NET_XMIT_DROP, which is a positive value.
 *	So, errors can also be positive.
 *
 *	Regardless of the return value, the skb is consumed, so it is
 *	currently difficult to retry a send to this method.  (You can bump
 *	the ref count before sending to hold a reference for retry if you
 *	are careful.)
 *
 *	When calling this method, interrupts MUST be enabled.  This is
 *	because the BH enable code must have IRQs enabled so that it will
 *	not deadlock.
 */
4209static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
4210{
4211 struct net_device *dev = skb->dev;
4212 struct netdev_queue *txq;
4213 struct Qdisc *q;
4214 int rc = -ENOMEM;
4215 bool again = false;
4216
4217 skb_reset_mac_header(skb);
4218
4219 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
4220 __skb_tstamp_tx(skb, NULL, NULL, skb->sk, SCM_TSTAMP_SCHED);
4221
	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
4225 rcu_read_lock_bh();
4226
4227 skb_update_prio(skb);
4228
4229 qdisc_pkt_len_init(skb);
4230#ifdef CONFIG_NET_CLS_ACT
4231 skb->tc_at_ingress = 0;
4232# ifdef CONFIG_NET_EGRESS
4233 if (static_branch_unlikely(&egress_needed_key)) {
4234 skb = sch_handle_egress(skb, &rc, dev);
4235 if (!skb)
4236 goto out;
4237 }
4238# endif
4239#endif
	/* If device/qdisc don't need skb->dst, release it right now while
	 * its hot in this cpu cache.
	 */
4243 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
4244 skb_dst_drop(skb);
4245 else
4246 skb_dst_force(skb);
4247
4248 txq = netdev_core_pick_tx(dev, skb, sb_dev);
4249 q = rcu_dereference_bh(txq->qdisc);
4250
4251 trace_net_dev_queue(skb);
4252 if (q->enqueue) {
4253 rc = __dev_xmit_skb(skb, q, dev, txq);
4254 goto out;
4255 }
4256
	/* The device has no queue. Common case for software devices:
	 * loopback, all the sorts of tunnels...
	 *
	 * Really, it is unlikely that netif_tx_lock protection is necessary
	 * here.  (f.e. loopback and IP tunnels are clean ignoring statistics
	 * counters.)
	 * However, it is possible, that they rely on protection
	 * made by us here.
	 *
	 * Check this and shot the lock. It is not prone from deadlocks.
	 * Either shot noqueue qdisc, it is even simpler 8)
	 */
4269 if (dev->flags & IFF_UP) {
4270 int cpu = smp_processor_id();
4271
4272 if (txq->xmit_lock_owner != cpu) {
4273 if (dev_xmit_recursion())
4274 goto recursion_alert;
4275
4276 skb = validate_xmit_skb(skb, dev, &again);
4277 if (!skb)
4278 goto out;
4279
4280 PRANDOM_ADD_NOISE(skb, dev, txq, jiffies);
4281 HARD_TX_LOCK(dev, txq, cpu);
4282
4283 if (!netif_xmit_stopped(txq)) {
4284 dev_xmit_recursion_inc();
4285 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
4286 dev_xmit_recursion_dec();
4287 if (dev_xmit_complete(rc)) {
4288 HARD_TX_UNLOCK(dev, txq);
4289 goto out;
4290 }
4291 }
4292 HARD_TX_UNLOCK(dev, txq);
4293 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
4294 dev->name);
4295 } else {
			/* Recursion is detected! It is possible,
			 * unfortunately
			 */
4299recursion_alert:
4300 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
4301 dev->name);
4302 }
4303 }
4304
4305 rc = -ENETDOWN;
4306 rcu_read_unlock_bh();
4307
4308 atomic_long_inc(&dev->tx_dropped);
4309 kfree_skb_list(skb);
4310 return rc;
4311out:
4312 rcu_read_unlock_bh();
4313 return rc;
4314}
4315
4316int dev_queue_xmit(struct sk_buff *skb)
4317{
4318 return __dev_queue_xmit(skb, NULL);
4319}
4320EXPORT_SYMBOL(dev_queue_xmit);
4321
4322int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev)
4323{
4324 return __dev_queue_xmit(skb, sb_dev);
4325}
4326EXPORT_SYMBOL(dev_queue_xmit_accel);
4327
4328int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
4329{
4330 struct net_device *dev = skb->dev;
4331 struct sk_buff *orig_skb = skb;
4332 struct netdev_queue *txq;
4333 int ret = NETDEV_TX_BUSY;
4334 bool again = false;
4335
4336 if (unlikely(!netif_running(dev) ||
4337 !netif_carrier_ok(dev)))
4338 goto drop;
4339
4340 skb = validate_xmit_skb_list(skb, dev, &again);
4341 if (skb != orig_skb)
4342 goto drop;
4343
4344 skb_set_queue_mapping(skb, queue_id);
4345 txq = skb_get_tx_queue(dev, skb);
4346 PRANDOM_ADD_NOISE(skb, dev, txq, jiffies);
4347
4348 local_bh_disable();
4349
4350 dev_xmit_recursion_inc();
4351 HARD_TX_LOCK(dev, txq, smp_processor_id());
4352 if (!netif_xmit_frozen_or_drv_stopped(txq))
4353 ret = netdev_start_xmit(skb, dev, txq, false);
4354 HARD_TX_UNLOCK(dev, txq);
4355 dev_xmit_recursion_dec();
4356
4357 local_bh_enable();
4358 return ret;
4359drop:
4360 atomic_long_inc(&dev->tx_dropped);
4361 kfree_skb_list(skb);
4362 return NET_XMIT_DROP;
4363}
4364EXPORT_SYMBOL(__dev_direct_xmit);
4365
/*************************************************************************
 *			Receiver routines
 *************************************************************************/

4370int netdev_max_backlog __read_mostly = 1000;
4371EXPORT_SYMBOL(netdev_max_backlog);
4372
4373int netdev_tstamp_prequeue __read_mostly = 1;
4374int netdev_budget __read_mostly = 300;
/* Must be at least 2 jiffies to guarantee 1 jiffy timeout */
4376unsigned int __read_mostly netdev_budget_usecs = 2 * USEC_PER_SEC / HZ;
4377int weight_p __read_mostly = 64;
4378int dev_weight_rx_bias __read_mostly = 1;
4379int dev_weight_tx_bias __read_mostly = 1;
4380int dev_rx_weight __read_mostly = 64;
4381int dev_tx_weight __read_mostly = 64;
4382
4383int gro_normal_batch __read_mostly = 8;
4384
/* Called with irq disabled */
4386static inline void ____napi_schedule(struct softnet_data *sd,
4387 struct napi_struct *napi)
4388{
4389 struct task_struct *thread;
4390
4391 if (test_bit(NAPI_STATE_THREADED, &napi->state)) {
		/* Paired with smp_mb__before_atomic() in
		 * napi_enable()/dev_set_threaded().
		 * Use READ_ONCE() to guarantee a complete
		 * read on napi->thread. Only call
		 * wake_up_process() when it's not NULL.
		 */
4398 thread = READ_ONCE(napi->thread);
4399 if (thread) {
			/* Avoid doing set_bit() if the thread is in
			 * INTERRUPTIBLE state, cause napi_thread_wait()
			 * makes sure to proceed with napi polling
			 * if the thread is explicitly woken from here.
			 */
4405 if (READ_ONCE(thread->__state) != TASK_INTERRUPTIBLE)
4406 set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
4407 wake_up_process(thread);
4408 return;
4409 }
4410 }
4411
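	/* No dedicated kthread: queue the NAPI instance on this CPU's poll
	 * list and raise NET_RX_SOFTIRQ to run net_rx_action().
	 */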
4412 list_add_tail(&napi->poll_list, &sd->poll_list);
4413 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4414}
4415
4416#ifdef CONFIG_RPS
4417
/* One global table that all flow-based protocols share. */
4419struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
4420EXPORT_SYMBOL(rps_sock_flow_table);
4421u32 rps_cpu_mask __read_mostly;
4422EXPORT_SYMBOL(rps_cpu_mask);
4423
4424struct static_key_false rps_needed __read_mostly;
4425EXPORT_SYMBOL(rps_needed);
4426struct static_key_false rfs_needed __read_mostly;
4427EXPORT_SYMBOL(rfs_needed);
4428
4429static struct rps_dev_flow *
4430set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4431 struct rps_dev_flow *rflow, u16 next_cpu)
4432{
4433 if (next_cpu < nr_cpu_ids) {
4434#ifdef CONFIG_RFS_ACCEL
4435 struct netdev_rx_queue *rxqueue;
4436 struct rps_dev_flow_table *flow_table;
4437 struct rps_dev_flow *old_rflow;
4438 u32 flow_id;
4439 u16 rxq_index;
4440 int rc;
4441
		/* Should we steer this flow to a different hardware queue? */
4443 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
4444 !(dev->features & NETIF_F_NTUPLE))
4445 goto out;
4446 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
4447 if (rxq_index == skb_get_rx_queue(skb))
4448 goto out;
4449
4450 rxqueue = dev->_rx + rxq_index;
4451 flow_table = rcu_dereference(rxqueue->rps_flow_table);
4452 if (!flow_table)
4453 goto out;
4454 flow_id = skb_get_hash(skb) & flow_table->mask;
4455 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
4456 rxq_index, flow_id);
4457 if (rc < 0)
4458 goto out;
4459 old_rflow = rflow;
4460 rflow = &flow_table->flows[flow_id];
4461 rflow->filter = rc;
4462 if (old_rflow->filter == rflow->filter)
4463 old_rflow->filter = RPS_NO_FILTER;
4464 out:
4465#endif
4466 rflow->last_qtail =
4467 per_cpu(softnet_data, next_cpu).input_queue_head;
4468 }
4469
4470 rflow->cpu = next_cpu;
4471 return rflow;
4472}
4473
/*
 * get_rps_cpu is called from netif_receive_skb and returns the target
 * CPU from the RPS map of the receiving queue for a given skb.
 * rcu_read_lock must be held on entry.
 */
4479static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4480 struct rps_dev_flow **rflowp)
4481{
4482 const struct rps_sock_flow_table *sock_flow_table;
4483 struct netdev_rx_queue *rxqueue = dev->_rx;
4484 struct rps_dev_flow_table *flow_table;
4485 struct rps_map *map;
4486 int cpu = -1;
4487 u32 tcpu;
4488 u32 hash;
4489
4490 if (skb_rx_queue_recorded(skb)) {
4491 u16 index = skb_get_rx_queue(skb);
4492
4493 if (unlikely(index >= dev->real_num_rx_queues)) {
4494 WARN_ONCE(dev->real_num_rx_queues > 1,
4495 "%s received packet on queue %u, but number "
4496 "of RX queues is %u\n",
4497 dev->name, index, dev->real_num_rx_queues);
4498 goto done;
4499 }
4500 rxqueue += index;
4501 }
4502
	/* Avoid computing hash if RFS/RPS is not active for this rxqueue */
4504
4505 flow_table = rcu_dereference(rxqueue->rps_flow_table);
4506 map = rcu_dereference(rxqueue->rps_map);
4507 if (!flow_table && !map)
4508 goto done;
4509
4510 skb_reset_network_header(skb);
4511 hash = skb_get_hash(skb);
4512 if (!hash)
4513 goto done;
4514
4515 sock_flow_table = rcu_dereference(rps_sock_flow_table);
4516 if (flow_table && sock_flow_table) {
4517 struct rps_dev_flow *rflow;
4518 u32 next_cpu;
4519 u32 ident;
4520
		/* First check into global flow table if there is a match */
4522 ident = sock_flow_table->ents[hash & sock_flow_table->mask];
4523 if ((ident ^ hash) & ~rps_cpu_mask)
4524 goto try_rps;
4525
4526 next_cpu = ident & rps_cpu_mask;
4527
		/* OK, now we know there is a match,
		 * we can look at the local (per receive queue) flow table
		 */
4531 rflow = &flow_table->flows[hash & flow_table->mask];
4532 tcpu = rflow->cpu;
4533
		/*
		 * If the desired CPU (where last recvmsg was done) is
		 * different from current CPU (one in the rx-queue flow
		 * table entry), switch if one of the following holds:
		 *   - Current CPU is unset (>= nr_cpu_ids).
		 *   - Current CPU is offline.
		 *   - The current CPU's queue tail has advanced beyond the
		 *     last packet that was enqueued using this table entry.
		 *     This guarantees that all previous packets for the flow
		 *     have been dequeued, thus preserving in order delivery.
		 */
4545 if (unlikely(tcpu != next_cpu) &&
4546 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
4547 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
4548 rflow->last_qtail)) >= 0)) {
4549 tcpu = next_cpu;
4550 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
4551 }
4552
4553 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
4554 *rflowp = rflow;
4555 cpu = tcpu;
4556 goto done;
4557 }
4558 }
4559
4560try_rps:
4561
4562 if (map) {
4563 tcpu = map->cpus[reciprocal_scale(hash, map->len)];
4564 if (cpu_online(tcpu)) {
4565 cpu = tcpu;
4566 goto done;
4567 }
4568 }
4569
4570done:
4571 return cpu;
4572}
4573
4574#ifdef CONFIG_RFS_ACCEL

/**
 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
 * @dev: Device on which the filter was set
 * @rxq_index: RX queue index
 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
 *
 * Drivers that implement ndo_rx_flow_steer() should periodically call
 * this function for each installed filter and remove the filters for
 * which it returns %true.
 */
4587bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
4588 u32 flow_id, u16 filter_id)
4589{
4590 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
4591 struct rps_dev_flow_table *flow_table;
4592 struct rps_dev_flow *rflow;
4593 bool expire = true;
4594 unsigned int cpu;
4595
4596 rcu_read_lock();
4597 flow_table = rcu_dereference(rxqueue->rps_flow_table);
4598 if (flow_table && flow_id <= flow_table->mask) {
4599 rflow = &flow_table->flows[flow_id];
4600 cpu = READ_ONCE(rflow->cpu);
4601 if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
4602 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
4603 rflow->last_qtail) <
4604 (int)(10 * flow_table->mask)))
4605 expire = false;
4606 }
4607 rcu_read_unlock();
4608 return expire;
4609}
4610EXPORT_SYMBOL(rps_may_expire_flow);
4611
4612#endif
4613
/* Called from hardirq (IPI) context */
4615static void rps_trigger_softirq(void *data)
4616{
4617 struct softnet_data *sd = data;
4618
4619 ____napi_schedule(sd, &sd->backlog);
4620 sd->received_rps++;
4621}
4622
4623#endif
4624
/*
 * Check if this softnet_data structure is another cpu one
 * If yes, queue it to our IPI list and return 1
 * If no, return 0
 */
4630static int rps_ipi_queued(struct softnet_data *sd)
4631{
4632#ifdef CONFIG_RPS
4633 struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
4634
4635 if (sd != mysd) {
4636 sd->rps_ipi_next = mysd->rps_ipi_list;
4637 mysd->rps_ipi_list = sd;
4638
4639 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4640 return 1;
4641 }
4642#endif
4643 return 0;
4644}
4645
4646#ifdef CONFIG_NET_FLOW_LIMIT
4647int netdev_flow_limit_table_len __read_mostly = (1 << 12);
4648#endif
4649
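/* Flow limit: once the backlog is at least half full, drop packets of any
 * flow that accounts for more than half of the most recently enqueued
 * packets, so that light flows keep getting through.
 */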
4650static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
4651{
4652#ifdef CONFIG_NET_FLOW_LIMIT
4653 struct sd_flow_limit *fl;
4654 struct softnet_data *sd;
4655 unsigned int old_flow, new_flow;
4656
4657 if (qlen < (netdev_max_backlog >> 1))
4658 return false;
4659
4660 sd = this_cpu_ptr(&softnet_data);
4661
4662 rcu_read_lock();
4663 fl = rcu_dereference(sd->flow_limit);
4664 if (fl) {
4665 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
4666 old_flow = fl->history[fl->history_head];
4667 fl->history[fl->history_head] = new_flow;
4668
4669 fl->history_head++;
4670 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
4671
4672 if (likely(fl->buckets[old_flow]))
4673 fl->buckets[old_flow]--;
4674
4675 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
4676 fl->count++;
4677 rcu_read_unlock();
4678 return true;
4679 }
4680 }
4681 rcu_read_unlock();
4682#endif
4683 return false;
4684}
4685
/*
 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
 * queue (may be a remote CPU queue).
 */
4690static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
4691 unsigned int *qtail)
4692{
4693 struct softnet_data *sd;
4694 unsigned long flags;
4695 unsigned int qlen;
4696
4697 sd = &per_cpu(softnet_data, cpu);
4698
4699 local_irq_save(flags);
4700
4701 rps_lock(sd);
4702 if (!netif_running(skb->dev))
4703 goto drop;
4704 qlen = skb_queue_len(&sd->input_pkt_queue);
4705 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
4706 if (qlen) {
4707enqueue:
4708 __skb_queue_tail(&sd->input_pkt_queue, skb);
4709 input_queue_tail_incr_save(sd, qtail);
4710 rps_unlock(sd);
4711 local_irq_restore(flags);
4712 return NET_RX_SUCCESS;
4713 }
4714
		/* Schedule NAPI for backlog device
		 * We can use non atomic operation since we own the queue lock
		 */
4718 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
4719 if (!rps_ipi_queued(sd))
4720 ____napi_schedule(sd, &sd->backlog);
4721 }
4722 goto enqueue;
4723 }
4724
4725drop:
4726 sd->dropped++;
4727 rps_unlock(sd);
4728
4729 local_irq_restore(flags);
4730
4731 atomic_long_inc(&skb->dev->rx_dropped);
4732 kfree_skb(skb);
4733 return NET_RX_DROP;
4734}
4735
4736static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
4737{
4738 struct net_device *dev = skb->dev;
4739 struct netdev_rx_queue *rxqueue;
4740
4741 rxqueue = dev->_rx;
4742
4743 if (skb_rx_queue_recorded(skb)) {
4744 u16 index = skb_get_rx_queue(skb);
4745
4746 if (unlikely(index >= dev->real_num_rx_queues)) {
4747 WARN_ONCE(dev->real_num_rx_queues > 1,
4748 "%s received packet on queue %u, but number "
4749 "of RX queues is %u\n",
4750 dev->name, index, dev->real_num_rx_queues);
4751
4752 return rxqueue;
4753 }
4754 rxqueue += index;
4755 }
4756 return rxqueue;
4757}
4758
4759static u32 netif_receive_generic_xdp(struct sk_buff *skb,
4760 struct xdp_buff *xdp,
4761 struct bpf_prog *xdp_prog)
4762{
4763 void *orig_data, *orig_data_end, *hard_start;
4764 struct netdev_rx_queue *rxqueue;
4765 u32 metalen, act = XDP_DROP;
4766 bool orig_bcast, orig_host;
4767 u32 mac_len, frame_sz;
4768 __be16 orig_eth_type;
4769 struct ethhdr *eth;
4770 int off;
4771
	/* Reinjected packets coming from act_mirred or similar should
	 * not get XDP generic processing.
	 */
4775 if (skb_is_redirected(skb))
4776 return XDP_PASS;
4777
	/* XDP packets must be linear and must have sufficient headroom
	 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
	 * native XDP provides, thus we need to do it here as well.
	 */
4782 if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
4783 skb_headroom(skb) < XDP_PACKET_HEADROOM) {
4784 int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
4785 int troom = skb->tail + skb->data_len - skb->end;
4786
		/* In case we have to go down the path and also linearize,
		 * then lets do the pskb_expand_head() work just once here.
		 */
4790 if (pskb_expand_head(skb,
4791 hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
4792 troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
4793 goto do_drop;
4794 if (skb_linearize(skb))
4795 goto do_drop;
4796 }
4797
	/* The XDP program wants to see the packet starting at the MAC
	 * header.
	 */
4801 mac_len = skb->data - skb_mac_header(skb);
4802 hard_start = skb->data - skb_headroom(skb);
4803
	/* SKB "head" area always have tailroom for skb_shared_info */
4805 frame_sz = (void *)skb_end_pointer(skb) - hard_start;
4806 frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4807
4808 rxqueue = netif_get_rxqueue(skb);
4809 xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq);
4810 xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len,
4811 skb_headlen(skb) + mac_len, true);
4812
4813 orig_data_end = xdp->data_end;
4814 orig_data = xdp->data;
4815 eth = (struct ethhdr *)xdp->data;
4816 orig_host = ether_addr_equal_64bits(eth->h_dest, skb->dev->dev_addr);
4817 orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest);
4818 orig_eth_type = eth->h_proto;
4819
4820 act = bpf_prog_run_xdp(xdp_prog, xdp);
4821
	/* check if bpf_xdp_adjust_head was used */
4823 off = xdp->data - orig_data;
4824 if (off) {
4825 if (off > 0)
4826 __skb_pull(skb, off);
4827 else if (off < 0)
4828 __skb_push(skb, -off);
4829
4830 skb->mac_header += off;
4831 skb_reset_network_header(skb);
4832 }
4833
	/* check if bpf_xdp_adjust_tail was used */
4835 off = xdp->data_end - orig_data_end;
4836 if (off != 0) {
4837 skb_set_tail_pointer(skb, xdp->data_end - xdp->data);
4838 skb->len += off;
4839 }
4840
	/* check if XDP changed eth hdr such SKB needs update */
4842 eth = (struct ethhdr *)xdp->data;
4843 if ((orig_eth_type != eth->h_proto) ||
4844 (orig_host != ether_addr_equal_64bits(eth->h_dest,
4845 skb->dev->dev_addr)) ||
4846 (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) {
4847 __skb_push(skb, ETH_HLEN);
4848 skb->pkt_type = PACKET_HOST;
4849 skb->protocol = eth_type_trans(skb, skb->dev);
4850 }
4851
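	/* For XDP_TX/XDP_REDIRECT the mac header is pushed back so the frame
	 * can be transmitted as-is; XDP_PASS propagates any metadata the
	 * program left in front of the data; everything else is dropped.
	 */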
4852 switch (act) {
4853 case XDP_REDIRECT:
4854 case XDP_TX:
4855 __skb_push(skb, mac_len);
4856 break;
4857 case XDP_PASS:
4858 metalen = xdp->data - xdp->data_meta;
4859 if (metalen)
4860 skb_metadata_set(skb, metalen);
4861 break;
4862 default:
4863 bpf_warn_invalid_xdp_action(act);
4864 fallthrough;
4865 case XDP_ABORTED:
4866 trace_xdp_exception(skb->dev, xdp_prog, act);
4867 fallthrough;
4868 case XDP_DROP:
4869 do_drop:
4870 kfree_skb(skb);
4871 break;
4872 }
4873
4874 return act;
4875}
4876
/* When doing generic XDP we have to bypass the qdisc layer and the
 * network taps in order to match in-driver-XDP behavior.
 */
4880void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
4881{
4882 struct net_device *dev = skb->dev;
4883 struct netdev_queue *txq;
4884 bool free_skb = true;
4885 int cpu, rc;
4886
4887 txq = netdev_core_pick_tx(dev, skb, NULL);
4888 cpu = smp_processor_id();
4889 HARD_TX_LOCK(dev, txq, cpu);
4890 if (!netif_xmit_stopped(txq)) {
4891 rc = netdev_start_xmit(skb, dev, txq, 0);
4892 if (dev_xmit_complete(rc))
4893 free_skb = false;
4894 }
4895 HARD_TX_UNLOCK(dev, txq);
4896 if (free_skb) {
4897 trace_xdp_exception(dev, xdp_prog, XDP_TX);
4898 kfree_skb(skb);
4899 }
4900}
4901
4902static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);
4903
4904int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
4905{
4906 if (xdp_prog) {
4907 struct xdp_buff xdp;
4908 u32 act;
4909 int err;
4910
4911 act = netif_receive_generic_xdp(skb, &xdp, xdp_prog);
4912 if (act != XDP_PASS) {
4913 switch (act) {
4914 case XDP_REDIRECT:
4915 err = xdp_do_generic_redirect(skb->dev, skb,
4916 &xdp, xdp_prog);
4917 if (err)
4918 goto out_redir;
4919 break;
4920 case XDP_TX:
4921 generic_xdp_tx(skb, xdp_prog);
4922 break;
4923 }
4924 return XDP_DROP;
4925 }
4926 }
4927 return XDP_PASS;
4928out_redir:
4929 kfree_skb(skb);
4930 return XDP_DROP;
4931}
4932EXPORT_SYMBOL_GPL(do_xdp_generic);
4933
4934static int netif_rx_internal(struct sk_buff *skb)
4935{
4936 int ret;
4937
4938 net_timestamp_check(netdev_tstamp_prequeue, skb);
4939
4940 trace_netif_rx(skb);
4941
4942#ifdef CONFIG_RPS
4943 if (static_branch_unlikely(&rps_needed)) {
4944 struct rps_dev_flow voidflow, *rflow = &voidflow;
4945 int cpu;
4946
4947 preempt_disable();
4948 rcu_read_lock();
4949
4950 cpu = get_rps_cpu(skb->dev, skb, &rflow);
4951 if (cpu < 0)
4952 cpu = smp_processor_id();
4953
4954 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
4955
4956 rcu_read_unlock();
4957 preempt_enable();
4958 } else
4959#endif
4960 {
4961 unsigned int qtail;
4962
4963 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
4964 put_cpu();
4965 }
4966 return ret;
4967}
4968
/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped)
 *
 */
4984int netif_rx(struct sk_buff *skb)
4985{
4986 int ret;
4987
4988 trace_netif_rx_entry(skb);
4989
4990 ret = netif_rx_internal(skb);
4991 trace_netif_rx_exit(ret);
4992
4993 return ret;
4994}
4995EXPORT_SYMBOL(netif_rx);
4996
4997int netif_rx_ni(struct sk_buff *skb)
4998{
4999 int err;
5000
5001 trace_netif_rx_ni_entry(skb);
5002
5003 preempt_disable();
5004 err = netif_rx_internal(skb);
5005 if (local_softirq_pending())
5006 do_softirq();
5007 preempt_enable();
5008 trace_netif_rx_ni_exit(err);
5009
5010 return err;
5011}
5012EXPORT_SYMBOL(netif_rx_ni);
5013
5014int netif_rx_any_context(struct sk_buff *skb)
5015{
	/*
	 * netif_rx_ni() may process pending softirqs directly and must
	 * therefore only be used from process context; from any kind of
	 * interrupt context fall back to plain netif_rx(), which only
	 * raises the softirq.
	 */
5022 if (in_interrupt())
5023 return netif_rx(skb);
5024 else
5025 return netif_rx_ni(skb);
5026}
5027EXPORT_SYMBOL(netif_rx_any_context);
5028
5029static __latent_entropy void net_tx_action(struct softirq_action *h)
5030{
5031 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
5032
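	/* First free skbs queued from IRQ context via dev_kfree_skb_irq(),
	 * then run the qdiscs that __netif_schedule() placed on this CPU's
	 * output queue.
	 */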
5033 if (sd->completion_queue) {
5034 struct sk_buff *clist;
5035
5036 local_irq_disable();
5037 clist = sd->completion_queue;
5038 sd->completion_queue = NULL;
5039 local_irq_enable();
5040
5041 while (clist) {
5042 struct sk_buff *skb = clist;
5043
5044 clist = clist->next;
5045
5046 WARN_ON(refcount_read(&skb->users));
5047 if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
5048 trace_consume_skb(skb);
5049 else
5050 trace_kfree_skb(skb, net_tx_action);
5051
5052 if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
5053 __kfree_skb(skb);
5054 else
5055 __kfree_skb_defer(skb);
5056 }
5057 }
5058
5059 if (sd->output_queue) {
5060 struct Qdisc *head;
5061
5062 local_irq_disable();
5063 head = sd->output_queue;
5064 sd->output_queue = NULL;
5065 sd->output_queue_tailp = &sd->output_queue;
5066 local_irq_enable();
5067
5068 rcu_read_lock();
5069
5070 while (head) {
5071 struct Qdisc *q = head;
5072 spinlock_t *root_lock = NULL;
5073
5074 head = head->next_sched;
5075
5076
5077
5078
5079 smp_mb__before_atomic();
5080
5081 if (!(q->flags & TCQ_F_NOLOCK)) {
5082 root_lock = qdisc_lock(q);
5083 spin_lock(root_lock);
5084 } else if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED,
5085 &q->state))) {
5086
5087
5088
5089
5090
5091
5092
5093
5094 clear_bit(__QDISC_STATE_SCHED, &q->state);
5095 continue;
5096 }
5097
5098 clear_bit(__QDISC_STATE_SCHED, &q->state);
5099 qdisc_run(q);
5100 if (root_lock)
5101 spin_unlock(root_lock);
5102 }
5103
5104 rcu_read_unlock();
5105 }
5106
5107 xfrm_dev_backlog(sd);
5108}
5109
5110#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
/* This hook is defined here for ATM LANE */
5112int (*br_fdb_test_addr_hook)(struct net_device *dev,
5113 unsigned char *addr) __read_mostly;
5114EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
5115#endif
5116
5117static inline struct sk_buff *
5118sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
5119 struct net_device *orig_dev, bool *another)
5120{
5121#ifdef CONFIG_NET_CLS_ACT
5122 struct mini_Qdisc *miniq = rcu_dereference_bh(skb->dev->miniq_ingress);
5123 struct tcf_result cl_res;
5124
	/* If there's at least one ingress present somewhere (so
	 * we get here via enabled static key), remaining devices
	 * that are not configured with an ingress qdisc will bail
	 * out here.
	 */
5130 if (!miniq)
5131 return skb;
5132
5133 if (*pt_prev) {
5134 *ret = deliver_skb(skb, *pt_prev, orig_dev);
5135 *pt_prev = NULL;
5136 }
5137
5138 qdisc_skb_cb(skb)->pkt_len = skb->len;
5139 qdisc_skb_cb(skb)->mru = 0;
5140 qdisc_skb_cb(skb)->post_ct = false;
5141 skb->tc_at_ingress = 1;
5142 mini_qdisc_bstats_cpu_update(miniq, skb);
5143
5144 switch (tcf_classify_ingress(skb, miniq->block, miniq->filter_list,
5145 &cl_res, false)) {
5146 case TC_ACT_OK:
5147 case TC_ACT_RECLASSIFY:
5148 skb->tc_index = TC_H_MIN(cl_res.classid);
5149 break;
5150 case TC_ACT_SHOT:
5151 mini_qdisc_qstats_cpu_drop(miniq);
5152 kfree_skb(skb);
5153 return NULL;
5154 case TC_ACT_STOLEN:
5155 case TC_ACT_QUEUED:
5156 case TC_ACT_TRAP:
5157 consume_skb(skb);
5158 return NULL;
5159 case TC_ACT_REDIRECT:
		/* skb_mac_header check was done by cls/act_bpf, so
		 * we can safely push the L2 header back before
		 * redirecting to another netdev
		 */
5164 __skb_push(skb, skb->mac_len);
5165 if (skb_do_redirect(skb) == -EAGAIN) {
5166 __skb_pull(skb, skb->mac_len);
5167 *another = true;
5168 break;
5169 }
5170 return NULL;
5171 case TC_ACT_CONSUMED:
5172 return NULL;
5173 default:
5174 break;
5175 }
5176#endif
5177 return skb;
5178}
5179
/**
 * netdev_is_rx_handler_busy - check if receive handler is registered
 * @dev: device to check
 *
 * Check if a receive handler is already registered for a given device.
 * Return true if there is one.
 *
 * The caller must hold the rtnl_mutex.
 */
5189bool netdev_is_rx_handler_busy(struct net_device *dev)
5190{
5191 ASSERT_RTNL();
5192 return dev && rtnl_dereference(dev->rx_handler);
5193}
5194EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
5195
/**
 * netdev_rx_handler_register - register receive handler
 * @dev: device to register a handler for
 * @rx_handler: receive handler to register
 * @rx_handler_data: data pointer that is used by rx handler
 *
 * Register a receive handler for a device. This handler will then be
 * called from __netif_receive_skb. A negative errno code is returned
 * on a failure.
 *
 * The caller must hold the rtnl_mutex.
 *
 * For a general description of rx_handler, see enum rx_handler_result.
 */
5210int netdev_rx_handler_register(struct net_device *dev,
5211 rx_handler_func_t *rx_handler,
5212 void *rx_handler_data)
5213{
5214 if (netdev_is_rx_handler_busy(dev))
5215 return -EBUSY;
5216
5217 if (dev->priv_flags & IFF_NO_RX_HANDLER)
5218 return -EINVAL;
5219
	/* Note: rx_handler_data must be set before rx_handler */
5221 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
5222 rcu_assign_pointer(dev->rx_handler, rx_handler);
5223
5224 return 0;
5225}
5226EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
5227
/**
 * netdev_rx_handler_unregister - unregister receive handler
 * @dev: device to unregister a handler from
 *
 * Unregister a receive handler from a device.
 *
 * The caller must hold the rtnl_mutex.
 */
5236void netdev_rx_handler_unregister(struct net_device *dev)
5237{
5238
5239 ASSERT_RTNL();
5240 RCU_INIT_POINTER(dev->rx_handler, NULL);
	/* a reader seeing a non NULL rx_handler in a rcu_read_lock()
	 * section has a guarantee to see a non NULL rx_handler_data
	 * as well.
	 */
5245 synchronize_net();
5246 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
5247}
5248EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
5249
/*
 * Only protocols needed to make forward progress under memory pressure
 * may consume skbs allocated from the PFMEMALLOC reserves: ARP, IP, IPv6
 * and their VLAN encapsulations.
 */
5254static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
5255{
5256 switch (skb->protocol) {
5257 case htons(ETH_P_ARP):
5258 case htons(ETH_P_IP):
5259 case htons(ETH_P_IPV6):
5260 case htons(ETH_P_8021Q):
5261 case htons(ETH_P_8021AD):
5262 return true;
5263 default:
5264 return false;
5265 }
5266}
5267
5268static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
5269 int *ret, struct net_device *orig_dev)
5270{
5271 if (nf_hook_ingress_active(skb)) {
5272 int ingress_retval;
5273
5274 if (*pt_prev) {
5275 *ret = deliver_skb(skb, *pt_prev, orig_dev);
5276 *pt_prev = NULL;
5277 }
5278
5279 rcu_read_lock();
5280 ingress_retval = nf_hook_ingress(skb);
5281 rcu_read_unlock();
5282 return ingress_retval;
5283 }
5284 return 0;
5285}
5286
5287static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
5288 struct packet_type **ppt_prev)
5289{
5290 struct packet_type *ptype, *pt_prev;
5291 rx_handler_func_t *rx_handler;
5292 struct sk_buff *skb = *pskb;
5293 struct net_device *orig_dev;
5294 bool deliver_exact = false;
5295 int ret = NET_RX_DROP;
5296 __be16 type;
5297
5298 net_timestamp_check(!netdev_tstamp_prequeue, skb);
5299
5300 trace_netif_receive_skb(skb);
5301
5302 orig_dev = skb->dev;
5303
5304 skb_reset_network_header(skb);
5305 if (!skb_transport_header_was_set(skb))
5306 skb_reset_transport_header(skb);
5307 skb_reset_mac_len(skb);
5308
5309 pt_prev = NULL;
5310
5311another_round:
5312 skb->skb_iif = skb->dev->ifindex;
5313
5314 __this_cpu_inc(softnet_data.processed);
5315
5316 if (static_branch_unlikely(&generic_xdp_needed_key)) {
5317 int ret2;
5318
5319 migrate_disable();
5320 ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
5321 migrate_enable();
5322
5323 if (ret2 != XDP_PASS) {
5324 ret = NET_RX_DROP;
5325 goto out;
5326 }
5327 skb_reset_mac_len(skb);
5328 }
5329
5330 if (eth_type_vlan(skb->protocol)) {
5331 skb = skb_vlan_untag(skb);
5332 if (unlikely(!skb))
5333 goto out;
5334 }
5335
5336 if (skb_skip_tc_classify(skb))
5337 goto skip_classify;
5338
5339 if (pfmemalloc)
5340 goto skip_taps;
5341
5342 list_for_each_entry_rcu(ptype, &ptype_all, list) {
5343 if (pt_prev)
5344 ret = deliver_skb(skb, pt_prev, orig_dev);
5345 pt_prev = ptype;
5346 }
5347
5348 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
5349 if (pt_prev)
5350 ret = deliver_skb(skb, pt_prev, orig_dev);
5351 pt_prev = ptype;
5352 }
5353
5354skip_taps:
5355#ifdef CONFIG_NET_INGRESS
5356 if (static_branch_unlikely(&ingress_needed_key)) {
5357 bool another = false;
5358
5359 skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev,
5360 &another);
5361 if (another)
5362 goto another_round;
5363 if (!skb)
5364 goto out;
5365
5366 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
5367 goto out;
5368 }
5369#endif
5370 skb_reset_redirect(skb);
5371skip_classify:
5372 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
5373 goto drop;
5374
5375 if (skb_vlan_tag_present(skb)) {
5376 if (pt_prev) {
5377 ret = deliver_skb(skb, pt_prev, orig_dev);
5378 pt_prev = NULL;
5379 }
5380 if (vlan_do_receive(&skb))
5381 goto another_round;
5382 else if (unlikely(!skb))
5383 goto out;
5384 }
5385
5386 rx_handler = rcu_dereference(skb->dev->rx_handler);
5387 if (rx_handler) {
5388 if (pt_prev) {
5389 ret = deliver_skb(skb, pt_prev, orig_dev);
5390 pt_prev = NULL;
5391 }
5392 switch (rx_handler(&skb)) {
5393 case RX_HANDLER_CONSUMED:
5394 ret = NET_RX_SUCCESS;
5395 goto out;
5396 case RX_HANDLER_ANOTHER:
5397 goto another_round;
5398 case RX_HANDLER_EXACT:
5399 deliver_exact = true;
5400 break;
5401 case RX_HANDLER_PASS:
5402 break;
5403 default:
5404 BUG();
5405 }
5406 }
5407
5408 if (unlikely(skb_vlan_tag_present(skb)) && !netdev_uses_dsa(skb->dev)) {
5409check_vlan_id:
5410 if (skb_vlan_tag_get_id(skb)) {
5411
5412
5413
5414 skb->pkt_type = PACKET_OTHERHOST;
5415 } else if (eth_type_vlan(skb->protocol)) {
5416
5417
5418
5419
5420 __vlan_hwaccel_clear_tag(skb);
5421 skb = skb_vlan_untag(skb);
5422 if (unlikely(!skb))
5423 goto out;
5424 if (vlan_do_receive(&skb))
5425
5426
5427
5428 goto another_round;
5429 else if (unlikely(!skb))
5430 goto out;
5431 else
5432
5433
5434
5435
5436 goto check_vlan_id;
5437 }
5438
5439
5440
5441
5442 __vlan_hwaccel_clear_tag(skb);
5443 }
5444
5445 type = skb->protocol;
5446
5447
5448 if (likely(!deliver_exact)) {
5449 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5450 &ptype_base[ntohs(type) &
5451 PTYPE_HASH_MASK]);
5452 }
5453
5454 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5455 &orig_dev->ptype_specific);
5456
5457 if (unlikely(skb->dev != orig_dev)) {
5458 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5459 &skb->dev->ptype_specific);
5460 }
5461
5462 if (pt_prev) {
5463 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
5464 goto drop;
5465 *ppt_prev = pt_prev;
5466 } else {
5467drop:
5468 if (!deliver_exact)
5469 atomic_long_inc(&skb->dev->rx_dropped);
5470 else
5471 atomic_long_inc(&skb->dev->rx_nohandler);
5472 kfree_skb(skb);
5473
5474
5475
5476 ret = NET_RX_DROP;
5477 }
5478
5479out:
5480
5481
5482
5483
5484
5485
5486 *pskb = skb;
5487 return ret;
5488}
5489
5490static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
5491{
5492 struct net_device *orig_dev = skb->dev;
5493 struct packet_type *pt_prev = NULL;
5494 int ret;
5495
5496 ret = __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
5497 if (pt_prev)
5498 ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb,
5499 skb->dev, pt_prev, orig_dev);
5500 return ret;
5501}
5502
5503
5504
5505
5506
5507
5508
5509
5510
5511
5512
5513
5514
5515
5516
5517
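/**
 * netif_receive_skb_core - special purpose version of netif_receive_skb
 * @skb: buffer to process
 *
 * More direct receive version of netif_receive_skb(). It should only be
 * used by callers that need to skip RPS, the deferred RX timestamp check
 * and the pfmemalloc special casing done by netif_receive_skb().
 *
 * Return values (usually ignored):
 * NET_RX_SUCCESS: no congestion
 * NET_RX_DROP: packet was dropped
 */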
5518int netif_receive_skb_core(struct sk_buff *skb)
5519{
5520 int ret;
5521
5522 rcu_read_lock();
5523 ret = __netif_receive_skb_one_core(skb, false);
5524 rcu_read_unlock();
5525
5526 return ret;
5527}
5528EXPORT_SYMBOL(netif_receive_skb_core);
5529
5530static inline void __netif_receive_skb_list_ptype(struct list_head *head,
5531 struct packet_type *pt_prev,
5532 struct net_device *orig_dev)
5533{
5534 struct sk_buff *skb, *next;
5535
5536 if (!pt_prev)
5537 return;
5538 if (list_empty(head))
5539 return;
5540 if (pt_prev->list_func != NULL)
5541 INDIRECT_CALL_INET(pt_prev->list_func, ipv6_list_rcv,
5542 ip_list_rcv, head, pt_prev, orig_dev);
5543 else
5544 list_for_each_entry_safe(skb, next, head, list) {
5545 skb_list_del_init(skb);
5546 pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
5547 }
5548}
5549
5550static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
5551{
5552
5553
5554
5555
5556
5557
5558
5559
5560
5561
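 /* Fast-path assumptions:
  * - There is no RX handler.
  * - Only one packet_type matches.
  * If either of these fails, we end up doing some per-packet processing
  * in-line, then handling the 'last ptype' for the whole sublist. This
  * can't cause out-of-order delivery to any single ptype, because the
  * 'last ptype' must be constant across the sublist, and all other
  * ptypes are handled per-packet. pt_curr/od_curr track the (common)
  * ptype and orig_dev of the current sublist.
  */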
5562 struct packet_type *pt_curr = NULL;
5563
5564 struct net_device *od_curr = NULL;
5565 struct list_head sublist;
5566 struct sk_buff *skb, *next;
5567
5568 INIT_LIST_HEAD(&sublist);
5569 list_for_each_entry_safe(skb, next, head, list) {
5570 struct net_device *orig_dev = skb->dev;
5571 struct packet_type *pt_prev = NULL;
5572
5573 skb_list_del_init(skb);
5574 __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
5575 if (!pt_prev)
5576 continue;
5577 if (pt_curr != pt_prev || od_curr != orig_dev) {
5578
5579 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
5580
5581 INIT_LIST_HEAD(&sublist);
5582 pt_curr = pt_prev;
5583 od_curr = orig_dev;
5584 }
5585 list_add_tail(&skb->list, &sublist);
5586 }
5587
5588
5589 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
5590}
5591
5592static int __netif_receive_skb(struct sk_buff *skb)
5593{
5594 int ret;
5595
5596 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
5597 unsigned int noreclaim_flag;
5598
5599
5600
5601
5602
5603
5604
5605
5606
5607
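 /* PFMEMALLOC skbs are special, they should
  * - be delivered to SOCK_MEMALLOC sockets only
  * - stay away from userspace
  * - have bounded memory usage
  * Use PF_MEMALLOC as this saves us from propagating the allocation
  * context down to all allocation sites.
  */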
5608 noreclaim_flag = memalloc_noreclaim_save();
5609 ret = __netif_receive_skb_one_core(skb, true);
5610 memalloc_noreclaim_restore(noreclaim_flag);
5611 } else
5612 ret = __netif_receive_skb_one_core(skb, false);
5613
5614 return ret;
5615}
5616
5617static void __netif_receive_skb_list(struct list_head *head)
5618{
5619 unsigned long noreclaim_flag = 0;
5620 struct sk_buff *skb, *next;
5621 bool pfmemalloc = false;
5622
5623 list_for_each_entry_safe(skb, next, head, list) {
5624 if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) {
5625 struct list_head sublist;
5626
5627
5628 list_cut_before(&sublist, head, &skb->list);
5629 if (!list_empty(&sublist))
5630 __netif_receive_skb_list_core(&sublist, pfmemalloc);
5631 pfmemalloc = !pfmemalloc;
5632
5633 if (pfmemalloc)
5634 noreclaim_flag = memalloc_noreclaim_save();
5635 else
5636 memalloc_noreclaim_restore(noreclaim_flag);
5637 }
5638 }
5639
5640 if (!list_empty(head))
5641 __netif_receive_skb_list_core(head, pfmemalloc);
5642
5643 if (pfmemalloc)
5644 memalloc_noreclaim_restore(noreclaim_flag);
5645}
5646
5647static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
5648{
5649 struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
5650 struct bpf_prog *new = xdp->prog;
5651 int ret = 0;
5652
5653 if (new) {
5654 u32 i;
5655
5656 mutex_lock(&new->aux->used_maps_mutex);
5657
5658
5659
5660
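 /* Generic XDP does not support maps (DEVMAP/CPUMAP) whose entries can
  * have a bpf_prog attached; reject programs using such maps here.
  */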
5661 for (i = 0; i < new->aux->used_map_cnt; i++) {
5662 if (dev_map_can_have_prog(new->aux->used_maps[i]) ||
5663 cpu_map_prog_allowed(new->aux->used_maps[i])) {
5664 mutex_unlock(&new->aux->used_maps_mutex);
5665 return -EINVAL;
5666 }
5667 }
5668
5669 mutex_unlock(&new->aux->used_maps_mutex);
5670 }
5671
5672 switch (xdp->command) {
5673 case XDP_SETUP_PROG:
5674 rcu_assign_pointer(dev->xdp_prog, new);
5675 if (old)
5676 bpf_prog_put(old);
5677
5678 if (old && !new) {
5679 static_branch_dec(&generic_xdp_needed_key);
5680 } else if (new && !old) {
5681 static_branch_inc(&generic_xdp_needed_key);
5682 dev_disable_lro(dev);
5683 dev_disable_gro_hw(dev);
5684 }
5685 break;
5686
5687 default:
5688 ret = -EINVAL;
5689 break;
5690 }
5691
5692 return ret;
5693}
5694
5695static int netif_receive_skb_internal(struct sk_buff *skb)
5696{
5697 int ret;
5698
5699 net_timestamp_check(netdev_tstamp_prequeue, skb);
5700
5701 if (skb_defer_rx_timestamp(skb))
5702 return NET_RX_SUCCESS;
5703
5704 rcu_read_lock();
5705#ifdef CONFIG_RPS
5706 if (static_branch_unlikely(&rps_needed)) {
5707 struct rps_dev_flow voidflow, *rflow = &voidflow;
5708 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5709
5710 if (cpu >= 0) {
5711 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5712 rcu_read_unlock();
5713 return ret;
5714 }
5715 }
5716#endif
5717 ret = __netif_receive_skb(skb);
5718 rcu_read_unlock();
5719 return ret;
5720}
5721
5722static void netif_receive_skb_list_internal(struct list_head *head)
5723{
5724 struct sk_buff *skb, *next;
5725 struct list_head sublist;
5726
5727 INIT_LIST_HEAD(&sublist);
5728 list_for_each_entry_safe(skb, next, head, list) {
5729 net_timestamp_check(netdev_tstamp_prequeue, skb);
5730 skb_list_del_init(skb);
5731 if (!skb_defer_rx_timestamp(skb))
5732 list_add_tail(&skb->list, &sublist);
5733 }
5734 list_splice_init(&sublist, head);
5735
5736 rcu_read_lock();
5737#ifdef CONFIG_RPS
5738 if (static_branch_unlikely(&rps_needed)) {
5739 list_for_each_entry_safe(skb, next, head, list) {
5740 struct rps_dev_flow voidflow, *rflow = &voidflow;
5741 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5742
5743 if (cpu >= 0) {
5744
5745 skb_list_del_init(skb);
5746 enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5747 }
5748 }
5749 }
5750#endif
5751 __netif_receive_skb_list(head);
5752 rcu_read_unlock();
5753}
5754
5755
5756
5757
5758
5759
5760
5761
5762
5763
5764
5765
5766
5767
5768
5769
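/**
 * netif_receive_skb - process receive buffer from network
 * @skb: buffer to process
 *
 * netif_receive_skb() is the main receive data processing function.
 * It always succeeds. The buffer may be dropped during processing
 * for congestion control or by the protocol layers.
 *
 * This function may only be called from softirq context and interrupts
 * should be enabled.
 *
 * Return values (usually ignored):
 * NET_RX_SUCCESS: no congestion
 * NET_RX_DROP: packet was dropped
 */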
5770int netif_receive_skb(struct sk_buff *skb)
5771{
5772 int ret;
5773
5774 trace_netif_receive_skb_entry(skb);
5775
5776 ret = netif_receive_skb_internal(skb);
5777 trace_netif_receive_skb_exit(ret);
5778
5779 return ret;
5780}
5781EXPORT_SYMBOL(netif_receive_skb);
5782
5783
5784
5785
5786
5787
5788
5789
5790
5791
5792
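/**
 * netif_receive_skb_list - process many receive buffers from network
 * @head: list of skbs to process.
 *
 * Since the return value of netif_receive_skb() is normally ignored, and
 * wouldn't be meaningful for a list, this function returns void.
 *
 * This function may only be called from softirq context and interrupts
 * should be enabled.
 */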
5793void netif_receive_skb_list(struct list_head *head)
5794{
5795 struct sk_buff *skb;
5796
5797 if (list_empty(head))
5798 return;
5799 if (trace_netif_receive_skb_list_entry_enabled()) {
5800 list_for_each_entry(skb, head, list)
5801 trace_netif_receive_skb_list_entry(skb);
5802 }
5803 netif_receive_skb_list_internal(head);
5804 trace_netif_receive_skb_list_exit(0);
5805}
5806EXPORT_SYMBOL(netif_receive_skb_list);
5807
5808static DEFINE_PER_CPU(struct work_struct, flush_works);
5809
5810
5811static void flush_backlog(struct work_struct *work)
5812{
5813 struct sk_buff *skb, *tmp;
5814 struct softnet_data *sd;
5815
5816 local_bh_disable();
5817 sd = this_cpu_ptr(&softnet_data);
5818
5819 local_irq_disable();
5820 rps_lock(sd);
5821 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
5822 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
5823 __skb_unlink(skb, &sd->input_pkt_queue);
5824 dev_kfree_skb_irq(skb);
5825 input_queue_head_incr(sd);
5826 }
5827 }
5828 rps_unlock(sd);
5829 local_irq_enable();
5830
5831 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
5832 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
5833 __skb_unlink(skb, &sd->process_queue);
5834 kfree_skb(skb);
5835 input_queue_head_incr(sd);
5836 }
5837 }
5838 local_bh_enable();
5839}
5840
5841static bool flush_required(int cpu)
5842{
5843#if IS_ENABLED(CONFIG_RPS)
5844 struct softnet_data *sd = &per_cpu(softnet_data, cpu);
5845 bool do_flush;
5846
5847 local_irq_disable();
5848 rps_lock(sd);
5849
5850
5851
5852
5853 do_flush = !skb_queue_empty(&sd->input_pkt_queue) ||
5854 !skb_queue_empty_lockless(&sd->process_queue);
5855 rps_unlock(sd);
5856 local_irq_enable();
5857
5858 return do_flush;
5859#endif
5860
5861
5862
5863
5864
5865 return true;
5866}
5867
5868static void flush_all_backlogs(void)
5869{
5870 static cpumask_t flush_cpus;
5871 unsigned int cpu;
5872
5873
5874
5875
5876
5877 ASSERT_RTNL();
5878
5879 get_online_cpus();
5880
5881 cpumask_clear(&flush_cpus);
5882 for_each_online_cpu(cpu) {
5883 if (flush_required(cpu)) {
5884 queue_work_on(cpu, system_highpri_wq,
5885 per_cpu_ptr(&flush_works, cpu));
5886 cpumask_set_cpu(cpu, &flush_cpus);
5887 }
5888 }
5889
5890
5891
5892
5893
5894 for_each_cpu(cpu, &flush_cpus)
5895 flush_work(per_cpu_ptr(&flush_works, cpu));
5896
5897 put_online_cpus();
5898}
5899
5900
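/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */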
5901static void gro_normal_list(struct napi_struct *napi)
5902{
5903 if (!napi->rx_count)
5904 return;
5905 netif_receive_skb_list_internal(&napi->rx_list);
5906 INIT_LIST_HEAD(&napi->rx_list);
5907 napi->rx_count = 0;
5908}
5909
5910
5911
5912
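/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
 * pass the whole batch up to the stack.
 */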
5913static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, int segs)
5914{
5915 list_add_tail(&skb->list, &napi->rx_list);
5916 napi->rx_count += segs;
5917 if (napi->rx_count >= gro_normal_batch)
5918 gro_normal_list(napi);
5919}
5920
5921static int napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
5922{
5923 struct packet_offload *ptype;
5924 __be16 type = skb->protocol;
5925 struct list_head *head = &offload_base;
5926 int err = -ENOENT;
5927
5928 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
5929
5930 if (NAPI_GRO_CB(skb)->count == 1) {
5931 skb_shinfo(skb)->gso_size = 0;
5932 goto out;
5933 }
5934
5935 rcu_read_lock();
5936 list_for_each_entry_rcu(ptype, head, list) {
5937 if (ptype->type != type || !ptype->callbacks.gro_complete)
5938 continue;
5939
5940 err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
5941 ipv6_gro_complete, inet_gro_complete,
5942 skb, 0);
5943 break;
5944 }
5945 rcu_read_unlock();
5946
5947 if (err) {
5948 WARN_ON(&ptype->list == head);
5949 kfree_skb(skb);
5950 return NET_RX_SUCCESS;
5951 }
5952
5953out:
5954 gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
5955 return NET_RX_SUCCESS;
5956}
5957
5958static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
5959 bool flush_old)
5960{
5961 struct list_head *head = &napi->gro_hash[index].list;
5962 struct sk_buff *skb, *p;
5963
5964 list_for_each_entry_safe_reverse(skb, p, head, list) {
5965 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
5966 return;
5967 skb_list_del_init(skb);
5968 napi_gro_complete(napi, skb);
5969 napi->gro_hash[index].count--;
5970 }
5971
5972 if (!napi->gro_hash[index].count)
5973 __clear_bit(index, &napi->gro_bitmask);
5974}
5975
5976
5977
5978
5979
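/* napi->gro_hash[].list contains packets ordered by age, youngest at the
 * head. When @flush_old is true, packets added during the current jiffy
 * are kept; everything older is completed and passed up the stack.
 */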
5980void napi_gro_flush(struct napi_struct *napi, bool flush_old)
5981{
5982 unsigned long bitmask = napi->gro_bitmask;
5983 unsigned int i, base = ~0U;
5984
5985 while ((i = ffs(bitmask)) != 0) {
5986 bitmask >>= i;
5987 base += i;
5988 __napi_gro_flush_chain(napi, base, flush_old);
5989 }
5990}
5991EXPORT_SYMBOL(napi_gro_flush);
5992
5993static void gro_list_prepare(const struct list_head *head,
5994 const struct sk_buff *skb)
5995{
5996 unsigned int maclen = skb->dev->hard_header_len;
5997 u32 hash = skb_get_hash_raw(skb);
5998 struct sk_buff *p;
5999
6000 list_for_each_entry(p, head, list) {
6001 unsigned long diffs;
6002
6003 NAPI_GRO_CB(p)->flush = 0;
6004
6005 if (hash != skb_get_hash_raw(p)) {
6006 NAPI_GRO_CB(p)->same_flow = 0;
6007 continue;
6008 }
6009
6010 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
6011 diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb);
6012 if (skb_vlan_tag_present(p))
6013 diffs |= skb_vlan_tag_get(p) ^ skb_vlan_tag_get(skb);
6014 diffs |= skb_metadata_dst_cmp(p, skb);
6015 diffs |= skb_metadata_differs(p, skb);
6016 if (maclen == ETH_HLEN)
6017 diffs |= compare_ether_header(skb_mac_header(p),
6018 skb_mac_header(skb));
6019 else if (!diffs)
6020 diffs = memcmp(skb_mac_header(p),
6021 skb_mac_header(skb),
6022 maclen);
6023
6024 diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);
6025#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
6026 if (!diffs) {
6027 struct tc_skb_ext *skb_ext = skb_ext_find(skb, TC_SKB_EXT);
6028 struct tc_skb_ext *p_ext = skb_ext_find(p, TC_SKB_EXT);
6029
6030 diffs |= (!!p_ext) ^ (!!skb_ext);
6031 if (!diffs && unlikely(skb_ext))
6032 diffs |= p_ext->chain ^ skb_ext->chain;
6033 }
6034#endif
6035
6036 NAPI_GRO_CB(p)->same_flow = !diffs;
6037 }
6038}
6039
6040static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
6041{
6042 const struct skb_shared_info *pinfo = skb_shinfo(skb);
6043 const skb_frag_t *frag0 = &pinfo->frags[0];
6044
6045 NAPI_GRO_CB(skb)->data_offset = 0;
6046 NAPI_GRO_CB(skb)->frag0 = NULL;
6047 NAPI_GRO_CB(skb)->frag0_len = 0;
6048
6049 if (!skb_headlen(skb) && pinfo->nr_frags &&
6050 !PageHighMem(skb_frag_page(frag0)) &&
6051 (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
6052 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
6053 NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
6054 skb_frag_size(frag0),
6055 skb->end - skb->tail);
6056 }
6057}
6058
6059static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
6060{
6061 struct skb_shared_info *pinfo = skb_shinfo(skb);
6062
6063 BUG_ON(skb->end - skb->tail < grow);
6064
6065 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
6066
6067 skb->data_len -= grow;
6068 skb->tail += grow;
6069
6070 skb_frag_off_add(&pinfo->frags[0], grow);
6071 skb_frag_size_sub(&pinfo->frags[0], grow);
6072
6073 if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
6074 skb_frag_unref(skb, 0);
6075 memmove(pinfo->frags, pinfo->frags + 1,
6076 --pinfo->nr_frags * sizeof(pinfo->frags[0]));
6077 }
6078}
6079
6080static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
6081{
6082 struct sk_buff *oldest;
6083
6084 oldest = list_last_entry(head, struct sk_buff, list);
6085
6086
6087
6088
6089 if (WARN_ON_ONCE(!oldest))
6090 return;
6091
6092
6093
6094
6095 skb_list_del_init(oldest);
6096 napi_gro_complete(napi, oldest);
6097}
6098
6099static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
6100{
6101 u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
6102 struct gro_list *gro_list = &napi->gro_hash[bucket];
6103 struct list_head *head = &offload_base;
6104 struct packet_offload *ptype;
6105 __be16 type = skb->protocol;
6106 struct sk_buff *pp = NULL;
6107 enum gro_result ret;
6108 int same_flow;
6109 int grow;
6110
6111 if (netif_elide_gro(skb->dev))
6112 goto normal;
6113
6114 gro_list_prepare(&gro_list->list, skb);
6115
6116 rcu_read_lock();
6117 list_for_each_entry_rcu(ptype, head, list) {
6118 if (ptype->type != type || !ptype->callbacks.gro_receive)
6119 continue;
6120
6121 skb_set_network_header(skb, skb_gro_offset(skb));
6122 skb_reset_mac_len(skb);
6123 NAPI_GRO_CB(skb)->same_flow = 0;
6124 NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
6125 NAPI_GRO_CB(skb)->free = 0;
6126 NAPI_GRO_CB(skb)->encap_mark = 0;
6127 NAPI_GRO_CB(skb)->recursion_counter = 0;
6128 NAPI_GRO_CB(skb)->is_fou = 0;
6129 NAPI_GRO_CB(skb)->is_atomic = 1;
6130 NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
6131
6132
6133 switch (skb->ip_summed) {
6134 case CHECKSUM_COMPLETE:
6135 NAPI_GRO_CB(skb)->csum = skb->csum;
6136 NAPI_GRO_CB(skb)->csum_valid = 1;
6137 NAPI_GRO_CB(skb)->csum_cnt = 0;
6138 break;
6139 case CHECKSUM_UNNECESSARY:
6140 NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
6141 NAPI_GRO_CB(skb)->csum_valid = 0;
6142 break;
6143 default:
6144 NAPI_GRO_CB(skb)->csum_cnt = 0;
6145 NAPI_GRO_CB(skb)->csum_valid = 0;
6146 }
6147
6148 pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
6149 ipv6_gro_receive, inet_gro_receive,
6150 &gro_list->list, skb);
6151 break;
6152 }
6153 rcu_read_unlock();
6154
6155 if (&ptype->list == head)
6156 goto normal;
6157
6158 if (PTR_ERR(pp) == -EINPROGRESS) {
6159 ret = GRO_CONSUMED;
6160 goto ok;
6161 }
6162
6163 same_flow = NAPI_GRO_CB(skb)->same_flow;
6164 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
6165
6166 if (pp) {
6167 skb_list_del_init(pp);
6168 napi_gro_complete(napi, pp);
6169 gro_list->count--;
6170 }
6171
6172 if (same_flow)
6173 goto ok;
6174
6175 if (NAPI_GRO_CB(skb)->flush)
6176 goto normal;
6177
6178 if (unlikely(gro_list->count >= MAX_GRO_SKBS))
6179 gro_flush_oldest(napi, &gro_list->list);
6180 else
6181 gro_list->count++;
6182
6183 NAPI_GRO_CB(skb)->count = 1;
6184 NAPI_GRO_CB(skb)->age = jiffies;
6185 NAPI_GRO_CB(skb)->last = skb;
6186 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
6187 list_add(&skb->list, &gro_list->list);
6188 ret = GRO_HELD;
6189
6190pull:
6191 grow = skb_gro_offset(skb) - skb_headlen(skb);
6192 if (grow > 0)
6193 gro_pull_from_frag0(skb, grow);
6194ok:
6195 if (gro_list->count) {
6196 if (!test_bit(bucket, &napi->gro_bitmask))
6197 __set_bit(bucket, &napi->gro_bitmask);
6198 } else if (test_bit(bucket, &napi->gro_bitmask)) {
6199 __clear_bit(bucket, &napi->gro_bitmask);
6200 }
6201
6202 return ret;
6203
6204normal:
6205 ret = GRO_NORMAL;
6206 goto pull;
6207}
6208
6209struct packet_offload *gro_find_receive_by_type(__be16 type)
6210{
6211 struct list_head *offload_head = &offload_base;
6212 struct packet_offload *ptype;
6213
6214 list_for_each_entry_rcu(ptype, offload_head, list) {
6215 if (ptype->type != type || !ptype->callbacks.gro_receive)
6216 continue;
6217 return ptype;
6218 }
6219 return NULL;
6220}
6221EXPORT_SYMBOL(gro_find_receive_by_type);
6222
6223struct packet_offload *gro_find_complete_by_type(__be16 type)
6224{
6225 struct list_head *offload_head = &offload_base;
6226 struct packet_offload *ptype;
6227
6228 list_for_each_entry_rcu(ptype, offload_head, list) {
6229 if (ptype->type != type || !ptype->callbacks.gro_complete)
6230 continue;
6231 return ptype;
6232 }
6233 return NULL;
6234}
6235EXPORT_SYMBOL(gro_find_complete_by_type);
6236
6237static gro_result_t napi_skb_finish(struct napi_struct *napi,
6238 struct sk_buff *skb,
6239 gro_result_t ret)
6240{
6241 switch (ret) {
6242 case GRO_NORMAL:
6243 gro_normal_one(napi, skb, 1);
6244 break;
6245
6246 case GRO_MERGED_FREE:
6247 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
6248 napi_skb_free_stolen_head(skb);
6249 else if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
6250 __kfree_skb(skb);
6251 else
6252 __kfree_skb_defer(skb);
6253 break;
6254
6255 case GRO_HELD:
6256 case GRO_MERGED:
6257 case GRO_CONSUMED:
6258 break;
6259 }
6260
6261 return ret;
6262}
6263
6264gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
6265{
6266 gro_result_t ret;
6267
6268 skb_mark_napi_id(skb, napi);
6269 trace_napi_gro_receive_entry(skb);
6270
6271 skb_gro_reset_offset(skb, 0);
6272
6273 ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
6274 trace_napi_gro_receive_exit(ret);
6275
6276 return ret;
6277}
6278EXPORT_SYMBOL(napi_gro_receive);
6279
6280static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
6281{
6282 if (unlikely(skb->pfmemalloc)) {
6283 consume_skb(skb);
6284 return;
6285 }
6286 __skb_pull(skb, skb_headlen(skb));
6287
6288 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
6289 __vlan_hwaccel_clear_tag(skb);
6290 skb->dev = napi->dev;
6291 skb->skb_iif = 0;
6292
6293
6294 skb->pkt_type = PACKET_HOST;
6295
6296 skb->encapsulation = 0;
6297 skb_shinfo(skb)->gso_type = 0;
6298 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
6299 skb_ext_reset(skb);
6300 nf_reset_ct(skb);
6301
6302 napi->skb = skb;
6303}
6304
6305struct sk_buff *napi_get_frags(struct napi_struct *napi)
6306{
6307 struct sk_buff *skb = napi->skb;
6308
6309 if (!skb) {
6310 skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
6311 if (skb) {
6312 napi->skb = skb;
6313 skb_mark_napi_id(skb, napi);
6314 }
6315 }
6316 return skb;
6317}
6318EXPORT_SYMBOL(napi_get_frags);
6319
6320static gro_result_t napi_frags_finish(struct napi_struct *napi,
6321 struct sk_buff *skb,
6322 gro_result_t ret)
6323{
6324 switch (ret) {
6325 case GRO_NORMAL:
6326 case GRO_HELD:
6327 __skb_push(skb, ETH_HLEN);
6328 skb->protocol = eth_type_trans(skb, skb->dev);
6329 if (ret == GRO_NORMAL)
6330 gro_normal_one(napi, skb, 1);
6331 break;
6332
6333 case GRO_MERGED_FREE:
6334 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
6335 napi_skb_free_stolen_head(skb);
6336 else
6337 napi_reuse_skb(napi, skb);
6338 break;
6339
6340 case GRO_MERGED:
6341 case GRO_CONSUMED:
6342 break;
6343 }
6344
6345 return ret;
6346}
6347
6348
6349
6350
6351
6352static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
6353{
6354 struct sk_buff *skb = napi->skb;
6355 const struct ethhdr *eth;
6356 unsigned int hlen = sizeof(*eth);
6357
6358 napi->skb = NULL;
6359
6360 skb_reset_mac_header(skb);
6361 skb_gro_reset_offset(skb, hlen);
6362
6363 if (unlikely(skb_gro_header_hard(skb, hlen))) {
6364 eth = skb_gro_header_slow(skb, hlen, 0);
6365 if (unlikely(!eth)) {
6366 net_warn_ratelimited("%s: dropping impossible skb from %s\n",
6367 __func__, napi->dev->name);
6368 napi_reuse_skb(napi, skb);
6369 return NULL;
6370 }
6371 } else {
6372 eth = (const struct ethhdr *)skb->data;
6373 gro_pull_from_frag0(skb, hlen);
6374 NAPI_GRO_CB(skb)->frag0 += hlen;
6375 NAPI_GRO_CB(skb)->frag0_len -= hlen;
6376 }
6377 __skb_pull(skb, hlen);
6378
6379
6380
6381
6382
6383
6384 skb->protocol = eth->h_proto;
6385
6386 return skb;
6387}
6388
6389gro_result_t napi_gro_frags(struct napi_struct *napi)
6390{
6391 gro_result_t ret;
6392 struct sk_buff *skb = napi_frags_skb(napi);
6393
6394 trace_napi_gro_frags_entry(skb);
6395
6396 ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
6397 trace_napi_gro_frags_exit(ret);
6398
6399 return ret;
6400}
6401EXPORT_SYMBOL(napi_gro_frags);
6402
6403
6404
6405
6406__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
6407{
6408 __wsum wsum;
6409 __sum16 sum;
6410
6411 wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
6412
6413
6414 sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
6415
6416 if (likely(!sum)) {
6417 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
6418 !skb->csum_complete_sw)
6419 netdev_rx_csum_fault(skb->dev, skb);
6420 }
6421
6422 NAPI_GRO_CB(skb)->csum = wsum;
6423 NAPI_GRO_CB(skb)->csum_valid = 1;
6424
6425 return sum;
6426}
6427EXPORT_SYMBOL(__skb_gro_checksum_complete);
6428
6429static void net_rps_send_ipi(struct softnet_data *remsd)
6430{
6431#ifdef CONFIG_RPS
6432 while (remsd) {
6433 struct softnet_data *next = remsd->rps_ipi_next;
6434
6435 if (cpu_online(remsd->cpu))
6436 smp_call_function_single_async(remsd->cpu, &remsd->csd);
6437 remsd = next;
6438 }
6439#endif
6440}
6441
6442
6443
6444
6445
6446static void net_rps_action_and_irq_enable(struct softnet_data *sd)
6447{
6448#ifdef CONFIG_RPS
6449 struct softnet_data *remsd = sd->rps_ipi_list;
6450
6451 if (remsd) {
6452 sd->rps_ipi_list = NULL;
6453
6454 local_irq_enable();
6455
6456
6457 net_rps_send_ipi(remsd);
6458 } else
6459#endif
6460 local_irq_enable();
6461}
6462
6463static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
6464{
6465#ifdef CONFIG_RPS
6466 return sd->rps_ipi_list != NULL;
6467#else
6468 return false;
6469#endif
6470}
6471
6472static int process_backlog(struct napi_struct *napi, int quota)
6473{
6474 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
6475 bool again = true;
6476 int work = 0;
6477
6478
6479
6480
6481 if (sd_has_rps_ipi_waiting(sd)) {
6482 local_irq_disable();
6483 net_rps_action_and_irq_enable(sd);
6484 }
6485
6486 napi->weight = dev_rx_weight;
6487 while (again) {
6488 struct sk_buff *skb;
6489
6490 while ((skb = __skb_dequeue(&sd->process_queue))) {
6491 rcu_read_lock();
6492 __netif_receive_skb(skb);
6493 rcu_read_unlock();
6494 input_queue_head_incr(sd);
6495 if (++work >= quota)
6496 return work;
6497
6498 }
6499
6500 local_irq_disable();
6501 rps_lock(sd);
6502 if (skb_queue_empty(&sd->input_pkt_queue)) {
6503
6504
6505
6506
6507
6508
6509
6510
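 /* Inline a custom version of __napi_complete():
  * only the current cpu owns and manipulates this napi,
  * and NAPI_STATE_SCHED is the only possible flag set on backlog,
  * so a plain write is sufficient here, no clear_bit() or
  * smp_mb() memory barrier is needed.
  */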
6511 napi->state = 0;
6512 again = false;
6513 } else {
6514 skb_queue_splice_tail_init(&sd->input_pkt_queue,
6515 &sd->process_queue);
6516 }
6517 rps_unlock(sd);
6518 local_irq_enable();
6519 }
6520
6521 return work;
6522}
6523
6524
6525
6526
6527
6528
6529
6530
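/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run.
 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
 */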
6531void __napi_schedule(struct napi_struct *n)
6532{
6533 unsigned long flags;
6534
6535 local_irq_save(flags);
6536 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
6537 local_irq_restore(flags);
6538}
6539EXPORT_SYMBOL(__napi_schedule);
6540
6541
6542
6543
6544
6545
6546
6547
6548
6549
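/**
 * napi_schedule_prep - check if napi can be scheduled
 * @n: napi context
 *
 * Test if the NAPI routine is already running, and if not mark it as
 * running. This is used as a condition variable to ensure only one
 * NAPI poll instance runs. We also make sure there is no pending NAPI
 * disable.
 */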
6550bool napi_schedule_prep(struct napi_struct *n)
6551{
6552 unsigned long val, new;
6553
6554 do {
6555 val = READ_ONCE(n->state);
6556 if (unlikely(val & NAPIF_STATE_DISABLE))
6557 return false;
6558 new = val | NAPIF_STATE_SCHED;
6559
6560
6561
6562
6563
6564
6565
6566 new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
6567 NAPIF_STATE_MISSED;
6568 } while (cmpxchg(&n->state, val, new) != val);
6569
6570 return !(val & NAPIF_STATE_SCHED);
6571}
6572EXPORT_SYMBOL(napi_schedule_prep);
6573
6574
6575
6576
6577
6578
6579
6580
6581
6582
6583
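/**
 * __napi_schedule_irqoff - schedule for receive
 * @n: entry to schedule
 *
 * Variant of __napi_schedule() assuming hard irqs are masked.
 *
 * On PREEMPT_RT enabled kernels this maps to __napi_schedule(),
 * because the interrupts-disabled assumption might not be true
 * due to force-threaded interrupts and spinlock substitution.
 */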
6584void __napi_schedule_irqoff(struct napi_struct *n)
6585{
6586 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
6587 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
6588 else
6589 __napi_schedule(n);
6590}
6591EXPORT_SYMBOL(__napi_schedule_irqoff);
6592
6593bool napi_complete_done(struct napi_struct *n, int work_done)
6594{
6595 unsigned long flags, val, new, timeout = 0;
6596 bool ret = true;
6597
6598
6599
6600
6601
6602
6603
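 /* Some drivers call us directly instead of through napi_complete():
  * 1) Don't let napi dequeue from the cpu poll list just in case it is
  *    running on a different cpu.
  * 2) If we are busy polling, do nothing here: we have the guarantee
  *    we will be called later.
  */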
6604 if (unlikely(n->state & (NAPIF_STATE_NPSVC |
6605 NAPIF_STATE_IN_BUSY_POLL)))
6606 return false;
6607
6608 if (work_done) {
6609 if (n->gro_bitmask)
6610 timeout = READ_ONCE(n->dev->gro_flush_timeout);
6611 n->defer_hard_irqs_count = READ_ONCE(n->dev->napi_defer_hard_irqs);
6612 }
6613 if (n->defer_hard_irqs_count > 0) {
6614 n->defer_hard_irqs_count--;
6615 timeout = READ_ONCE(n->dev->gro_flush_timeout);
6616 if (timeout)
6617 ret = false;
6618 }
6619 if (n->gro_bitmask) {
6620
6621
6622
6623
6624 napi_gro_flush(n, !!timeout);
6625 }
6626
6627 gro_normal_list(n);
6628
6629 if (unlikely(!list_empty(&n->poll_list))) {
6630
6631 local_irq_save(flags);
6632 list_del_init(&n->poll_list);
6633 local_irq_restore(flags);
6634 }
6635
6636 do {
6637 val = READ_ONCE(n->state);
6638
6639 WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));
6640
6641 new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED |
6642 NAPIF_STATE_SCHED_THREADED |
6643 NAPIF_STATE_PREFER_BUSY_POLL);
6644
6645
6646
6647
6648
6649 new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
6650 NAPIF_STATE_SCHED;
6651 } while (cmpxchg(&n->state, val, new) != val);
6652
6653 if (unlikely(val & NAPIF_STATE_MISSED)) {
6654 __napi_schedule(n);
6655 return false;
6656 }
6657
6658 if (timeout)
6659 hrtimer_start(&n->timer, ns_to_ktime(timeout),
6660 HRTIMER_MODE_REL_PINNED);
6661 return ret;
6662}
6663EXPORT_SYMBOL(napi_complete_done);
6664
6665
6666static struct napi_struct *napi_by_id(unsigned int napi_id)
6667{
6668 unsigned int hash = napi_id % HASH_SIZE(napi_hash);
6669 struct napi_struct *napi;
6670
6671 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
6672 if (napi->napi_id == napi_id)
6673 return napi;
6674
6675 return NULL;
6676}
6677
6678#if defined(CONFIG_NET_RX_BUSY_POLL)
6679
6680static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule)
6681{
6682 if (!skip_schedule) {
6683 gro_normal_list(napi);
6684 __napi_schedule(napi);
6685 return;
6686 }
6687
6688 if (napi->gro_bitmask) {
6689
6690
6691
6692 napi_gro_flush(napi, HZ >= 1000);
6693 }
6694
6695 gro_normal_list(napi);
6696 clear_bit(NAPI_STATE_SCHED, &napi->state);
6697}
6698
6699static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock, bool prefer_busy_poll,
6700 u16 budget)
6701{
6702 bool skip_schedule = false;
6703 unsigned long timeout;
6704 int rc;
6705
6706
6707
6708
6709
6710
6711
6712
6713
6714
6715 clear_bit(NAPI_STATE_MISSED, &napi->state);
6716 clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
6717
6718 local_bh_disable();
6719
6720 if (prefer_busy_poll) {
6721 napi->defer_hard_irqs_count = READ_ONCE(napi->dev->napi_defer_hard_irqs);
6722 timeout = READ_ONCE(napi->dev->gro_flush_timeout);
6723 if (napi->defer_hard_irqs_count && timeout) {
6724 hrtimer_start(&napi->timer, ns_to_ktime(timeout), HRTIMER_MODE_REL_PINNED);
6725 skip_schedule = true;
6726 }
6727 }
6728
6729
6730
6731
6732 rc = napi->poll(napi, budget);
6733
6734
6735
6736
6737 trace_napi_poll(napi, rc, budget);
6738 netpoll_poll_unlock(have_poll_lock);
6739 if (rc == budget)
6740 __busy_poll_stop(napi, skip_schedule);
6741 local_bh_enable();
6742}
6743
6744void napi_busy_loop(unsigned int napi_id,
6745 bool (*loop_end)(void *, unsigned long),
6746 void *loop_end_arg, bool prefer_busy_poll, u16 budget)
6747{
6748 unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
6749 int (*napi_poll)(struct napi_struct *napi, int budget);
6750 void *have_poll_lock = NULL;
6751 struct napi_struct *napi;
6752
6753restart:
6754 napi_poll = NULL;
6755
6756 rcu_read_lock();
6757
6758 napi = napi_by_id(napi_id);
6759 if (!napi)
6760 goto out;
6761
6762 preempt_disable();
6763 for (;;) {
6764 int work = 0;
6765
6766 local_bh_disable();
6767 if (!napi_poll) {
6768 unsigned long val = READ_ONCE(napi->state);
6769
6770
6771
6772
6773 if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
6774 NAPIF_STATE_IN_BUSY_POLL)) {
6775 if (prefer_busy_poll)
6776 set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
6777 goto count;
6778 }
6779 if (cmpxchg(&napi->state, val,
6780 val | NAPIF_STATE_IN_BUSY_POLL |
6781 NAPIF_STATE_SCHED) != val) {
6782 if (prefer_busy_poll)
6783 set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
6784 goto count;
6785 }
6786 have_poll_lock = netpoll_poll_lock(napi);
6787 napi_poll = napi->poll;
6788 }
6789 work = napi_poll(napi, budget);
6790 trace_napi_poll(napi, work, budget);
6791 gro_normal_list(napi);
6792count:
6793 if (work > 0)
6794 __NET_ADD_STATS(dev_net(napi->dev),
6795 LINUX_MIB_BUSYPOLLRXPACKETS, work);
6796 local_bh_enable();
6797
6798 if (!loop_end || loop_end(loop_end_arg, start_time))
6799 break;
6800
6801 if (unlikely(need_resched())) {
6802 if (napi_poll)
6803 busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget);
6804 preempt_enable();
6805 rcu_read_unlock();
6806 cond_resched();
6807 if (loop_end(loop_end_arg, start_time))
6808 return;
6809 goto restart;
6810 }
6811 cpu_relax();
6812 }
6813 if (napi_poll)
6814 busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget);
6815 preempt_enable();
6816out:
6817 rcu_read_unlock();
6818}
6819EXPORT_SYMBOL(napi_busy_loop);
6820
6821#endif
6822
6823static void napi_hash_add(struct napi_struct *napi)
6824{
6825 if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state))
6826 return;
6827
6828 spin_lock(&napi_hash_lock);
6829
6830
6831 do {
6832 if (unlikely(++napi_gen_id < MIN_NAPI_ID))
6833 napi_gen_id = MIN_NAPI_ID;
6834 } while (napi_by_id(napi_gen_id));
6835 napi->napi_id = napi_gen_id;
6836
6837 hlist_add_head_rcu(&napi->napi_hash_node,
6838 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
6839
6840 spin_unlock(&napi_hash_lock);
6841}
6842
6843
6844
6845
6846static void napi_hash_del(struct napi_struct *napi)
6847{
6848 spin_lock(&napi_hash_lock);
6849
6850 hlist_del_init_rcu(&napi->napi_hash_node);
6851
6852 spin_unlock(&napi_hash_lock);
6853}
6854
6855static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
6856{
6857 struct napi_struct *napi;
6858
6859 napi = container_of(timer, struct napi_struct, timer);
6860
6861
6862
6863
6864 if (!napi_disable_pending(napi) &&
6865 !test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) {
6866 clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
6867 __napi_schedule_irqoff(napi);
6868 }
6869
6870 return HRTIMER_NORESTART;
6871}
6872
6873static void init_gro_hash(struct napi_struct *napi)
6874{
6875 int i;
6876
6877 for (i = 0; i < GRO_HASH_BUCKETS; i++) {
6878 INIT_LIST_HEAD(&napi->gro_hash[i].list);
6879 napi->gro_hash[i].count = 0;
6880 }
6881 napi->gro_bitmask = 0;
6882}
6883
6884int dev_set_threaded(struct net_device *dev, bool threaded)
6885{
6886 struct napi_struct *napi;
6887 int err = 0;
6888
6889 if (dev->threaded == threaded)
6890 return 0;
6891
6892 if (threaded) {
6893 list_for_each_entry(napi, &dev->napi_list, dev_list) {
6894 if (!napi->thread) {
6895 err = napi_kthread_create(napi);
6896 if (err) {
6897 threaded = false;
6898 break;
6899 }
6900 }
6901 }
6902 }
6903
6904 dev->threaded = threaded;
6905
6906
6907
6908
6909 smp_mb__before_atomic();
6910
6911
6912
6913
6914
6915
6916
6917 list_for_each_entry(napi, &dev->napi_list, dev_list) {
6918 if (threaded)
6919 set_bit(NAPI_STATE_THREADED, &napi->state);
6920 else
6921 clear_bit(NAPI_STATE_THREADED, &napi->state);
6922 }
6923
6924 return err;
6925}
6926EXPORT_SYMBOL(dev_set_threaded);
6927
6928void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
6929 int (*poll)(struct napi_struct *, int), int weight)
6930{
6931 if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state)))
6932 return;
6933
6934 INIT_LIST_HEAD(&napi->poll_list);
6935 INIT_HLIST_NODE(&napi->napi_hash_node);
6936 hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
6937 napi->timer.function = napi_watchdog;
6938 init_gro_hash(napi);
6939 napi->skb = NULL;
6940 INIT_LIST_HEAD(&napi->rx_list);
6941 napi->rx_count = 0;
6942 napi->poll = poll;
6943 if (weight > NAPI_POLL_WEIGHT)
6944 netdev_err_once(dev, "%s() called with weight %d\n", __func__,
6945 weight);
6946 napi->weight = weight;
6947 napi->dev = dev;
6948#ifdef CONFIG_NETPOLL
6949 napi->poll_owner = -1;
6950#endif
6951 set_bit(NAPI_STATE_SCHED, &napi->state);
6952 set_bit(NAPI_STATE_NPSVC, &napi->state);
6953 list_add_rcu(&napi->dev_list, &dev->napi_list);
6954 napi_hash_add(napi);
6955
6956
6957
6958
6959 if (dev->threaded && napi_kthread_create(napi))
6960 dev->threaded = 0;
6961}
6962EXPORT_SYMBOL(netif_napi_add);
6963
6964void napi_disable(struct napi_struct *n)
6965{
6966 might_sleep();
6967 set_bit(NAPI_STATE_DISABLE, &n->state);
6968
6969 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
6970 msleep(1);
6971 while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
6972 msleep(1);
6973
6974 hrtimer_cancel(&n->timer);
6975
6976 clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state);
6977 clear_bit(NAPI_STATE_DISABLE, &n->state);
6978 clear_bit(NAPI_STATE_THREADED, &n->state);
6979}
6980EXPORT_SYMBOL(napi_disable);
6981
6982
6983
6984
6985
6986
6987
6988
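/**
 * napi_enable - enable NAPI scheduling
 * @n: NAPI context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable().
 */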
6989void napi_enable(struct napi_struct *n)
6990{
6991 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
6992 smp_mb__before_atomic();
6993 clear_bit(NAPI_STATE_SCHED, &n->state);
6994 clear_bit(NAPI_STATE_NPSVC, &n->state);
6995 if (n->dev->threaded && n->thread)
6996 set_bit(NAPI_STATE_THREADED, &n->state);
6997}
6998EXPORT_SYMBOL(napi_enable);
6999
7000static void flush_gro_hash(struct napi_struct *napi)
7001{
7002 int i;
7003
7004 for (i = 0; i < GRO_HASH_BUCKETS; i++) {
7005 struct sk_buff *skb, *n;
7006
7007 list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list)
7008 kfree_skb(skb);
7009 napi->gro_hash[i].count = 0;
7010 }
7011}
7012
7013
7014void __netif_napi_del(struct napi_struct *napi)
7015{
7016 if (!test_and_clear_bit(NAPI_STATE_LISTED, &napi->state))
7017 return;
7018
7019 napi_hash_del(napi);
7020 list_del_rcu(&napi->dev_list);
7021 napi_free_frags(napi);
7022
7023 flush_gro_hash(napi);
7024 napi->gro_bitmask = 0;
7025
7026 if (napi->thread) {
7027 kthread_stop(napi->thread);
7028 napi->thread = NULL;
7029 }
7030}
7031EXPORT_SYMBOL(__netif_napi_del);
7032
7033static int __napi_poll(struct napi_struct *n, bool *repoll)
7034{
7035 int work, weight;
7036
7037 weight = n->weight;
7038
7039
7040
7041
7042
7043
7044
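 /* This NAPI_STATE_SCHED test is for avoiding a race with netpoll's
  * poll_napi(): only the entity which obtains the lock and sees
  * NAPI_STATE_SCHED set will actually make the ->poll() call, so we
  * avoid accidentally calling ->poll() when NAPI is not scheduled.
  */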
7045 work = 0;
7046 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
7047 work = n->poll(n, weight);
7048 trace_napi_poll(n, work, weight);
7049 }
7050
7051 if (unlikely(work > weight))
7052 pr_err_once("NAPI poll function %pS returned %d, exceeding its budget of %d.\n",
7053 n->poll, work, weight);
7054
7055 if (likely(work < weight))
7056 return work;
7057
7058
7059
7060
7061
7062
7063 if (unlikely(napi_disable_pending(n))) {
7064 napi_complete(n);
7065 return work;
7066 }
7067
7068
7069
7070
7071 if (napi_prefer_busy_poll(n)) {
7072 if (napi_complete_done(n, work)) {
7073
7074
7075
7076 napi_schedule(n);
7077 }
7078 return work;
7079 }
7080
7081 if (n->gro_bitmask) {
7082
7083
7084
7085 napi_gro_flush(n, HZ >= 1000);
7086 }
7087
7088 gro_normal_list(n);
7089
7090
7091
7092
7093 if (unlikely(!list_empty(&n->poll_list))) {
7094 pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
7095 n->dev ? n->dev->name : "backlog");
7096 return work;
7097 }
7098
7099 *repoll = true;
7100
7101 return work;
7102}
7103
7104static int napi_poll(struct napi_struct *n, struct list_head *repoll)
7105{
7106 bool do_repoll = false;
7107 void *have;
7108 int work;
7109
7110 list_del_init(&n->poll_list);
7111
7112 have = netpoll_poll_lock(n);
7113
7114 work = __napi_poll(n, &do_repoll);
7115
7116 if (do_repoll)
7117 list_add_tail(&n->poll_list, repoll);
7118
7119 netpoll_poll_unlock(have);
7120
7121 return work;
7122}
7123
7124static int napi_thread_wait(struct napi_struct *napi)
7125{
7126 bool woken = false;
7127
7128 set_current_state(TASK_INTERRUPTIBLE);
7129
7130 while (!kthread_should_stop()) {
7131
7132
7133
7134
7135
7136 if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state) || woken) {
7137 WARN_ON(!list_empty(&napi->poll_list));
7138 __set_current_state(TASK_RUNNING);
7139 return 0;
7140 }
7141
7142 schedule();
7143
7144 woken = true;
7145 set_current_state(TASK_INTERRUPTIBLE);
7146 }
7147 __set_current_state(TASK_RUNNING);
7148
7149 return -1;
7150}
7151
7152static int napi_threaded_poll(void *data)
7153{
7154 struct napi_struct *napi = data;
7155 void *have;
7156
7157 while (!napi_thread_wait(napi)) {
7158 for (;;) {
7159 bool repoll = false;
7160
7161 local_bh_disable();
7162
7163 have = netpoll_poll_lock(napi);
7164 __napi_poll(napi, &repoll);
7165 netpoll_poll_unlock(have);
7166
7167 local_bh_enable();
7168
7169 if (!repoll)
7170 break;
7171
7172 cond_resched();
7173 }
7174 }
7175 return 0;
7176}
7177
7178static __latent_entropy void net_rx_action(struct softirq_action *h)
7179{
7180 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
7181 unsigned long time_limit = jiffies +
7182 usecs_to_jiffies(netdev_budget_usecs);
7183 int budget = netdev_budget;
7184 LIST_HEAD(list);
7185 LIST_HEAD(repoll);
7186
7187 local_irq_disable();
7188 list_splice_init(&sd->poll_list, &list);
7189 local_irq_enable();
7190
7191 for (;;) {
7192 struct napi_struct *n;
7193
7194 if (list_empty(&list)) {
7195 if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
7196 return;
7197 break;
7198 }
7199
7200 n = list_first_entry(&list, struct napi_struct, poll_list);
7201 budget -= napi_poll(n, &repoll);
7202
7203
7204
7205
7206
7207 if (unlikely(budget <= 0 ||
7208 time_after_eq(jiffies, time_limit))) {
7209 sd->time_squeeze++;
7210 break;
7211 }
7212 }
7213
7214 local_irq_disable();
7215
7216 list_splice_tail_init(&sd->poll_list, &list);
7217 list_splice_tail(&repoll, &list);
7218 list_splice(&list, &sd->poll_list);
7219 if (!list_empty(&sd->poll_list))
7220 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
7221
7222 net_rps_action_and_irq_enable(sd);
7223}
7224
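/* An entry on a device's adj_list.upper / adj_list.lower adjacency list:
 * @master: upper master flag, there can only be one master device per list
 * @ignore: lookup-ignore flag, skipped by the __netdev_* walkers
 * @ref_nr: number of times this adjacency has been added
 * @private: private data supplied by the device that created the link
 */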
7225struct netdev_adjacent {
7226 struct net_device *dev;
7227
7228
7229 bool master;
7230
7231
7232 bool ignore;
7233
7234
7235 u16 ref_nr;
7236
7237
7238 void *private;
7239
7240 struct list_head list;
7241 struct rcu_head rcu;
7242};
7243
7244static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
7245 struct list_head *adj_list)
7246{
7247 struct netdev_adjacent *adj;
7248
7249 list_for_each_entry(adj, adj_list, list) {
7250 if (adj->dev == adj_dev)
7251 return adj;
7252 }
7253 return NULL;
7254}
7255
7256static int ____netdev_has_upper_dev(struct net_device *upper_dev,
7257 struct netdev_nested_priv *priv)
7258{
7259 struct net_device *dev = (struct net_device *)priv->data;
7260
7261 return upper_dev == dev;
7262}
7263
7264
7265
7266
7267
7268
7269
7270
7271
7272
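/**
 * netdev_has_upper_dev - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to the specified upper device and return
 * true in case it is. Note that this checks the entire upper device chain.
 * The caller must hold the RTNL lock.
 */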
7273bool netdev_has_upper_dev(struct net_device *dev,
7274 struct net_device *upper_dev)
7275{
7276 struct netdev_nested_priv priv = {
7277 .data = (void *)upper_dev,
7278 };
7279
7280 ASSERT_RTNL();
7281
7282 return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
7283 &priv);
7284}
7285EXPORT_SYMBOL(netdev_has_upper_dev);
7286
7287
7288
7289
7290
7291
7292
7293
7294
7295
7296
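/**
 * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to the specified upper device and return
 * true in case it is. Note that this checks the entire upper device chain.
 * The caller must hold the RCU read lock (or RTNL).
 */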
7297bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
7298 struct net_device *upper_dev)
7299{
7300 struct netdev_nested_priv priv = {
7301 .data = (void *)upper_dev,
7302 };
7303
7304 return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
7305 &priv);
7306}
7307EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
7308
7309
7310
7311
7312
7313
7314
7315
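/**
 * netdev_has_any_upper_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to an upper device and return true in
 * case it is. The caller must hold the RTNL lock.
 */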
7316bool netdev_has_any_upper_dev(struct net_device *dev)
7317{
7318 ASSERT_RTNL();
7319
7320 return !list_empty(&dev->adj_list.upper);
7321}
7322EXPORT_SYMBOL(netdev_has_any_upper_dev);
7323
7324
7325
7326
7327
7328
7329
7330
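/**
 * netdev_master_upper_dev_get - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return a pointer to it, or NULL in case
 * it's not there. The caller must hold the RTNL lock.
 */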
7331struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
7332{
7333 struct netdev_adjacent *upper;
7334
7335 ASSERT_RTNL();
7336
7337 if (list_empty(&dev->adj_list.upper))
7338 return NULL;
7339
7340 upper = list_first_entry(&dev->adj_list.upper,
7341 struct netdev_adjacent, list);
7342 if (likely(upper->master))
7343 return upper->dev;
7344 return NULL;
7345}
7346EXPORT_SYMBOL(netdev_master_upper_dev_get);
7347
7348static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev)
7349{
7350 struct netdev_adjacent *upper;
7351
7352 ASSERT_RTNL();
7353
7354 if (list_empty(&dev->adj_list.upper))
7355 return NULL;
7356
7357 upper = list_first_entry(&dev->adj_list.upper,
7358 struct netdev_adjacent, list);
7359 if (likely(upper->master) && !upper->ignore)
7360 return upper->dev;
7361 return NULL;
7362}
7363
7364
7365
7366
7367
7368
7369
7370
7371static bool netdev_has_any_lower_dev(struct net_device *dev)
7372{
7373 ASSERT_RTNL();
7374
7375 return !list_empty(&dev->adj_list.lower);
7376}
7377
7378void *netdev_adjacent_get_private(struct list_head *adj_list)
7379{
7380 struct netdev_adjacent *adj;
7381
7382 adj = list_entry(adj_list, struct netdev_adjacent, list);
7383
7384 return adj->private;
7385}
7386EXPORT_SYMBOL(netdev_adjacent_get_private);
7387
7388
7389
7390
7391
7392
7393
7394
7395
7396struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
7397 struct list_head **iter)
7398{
7399 struct netdev_adjacent *upper;
7400
7401 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
7402
7403 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7404
7405 if (&upper->list == &dev->adj_list.upper)
7406 return NULL;
7407
7408 *iter = &upper->list;
7409
7410 return upper->dev;
7411}
7412EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
7413
7414static struct net_device *__netdev_next_upper_dev(struct net_device *dev,
7415 struct list_head **iter,
7416 bool *ignore)
7417{
7418 struct netdev_adjacent *upper;
7419
7420 upper = list_entry((*iter)->next, struct netdev_adjacent, list);
7421
7422 if (&upper->list == &dev->adj_list.upper)
7423 return NULL;
7424
7425 *iter = &upper->list;
7426 *ignore = upper->ignore;
7427
7428 return upper->dev;
7429}
7430
7431static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
7432 struct list_head **iter)
7433{
7434 struct netdev_adjacent *upper;
7435
7436 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
7437
7438 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7439
7440 if (&upper->list == &dev->adj_list.upper)
7441 return NULL;
7442
7443 *iter = &upper->list;
7444
7445 return upper->dev;
7446}
7447
7448static int __netdev_walk_all_upper_dev(struct net_device *dev,
7449 int (*fn)(struct net_device *dev,
7450 struct netdev_nested_priv *priv),
7451 struct netdev_nested_priv *priv)
7452{
7453 struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7454 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7455 int ret, cur = 0;
7456 bool ignore;
7457
7458 now = dev;
7459 iter = &dev->adj_list.upper;
7460
7461 while (1) {
7462 if (now != dev) {
7463 ret = fn(now, priv);
7464 if (ret)
7465 return ret;
7466 }
7467
7468 next = NULL;
7469 while (1) {
7470 udev = __netdev_next_upper_dev(now, &iter, &ignore);
7471 if (!udev)
7472 break;
7473 if (ignore)
7474 continue;
7475
7476 next = udev;
7477 niter = &udev->adj_list.upper;
7478 dev_stack[cur] = now;
7479 iter_stack[cur++] = iter;
7480 break;
7481 }
7482
7483 if (!next) {
7484 if (!cur)
7485 return 0;
7486 next = dev_stack[--cur];
7487 niter = iter_stack[cur];
7488 }
7489
7490 now = next;
7491 iter = niter;
7492 }
7493
7494 return 0;
7495}
7496
7497int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
7498 int (*fn)(struct net_device *dev,
7499 struct netdev_nested_priv *priv),
7500 struct netdev_nested_priv *priv)
7501{
7502 struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7503 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7504 int ret, cur = 0;
7505
7506 now = dev;
7507 iter = &dev->adj_list.upper;
7508
7509 while (1) {
7510 if (now != dev) {
7511 ret = fn(now, priv);
7512 if (ret)
7513 return ret;
7514 }
7515
7516 next = NULL;
7517 while (1) {
7518 udev = netdev_next_upper_dev_rcu(now, &iter);
7519 if (!udev)
7520 break;
7521
7522 next = udev;
7523 niter = &udev->adj_list.upper;
7524 dev_stack[cur] = now;
7525 iter_stack[cur++] = iter;
7526 break;
7527 }
7528
7529 if (!next) {
7530 if (!cur)
7531 return 0;
7532 next = dev_stack[--cur];
7533 niter = iter_stack[cur];
7534 }
7535
7536 now = next;
7537 iter = niter;
7538 }
7539
7540 return 0;
7541}
7542EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
7543
7544static bool __netdev_has_upper_dev(struct net_device *dev,
7545 struct net_device *upper_dev)
7546{
7547 struct netdev_nested_priv priv = {
7548 .flags = 0,
7549 .data = (void *)upper_dev,
7550 };
7551
7552 ASSERT_RTNL();
7553
7554 return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev,
7555 &priv);
7556}
7557
7558
7559
7560
7561
7562
7563
7564
7565
7566
7567
7568
7569void *netdev_lower_get_next_private(struct net_device *dev,
7570 struct list_head **iter)
7571{
7572 struct netdev_adjacent *lower;
7573
7574 lower = list_entry(*iter, struct netdev_adjacent, list);
7575
7576 if (&lower->list == &dev->adj_list.lower)
7577 return NULL;
7578
7579 *iter = lower->list.next;
7580
7581 return lower->private;
7582}
7583EXPORT_SYMBOL(netdev_lower_get_next_private);
7584
7585
7586
7587
7588
7589
7590
7591
7592
7593
7594
7595void *netdev_lower_get_next_private_rcu(struct net_device *dev,
7596 struct list_head **iter)
7597{
7598 struct netdev_adjacent *lower;
7599
7600 WARN_ON_ONCE(!rcu_read_lock_held());
7601
7602 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7603
7604 if (&lower->list == &dev->adj_list.lower)
7605 return NULL;
7606
7607 *iter = &lower->list;
7608
7609 return lower->private;
7610}
7611EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
7612
7613
7614
7615
7616
7617
7618
7619
7620
7621
7622
7623
7624void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
7625{
7626 struct netdev_adjacent *lower;
7627
7628 lower = list_entry(*iter, struct netdev_adjacent, list);
7629
7630 if (&lower->list == &dev->adj_list.lower)
7631 return NULL;
7632
7633 *iter = lower->list.next;
7634
7635 return lower->dev;
7636}
7637EXPORT_SYMBOL(netdev_lower_get_next);
7638
7639static struct net_device *netdev_next_lower_dev(struct net_device *dev,
7640 struct list_head **iter)
7641{
7642 struct netdev_adjacent *lower;
7643
7644 lower = list_entry((*iter)->next, struct netdev_adjacent, list);
7645
7646 if (&lower->list == &dev->adj_list.lower)
7647 return NULL;
7648
7649 *iter = &lower->list;
7650
7651 return lower->dev;
7652}
7653
7654static struct net_device *__netdev_next_lower_dev(struct net_device *dev,
7655 struct list_head **iter,
7656 bool *ignore)
7657{
7658 struct netdev_adjacent *lower;
7659
7660 lower = list_entry((*iter)->next, struct netdev_adjacent, list);
7661
7662 if (&lower->list == &dev->adj_list.lower)
7663 return NULL;
7664
7665 *iter = &lower->list;
7666 *ignore = lower->ignore;
7667
7668 return lower->dev;
7669}
7670
7671int netdev_walk_all_lower_dev(struct net_device *dev,
7672 int (*fn)(struct net_device *dev,
7673 struct netdev_nested_priv *priv),
7674 struct netdev_nested_priv *priv)
7675{
7676 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7677 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7678 int ret, cur = 0;
7679
7680 now = dev;
7681 iter = &dev->adj_list.lower;
7682
7683 while (1) {
7684 if (now != dev) {
7685 ret = fn(now, priv);
7686 if (ret)
7687 return ret;
7688 }
7689
7690 next = NULL;
7691 while (1) {
7692 ldev = netdev_next_lower_dev(now, &iter);
7693 if (!ldev)
7694 break;
7695
7696 next = ldev;
7697 niter = &ldev->adj_list.lower;
7698 dev_stack[cur] = now;
7699 iter_stack[cur++] = iter;
7700 break;
7701 }
7702
7703 if (!next) {
7704 if (!cur)
7705 return 0;
7706 next = dev_stack[--cur];
7707 niter = iter_stack[cur];
7708 }
7709
7710 now = next;
7711 iter = niter;
7712 }
7713
7714 return 0;
7715}
7716EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);
7717
7718static int __netdev_walk_all_lower_dev(struct net_device *dev,
7719 int (*fn)(struct net_device *dev,
7720 struct netdev_nested_priv *priv),
7721 struct netdev_nested_priv *priv)
7722{
7723 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7724 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7725 int ret, cur = 0;
7726 bool ignore;
7727
7728 now = dev;
7729 iter = &dev->adj_list.lower;
7730
7731 while (1) {
7732 if (now != dev) {
7733 ret = fn(now, priv);
7734 if (ret)
7735 return ret;
7736 }
7737
7738 next = NULL;
7739 while (1) {
7740 ldev = __netdev_next_lower_dev(now, &iter, &ignore);
7741 if (!ldev)
7742 break;
7743 if (ignore)
7744 continue;
7745
7746 next = ldev;
7747 niter = &ldev->adj_list.lower;
7748 dev_stack[cur] = now;
7749 iter_stack[cur++] = iter;
7750 break;
7751 }
7752
7753 if (!next) {
7754 if (!cur)
7755 return 0;
7756 next = dev_stack[--cur];
7757 niter = iter_stack[cur];
7758 }
7759
7760 now = next;
7761 iter = niter;
7762 }
7763
7764 return 0;
7765}
7766
7767struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
7768 struct list_head **iter)
7769{
7770 struct netdev_adjacent *lower;
7771
7772 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7773 if (&lower->list == &dev->adj_list.lower)
7774 return NULL;
7775
7776 *iter = &lower->list;
7777
7778 return lower->dev;
7779}
7780EXPORT_SYMBOL(netdev_next_lower_dev_rcu);
7781
7782static u8 __netdev_upper_depth(struct net_device *dev)
7783{
7784 struct net_device *udev;
7785 struct list_head *iter;
7786 u8 max_depth = 0;
7787 bool ignore;
7788
7789 for (iter = &dev->adj_list.upper,
7790 udev = __netdev_next_upper_dev(dev, &iter, &ignore);
7791 udev;
7792 udev = __netdev_next_upper_dev(dev, &iter, &ignore)) {
7793 if (ignore)
7794 continue;
7795 if (max_depth < udev->upper_level)
7796 max_depth = udev->upper_level;
7797 }
7798
7799 return max_depth;
7800}
7801
7802static u8 __netdev_lower_depth(struct net_device *dev)
7803{
7804 struct net_device *ldev;
7805 struct list_head *iter;
7806 u8 max_depth = 0;
7807 bool ignore;
7808
7809 for (iter = &dev->adj_list.lower,
7810 ldev = __netdev_next_lower_dev(dev, &iter, &ignore);
7811 ldev;
7812 ldev = __netdev_next_lower_dev(dev, &iter, &ignore)) {
7813 if (ignore)
7814 continue;
7815 if (max_depth < ldev->lower_level)
7816 max_depth = ldev->lower_level;
7817 }
7818
7819 return max_depth;
7820}
7821
7822static int __netdev_update_upper_level(struct net_device *dev,
7823 struct netdev_nested_priv *__unused)
7824{
7825 dev->upper_level = __netdev_upper_depth(dev) + 1;
7826 return 0;
7827}
7828
7829static int __netdev_update_lower_level(struct net_device *dev,
7830 struct netdev_nested_priv *priv)
7831{
7832 dev->lower_level = __netdev_lower_depth(dev) + 1;
7833
7834#ifdef CONFIG_LOCKDEP
7835 if (!priv)
7836 return 0;
7837
7838 if (priv->flags & NESTED_SYNC_IMM)
7839 dev->nested_level = dev->lower_level - 1;
7840 if (priv->flags & NESTED_SYNC_TODO)
7841 net_unlink_todo(dev);
7842#endif
7843 return 0;
7844}
7845
7846int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
7847 int (*fn)(struct net_device *dev,
7848 struct netdev_nested_priv *priv),
7849 struct netdev_nested_priv *priv)
7850{
7851 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7852 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7853 int ret, cur = 0;
7854
7855 now = dev;
7856 iter = &dev->adj_list.lower;
7857
7858 while (1) {
7859 if (now != dev) {
7860 ret = fn(now, priv);
7861 if (ret)
7862 return ret;
7863 }
7864
7865 next = NULL;
7866 while (1) {
7867 ldev = netdev_next_lower_dev_rcu(now, &iter);
7868 if (!ldev)
7869 break;
7870
7871 next = ldev;
7872 niter = &ldev->adj_list.lower;
7873 dev_stack[cur] = now;
7874 iter_stack[cur++] = iter;
7875 break;
7876 }
7877
7878 if (!next) {
7879 if (!cur)
7880 return 0;
7881 next = dev_stack[--cur];
7882 niter = iter_stack[cur];
7883 }
7884
7885 now = next;
7886 iter = niter;
7887 }
7888
7889 return 0;
7890}
7891EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);
7892
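/**
 * netdev_lower_get_first_private_rcu - Get the first ->private from the
 *				   lower neighbour list, RCU variant
 * @dev: device
 *
 * Gets the first netdev_adjacent->private from the dev's lower neighbour
 * list. The caller must hold RCU read lock.
 */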
7902void *netdev_lower_get_first_private_rcu(struct net_device *dev)
7903{
7904 struct netdev_adjacent *lower;
7905
7906 lower = list_first_or_null_rcu(&dev->adj_list.lower,
7907 struct netdev_adjacent, list);
7908 if (lower)
7909 return lower->private;
7910 return NULL;
7911}
7912EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
7913
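/**
 * netdev_master_upper_dev_get_rcu - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RCU read lock.
 */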
7921struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
7922{
7923 struct netdev_adjacent *upper;
7924
7925 upper = list_first_or_null_rcu(&dev->adj_list.upper,
7926 struct netdev_adjacent, list);
7927 if (upper && likely(upper->master))
7928 return upper->dev;
7929 return NULL;
7930}
7931EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
7932
7933static int netdev_adjacent_sysfs_add(struct net_device *dev,
7934 struct net_device *adj_dev,
7935 struct list_head *dev_list)
7936{
7937 char linkname[IFNAMSIZ+7];
7938
7939 sprintf(linkname, dev_list == &dev->adj_list.upper ?
7940 "upper_%s" : "lower_%s", adj_dev->name);
7941 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
7942 linkname);
7943}
7944static void netdev_adjacent_sysfs_del(struct net_device *dev,
7945 char *name,
7946 struct list_head *dev_list)
7947{
7948 char linkname[IFNAMSIZ+7];
7949
7950 sprintf(linkname, dev_list == &dev->adj_list.upper ?
7951 "upper_%s" : "lower_%s", name);
7952 sysfs_remove_link(&(dev->dev.kobj), linkname);
7953}
7954
7955static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
7956 struct net_device *adj_dev,
7957 struct list_head *dev_list)
7958{
7959 return (dev_list == &dev->adj_list.upper ||
7960 dev_list == &dev->adj_list.lower) &&
7961 net_eq(dev_net(dev), dev_net(adj_dev));
7962}
7963
7964static int __netdev_adjacent_dev_insert(struct net_device *dev,
7965 struct net_device *adj_dev,
7966 struct list_head *dev_list,
7967 void *private, bool master)
7968{
7969 struct netdev_adjacent *adj;
7970 int ret;
7971
7972 adj = __netdev_find_adj(adj_dev, dev_list);
7973
7974 if (adj) {
7975 adj->ref_nr += 1;
7976 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
7977 dev->name, adj_dev->name, adj->ref_nr);
7978
7979 return 0;
7980 }
7981
7982 adj = kmalloc(sizeof(*adj), GFP_KERNEL);
7983 if (!adj)
7984 return -ENOMEM;
7985
7986 adj->dev = adj_dev;
7987 adj->master = master;
7988 adj->ref_nr = 1;
7989 adj->private = private;
7990 adj->ignore = false;
7991 dev_hold(adj_dev);
7992
7993 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
7994 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);
7995
7996 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
7997 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
7998 if (ret)
7999 goto free_adj;
8000 }
8001
8002
8003 if (master) {
8004 ret = sysfs_create_link(&(dev->dev.kobj),
8005 &(adj_dev->dev.kobj), "master");
8006 if (ret)
8007 goto remove_symlinks;
8008
8009 list_add_rcu(&adj->list, dev_list);
8010 } else {
8011 list_add_tail_rcu(&adj->list, dev_list);
8012 }
8013
8014 return 0;
8015
8016remove_symlinks:
8017 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
8018 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
8019free_adj:
8020 kfree(adj);
8021 dev_put(adj_dev);
8022
8023 return ret;
8024}
8025
8026static void __netdev_adjacent_dev_remove(struct net_device *dev,
8027 struct net_device *adj_dev,
8028 u16 ref_nr,
8029 struct list_head *dev_list)
8030{
8031 struct netdev_adjacent *adj;
8032
8033 pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
8034 dev->name, adj_dev->name, ref_nr);
8035
8036 adj = __netdev_find_adj(adj_dev, dev_list);
8037
8038 if (!adj) {
8039 pr_err("Adjacency does not exist for device %s from %s\n",
8040 dev->name, adj_dev->name);
8041 WARN_ON(1);
8042 return;
8043 }
8044
8045 if (adj->ref_nr > ref_nr) {
8046 pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
8047 dev->name, adj_dev->name, ref_nr,
8048 adj->ref_nr - ref_nr);
8049 adj->ref_nr -= ref_nr;
8050 return;
8051 }
8052
8053 if (adj->master)
8054 sysfs_remove_link(&(dev->dev.kobj), "master");
8055
8056 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
8057 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
8058
8059 list_del_rcu(&adj->list);
8060 pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
8061 adj_dev->name, dev->name, adj_dev->name);
8062 dev_put(adj_dev);
8063 kfree_rcu(adj, rcu);
8064}
8065
8066static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
8067 struct net_device *upper_dev,
8068 struct list_head *up_list,
8069 struct list_head *down_list,
8070 void *private, bool master)
8071{
8072 int ret;
8073
8074 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
8075 private, master);
8076 if (ret)
8077 return ret;
8078
8079 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
8080 private, false);
8081 if (ret) {
8082 __netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
8083 return ret;
8084 }
8085
8086 return 0;
8087}
8088
8089static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
8090 struct net_device *upper_dev,
8091 u16 ref_nr,
8092 struct list_head *up_list,
8093 struct list_head *down_list)
8094{
8095 __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
8096 __netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
8097}
8098
8099static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
8100 struct net_device *upper_dev,
8101 void *private, bool master)
8102{
8103 return __netdev_adjacent_dev_link_lists(dev, upper_dev,
8104 &dev->adj_list.upper,
8105 &upper_dev->adj_list.lower,
8106 private, master);
8107}
8108
8109static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
8110 struct net_device *upper_dev)
8111{
8112 __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
8113 &dev->adj_list.upper,
8114 &upper_dev->adj_list.lower);
8115}
8116
8117static int __netdev_upper_dev_link(struct net_device *dev,
8118 struct net_device *upper_dev, bool master,
8119 void *upper_priv, void *upper_info,
8120 struct netdev_nested_priv *priv,
8121 struct netlink_ext_ack *extack)
8122{
8123 struct netdev_notifier_changeupper_info changeupper_info = {
8124 .info = {
8125 .dev = dev,
8126 .extack = extack,
8127 },
8128 .upper_dev = upper_dev,
8129 .master = master,
8130 .linking = true,
8131 .upper_info = upper_info,
8132 };
8133 struct net_device *master_dev;
8134 int ret = 0;
8135
8136 ASSERT_RTNL();
8137
8138 if (dev == upper_dev)
8139 return -EBUSY;
8140
8141
8142 if (__netdev_has_upper_dev(upper_dev, dev))
8143 return -EBUSY;
8144
8145 if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV)
8146 return -EMLINK;
8147
8148 if (!master) {
8149 if (__netdev_has_upper_dev(dev, upper_dev))
8150 return -EEXIST;
8151 } else {
8152 master_dev = __netdev_master_upper_dev_get(dev);
8153 if (master_dev)
8154 return master_dev == upper_dev ? -EEXIST : -EBUSY;
8155 }
8156
8157 ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
8158 &changeupper_info.info);
8159 ret = notifier_to_errno(ret);
8160 if (ret)
8161 return ret;
8162
8163 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
8164 master);
8165 if (ret)
8166 return ret;
8167
8168 ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
8169 &changeupper_info.info);
8170 ret = notifier_to_errno(ret);
8171 if (ret)
8172 goto rollback;
8173
8174 __netdev_update_upper_level(dev, NULL);
8175 __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
8176
8177 __netdev_update_lower_level(upper_dev, priv);
8178 __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
8179 priv);
8180
8181 return 0;
8182
8183rollback:
8184 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
8185
8186 return ret;
8187}
8188
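/**
 * netdev_upper_dev_link - Add a link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 * @extack: netlink extended ack
 *
 * Adds a link to device which is upper to this one. The caller must hold
 * the RTNL lock. On a failure a negative errno code is returned.
 * On success the reference counts are adjusted and the function
 * returns zero.
 */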
8200int netdev_upper_dev_link(struct net_device *dev,
8201 struct net_device *upper_dev,
8202 struct netlink_ext_ack *extack)
8203{
8204 struct netdev_nested_priv priv = {
8205 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
8206 .data = NULL,
8207 };
8208
8209 return __netdev_upper_dev_link(dev, upper_dev, false,
8210 NULL, NULL, &priv, extack);
8211}
8212EXPORT_SYMBOL(netdev_upper_dev_link);
8213
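/**
 * netdev_master_upper_dev_link - Add a master link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 * @upper_priv: upper device private
 * @upper_info: upper info to be passed down via notifier
 * @extack: netlink extended ack
 *
 * Adds a link to device which is upper to this one. In this case, only
 * one master upper device can be linked, although other non-master devices
 * might be linked as well. The caller must hold the RTNL lock.
 * On a failure a negative errno code is returned. On success the reference
 * counts are adjusted and the function returns zero.
 */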
8228int netdev_master_upper_dev_link(struct net_device *dev,
8229 struct net_device *upper_dev,
8230 void *upper_priv, void *upper_info,
8231 struct netlink_ext_ack *extack)
8232{
8233 struct netdev_nested_priv priv = {
8234 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
8235 .data = NULL,
8236 };
8237
8238 return __netdev_upper_dev_link(dev, upper_dev, true,
8239 upper_priv, upper_info, &priv, extack);
8240}
8241EXPORT_SYMBOL(netdev_master_upper_dev_link);
8242
8243static void __netdev_upper_dev_unlink(struct net_device *dev,
8244 struct net_device *upper_dev,
8245 struct netdev_nested_priv *priv)
8246{
8247 struct netdev_notifier_changeupper_info changeupper_info = {
8248 .info = {
8249 .dev = dev,
8250 },
8251 .upper_dev = upper_dev,
8252 .linking = false,
8253 };
8254
8255 ASSERT_RTNL();
8256
8257 changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
8258
8259 call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
8260 &changeupper_info.info);
8261
8262 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
8263
8264 call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
8265 &changeupper_info.info);
8266
8267 __netdev_update_upper_level(dev, NULL);
8268 __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
8269
8270 __netdev_update_lower_level(upper_dev, priv);
8271 __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
8272 priv);
8273}
8274
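/**
 * netdev_upper_dev_unlink - Removes a link to upper device
 * @dev: device
 * @upper_dev: upper device to unlink
 *
 * Removes a link to device which is upper to this one. The caller must hold
 * the RTNL lock.
 */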
8283void netdev_upper_dev_unlink(struct net_device *dev,
8284 struct net_device *upper_dev)
8285{
8286 struct netdev_nested_priv priv = {
8287 .flags = NESTED_SYNC_TODO,
8288 .data = NULL,
8289 };
8290
8291 __netdev_upper_dev_unlink(dev, upper_dev, &priv);
8292}
8293EXPORT_SYMBOL(netdev_upper_dev_unlink);
8294
8295static void __netdev_adjacent_dev_set(struct net_device *upper_dev,
8296 struct net_device *lower_dev,
8297 bool val)
8298{
8299 struct netdev_adjacent *adj;
8300
8301 adj = __netdev_find_adj(lower_dev, &upper_dev->adj_list.lower);
8302 if (adj)
8303 adj->ignore = val;
8304
8305 adj = __netdev_find_adj(upper_dev, &lower_dev->adj_list.upper);
8306 if (adj)
8307 adj->ignore = val;
8308}
8309
8310static void netdev_adjacent_dev_disable(struct net_device *upper_dev,
8311 struct net_device *lower_dev)
8312{
8313 __netdev_adjacent_dev_set(upper_dev, lower_dev, true);
8314}
8315
8316static void netdev_adjacent_dev_enable(struct net_device *upper_dev,
8317 struct net_device *lower_dev)
8318{
8319 __netdev_adjacent_dev_set(upper_dev, lower_dev, false);
8320}
8321
8322int netdev_adjacent_change_prepare(struct net_device *old_dev,
8323 struct net_device *new_dev,
8324 struct net_device *dev,
8325 struct netlink_ext_ack *extack)
8326{
8327 struct netdev_nested_priv priv = {
8328 .flags = 0,
8329 .data = NULL,
8330 };
8331 int err;
8332
8333 if (!new_dev)
8334 return 0;
8335
8336 if (old_dev && new_dev != old_dev)
8337 netdev_adjacent_dev_disable(dev, old_dev);
8338 err = __netdev_upper_dev_link(new_dev, dev, false, NULL, NULL, &priv,
8339 extack);
8340 if (err) {
8341 if (old_dev && new_dev != old_dev)
8342 netdev_adjacent_dev_enable(dev, old_dev);
8343 return err;
8344 }
8345
8346 return 0;
8347}
8348EXPORT_SYMBOL(netdev_adjacent_change_prepare);
8349
8350void netdev_adjacent_change_commit(struct net_device *old_dev,
8351 struct net_device *new_dev,
8352 struct net_device *dev)
8353{
8354 struct netdev_nested_priv priv = {
8355 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
8356 .data = NULL,
8357 };
8358
8359 if (!new_dev || !old_dev)
8360 return;
8361
8362 if (new_dev == old_dev)
8363 return;
8364
8365 netdev_adjacent_dev_enable(dev, old_dev);
8366 __netdev_upper_dev_unlink(old_dev, dev, &priv);
8367}
8368EXPORT_SYMBOL(netdev_adjacent_change_commit);
8369
8370void netdev_adjacent_change_abort(struct net_device *old_dev,
8371 struct net_device *new_dev,
8372 struct net_device *dev)
8373{
8374 struct netdev_nested_priv priv = {
8375 .flags = 0,
8376 .data = NULL,
8377 };
8378
8379 if (!new_dev)
8380 return;
8381
8382 if (old_dev && new_dev != old_dev)
8383 netdev_adjacent_dev_enable(dev, old_dev);
8384
8385 __netdev_upper_dev_unlink(new_dev, dev, &priv);
8386}
8387EXPORT_SYMBOL(netdev_adjacent_change_abort);
8388
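/**
 * netdev_bonding_info_change - Dispatch event about slave change
 * @dev: device
 * @bonding_info: info to dispatch
 *
 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
 * The caller must hold the RTNL lock.
 */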
8397void netdev_bonding_info_change(struct net_device *dev,
8398 struct netdev_bonding_info *bonding_info)
8399{
8400 struct netdev_notifier_bonding_info info = {
8401 .info.dev = dev,
8402 };
8403
8404 memcpy(&info.bonding_info, bonding_info,
8405 sizeof(struct netdev_bonding_info));
8406 call_netdevice_notifiers_info(NETDEV_BONDING_INFO,
8407 &info.info);
8408}
8409EXPORT_SYMBOL(netdev_bonding_info_change);
8410
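/**
 * netdev_get_xmit_slave - Get the xmit slave of master device
 * @dev: device
 * @skb: the packet
 * @all_slaves: assume all the slaves are active
 *
 * Asks the driver (ndo_get_xmit_slave) which slave would transmit @skb.
 * The reference counters are not incremented so the caller must be
 * careful with locks. %NULL is returned if the device has no such op
 * or no slave is found.
 */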
8422struct net_device *netdev_get_xmit_slave(struct net_device *dev,
8423 struct sk_buff *skb,
8424 bool all_slaves)
8425{
8426 const struct net_device_ops *ops = dev->netdev_ops;
8427
8428 if (!ops->ndo_get_xmit_slave)
8429 return NULL;
8430 return ops->ndo_get_xmit_slave(dev, skb, all_slaves);
8431}
8432EXPORT_SYMBOL(netdev_get_xmit_slave);
8433
8434static struct net_device *netdev_sk_get_lower_dev(struct net_device *dev,
8435 struct sock *sk)
8436{
8437 const struct net_device_ops *ops = dev->netdev_ops;
8438
8439 if (!ops->ndo_sk_get_lower_dev)
8440 return NULL;
8441 return ops->ndo_sk_get_lower_dev(dev, sk);
8442}
8443
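/**
 * netdev_sk_get_lowest_dev - Get the lowest device in chain given device and socket
 * @dev: device
 * @sk: the socket
 *
 * Walks ndo_sk_get_lower_dev() down the stack and returns the lowest
 * device found for @sk; @dev itself is returned if it has no lower device.
 */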
8452struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev,
8453 struct sock *sk)
8454{
8455 struct net_device *lower;
8456
8457 lower = netdev_sk_get_lower_dev(dev, sk);
8458 while (lower) {
8459 dev = lower;
8460 lower = netdev_sk_get_lower_dev(dev, sk);
8461 }
8462
8463 return dev;
8464}
8465EXPORT_SYMBOL(netdev_sk_get_lowest_dev);
8466
8467static void netdev_adjacent_add_links(struct net_device *dev)
8468{
8469 struct netdev_adjacent *iter;
8470
8471 struct net *net = dev_net(dev);
8472
8473 list_for_each_entry(iter, &dev->adj_list.upper, list) {
8474 if (!net_eq(net, dev_net(iter->dev)))
8475 continue;
8476 netdev_adjacent_sysfs_add(iter->dev, dev,
8477 &iter->dev->adj_list.lower);
8478 netdev_adjacent_sysfs_add(dev, iter->dev,
8479 &dev->adj_list.upper);
8480 }
8481
8482 list_for_each_entry(iter, &dev->adj_list.lower, list) {
8483 if (!net_eq(net, dev_net(iter->dev)))
8484 continue;
8485 netdev_adjacent_sysfs_add(iter->dev, dev,
8486 &iter->dev->adj_list.upper);
8487 netdev_adjacent_sysfs_add(dev, iter->dev,
8488 &dev->adj_list.lower);
8489 }
8490}
8491
8492static void netdev_adjacent_del_links(struct net_device *dev)
8493{
8494 struct netdev_adjacent *iter;
8495
8496 struct net *net = dev_net(dev);
8497
8498 list_for_each_entry(iter, &dev->adj_list.upper, list) {
8499 if (!net_eq(net, dev_net(iter->dev)))
8500 continue;
8501 netdev_adjacent_sysfs_del(iter->dev, dev->name,
8502 &iter->dev->adj_list.lower);
8503 netdev_adjacent_sysfs_del(dev, iter->dev->name,
8504 &dev->adj_list.upper);
8505 }
8506
8507 list_for_each_entry(iter, &dev->adj_list.lower, list) {
8508 if (!net_eq(net, dev_net(iter->dev)))
8509 continue;
8510 netdev_adjacent_sysfs_del(iter->dev, dev->name,
8511 &iter->dev->adj_list.upper);
8512 netdev_adjacent_sysfs_del(dev, iter->dev->name,
8513 &dev->adj_list.lower);
8514 }
8515}
8516
8517void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
8518{
8519 struct netdev_adjacent *iter;
8520
8521 struct net *net = dev_net(dev);
8522
8523 list_for_each_entry(iter, &dev->adj_list.upper, list) {
8524 if (!net_eq(net, dev_net(iter->dev)))
8525 continue;
8526 netdev_adjacent_sysfs_del(iter->dev, oldname,
8527 &iter->dev->adj_list.lower);
8528 netdev_adjacent_sysfs_add(iter->dev, dev,
8529 &iter->dev->adj_list.lower);
8530 }
8531
8532 list_for_each_entry(iter, &dev->adj_list.lower, list) {
8533 if (!net_eq(net, dev_net(iter->dev)))
8534 continue;
8535 netdev_adjacent_sysfs_del(iter->dev, oldname,
8536 &iter->dev->adj_list.upper);
8537 netdev_adjacent_sysfs_add(iter->dev, dev,
8538 &iter->dev->adj_list.upper);
8539 }
8540}
8541
8542void *netdev_lower_dev_get_private(struct net_device *dev,
8543 struct net_device *lower_dev)
8544{
8545 struct netdev_adjacent *lower;
8546
8547 if (!lower_dev)
8548 return NULL;
8549 lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
8550 if (!lower)
8551 return NULL;
8552
8553 return lower->private;
8554}
8555EXPORT_SYMBOL(netdev_lower_dev_get_private);
8556
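/**
 * netdev_lower_state_changed - Dispatch event about lower device state change
 * @lower_dev: device
 * @lower_state_info: state to dispatch
 *
 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
 * The caller must hold the RTNL lock.
 */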
8566void netdev_lower_state_changed(struct net_device *lower_dev,
8567 void *lower_state_info)
8568{
8569 struct netdev_notifier_changelowerstate_info changelowerstate_info = {
8570 .info.dev = lower_dev,
8571 };
8572
8573 ASSERT_RTNL();
8574 changelowerstate_info.lower_state_info = lower_state_info;
8575 call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE,
8576 &changelowerstate_info.info);
8577}
8578EXPORT_SYMBOL(netdev_lower_state_changed);
8579
8580static void dev_change_rx_flags(struct net_device *dev, int flags)
8581{
8582 const struct net_device_ops *ops = dev->netdev_ops;
8583
8584 if (ops->ndo_change_rx_flags)
8585 ops->ndo_change_rx_flags(dev, flags);
8586}
8587
8588static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
8589{
8590 unsigned int old_flags = dev->flags;
8591 kuid_t uid;
8592 kgid_t gid;
8593
8594 ASSERT_RTNL();
8595
8596 dev->flags |= IFF_PROMISC;
8597 dev->promiscuity += inc;
8598 if (dev->promiscuity == 0) {
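 /* The unsigned counter wrapped around to zero: either the last
  * reference was just dropped (inc < 0), or an increment overflowed.
  * In the overflow case, undo the increment and return an error.
  */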
8603 if (inc < 0)
8604 dev->flags &= ~IFF_PROMISC;
8605 else {
8606 dev->promiscuity -= inc;
8607 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
8608 dev->name);
8609 return -EOVERFLOW;
8610 }
8611 }
8612 if (dev->flags != old_flags) {
8613 pr_info("device %s %s promiscuous mode\n",
8614 dev->name,
8615 dev->flags & IFF_PROMISC ? "entered" : "left");
8616 if (audit_enabled) {
8617 current_uid_gid(&uid, &gid);
8618 audit_log(audit_context(), GFP_ATOMIC,
8619 AUDIT_ANOM_PROMISCUOUS,
8620 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
8621 dev->name, (dev->flags & IFF_PROMISC),
8622 (old_flags & IFF_PROMISC),
8623 from_kuid(&init_user_ns, audit_get_loginuid(current)),
8624 from_kuid(&init_user_ns, uid),
8625 from_kgid(&init_user_ns, gid),
8626 audit_get_sessionid(current));
8627 }
8628
8629 dev_change_rx_flags(dev, IFF_PROMISC);
8630 }
8631 if (notify)
8632 __dev_notify_flags(dev, old_flags, IFF_PROMISC);
8633 return 0;
8634}
8635
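/**
 * dev_set_promiscuity - update promiscuity count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove promiscuity from a device. While the count in the device
 * remains above zero the interface remains promiscuous. Once it hits zero
 * the device reverts back to normal filtering operation. A negative inc
 * value is used to drop promiscuity on the device.
 * Return 0 if successful or a negative errno code on error.
 */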
8647int dev_set_promiscuity(struct net_device *dev, int inc)
8648{
8649 unsigned int old_flags = dev->flags;
8650 int err;
8651
8652 err = __dev_set_promiscuity(dev, inc, true);
8653 if (err < 0)
8654 return err;
8655 if (dev->flags != old_flags)
8656 dev_set_rx_mode(dev);
8657 return err;
8658}
8659EXPORT_SYMBOL(dev_set_promiscuity);
8660
8661static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
8662{
8663 unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
8664
8665 ASSERT_RTNL();
8666
8667 dev->flags |= IFF_ALLMULTI;
8668 dev->allmulti += inc;
8669 if (dev->allmulti == 0) {
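 /* As with promiscuity above, the unsigned allmulti counter wrapped to
  * zero: either the last reference was dropped or an increment
  * overflowed. In the overflow case, undo the increment and return an
  * error.
  */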
8674 if (inc < 0)
8675 dev->flags &= ~IFF_ALLMULTI;
8676 else {
8677 dev->allmulti -= inc;
8678 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
8679 dev->name);
8680 return -EOVERFLOW;
8681 }
8682 }
8683 if (dev->flags ^ old_flags) {
8684 dev_change_rx_flags(dev, IFF_ALLMULTI);
8685 dev_set_rx_mode(dev);
8686 if (notify)
8687 __dev_notify_flags(dev, old_flags,
8688 dev->gflags ^ old_gflags);
8689 }
8690 return 0;
8691}
8692
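/**
 * dev_set_allmulti - update allmulti count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove reception of all multicast frames to a device. While the
 * count in the device remains above zero the interface remains listening
 * to all interfaces. Once it hits zero the device reverts back to normal
 * filtering operation. A negative @inc value is used to drop the counter
 * when releasing a resource needing all multicasts.
 * Return 0 if successful or a negative errno code on error.
 */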
8706int dev_set_allmulti(struct net_device *dev, int inc)
8707{
8708 return __dev_set_allmulti(dev, inc, true);
8709}
8710EXPORT_SYMBOL(dev_set_allmulti);
8711
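/*
 * Upload unicast and multicast address lists to device and
 * configure RX filtering. When the device doesn't support unicast
 * filtering it is put in promiscuous mode while unicast addresses
 * are present.
 */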
8718void __dev_set_rx_mode(struct net_device *dev)
8719{
8720 const struct net_device_ops *ops = dev->netdev_ops;
8721
8722
8723 if (!(dev->flags&IFF_UP))
8724 return;
8725
8726 if (!netif_device_present(dev))
8727 return;
8728
8729 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
8730
8731
8732
8733 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
8734 __dev_set_promiscuity(dev, 1, false);
8735 dev->uc_promisc = true;
8736 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
8737 __dev_set_promiscuity(dev, -1, false);
8738 dev->uc_promisc = false;
8739 }
8740 }
8741
8742 if (ops->ndo_set_rx_mode)
8743 ops->ndo_set_rx_mode(dev);
8744}
8745
8746void dev_set_rx_mode(struct net_device *dev)
8747{
8748 netif_addr_lock_bh(dev);
8749 __dev_set_rx_mode(dev);
8750 netif_addr_unlock_bh(dev);
8751}
8752
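/**
 * dev_get_flags - get flags reported to userspace
 * @dev: device
 *
 * Get the combination of flag bits exported through APIs to userspace.
 */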
8759unsigned int dev_get_flags(const struct net_device *dev)
8760{
8761 unsigned int flags;
8762
8763 flags = (dev->flags & ~(IFF_PROMISC |
8764 IFF_ALLMULTI |
8765 IFF_RUNNING |
8766 IFF_LOWER_UP |
8767 IFF_DORMANT)) |
8768 (dev->gflags & (IFF_PROMISC |
8769 IFF_ALLMULTI));
8770
8771 if (netif_running(dev)) {
8772 if (netif_oper_up(dev))
8773 flags |= IFF_RUNNING;
8774 if (netif_carrier_ok(dev))
8775 flags |= IFF_LOWER_UP;
8776 if (netif_dormant(dev))
8777 flags |= IFF_DORMANT;
8778 }
8779
8780 return flags;
8781}
8782EXPORT_SYMBOL(dev_get_flags);
8783
8784int __dev_change_flags(struct net_device *dev, unsigned int flags,
8785 struct netlink_ext_ack *extack)
8786{
8787 unsigned int old_flags = dev->flags;
8788 int ret;
8789
8790 ASSERT_RTNL();
8791
8792
8793
8794
8795
8796 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
8797 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
8798 IFF_AUTOMEDIA)) |
8799 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
8800 IFF_ALLMULTI));
8801
8802
8803
8804
8805
8806 if ((old_flags ^ flags) & IFF_MULTICAST)
8807 dev_change_rx_flags(dev, IFF_MULTICAST);
8808
8809 dev_set_rx_mode(dev);
8810
8811
8812
8813
8814
8815
8816
8817 ret = 0;
8818 if ((old_flags ^ flags) & IFF_UP) {
8819 if (old_flags & IFF_UP)
8820 __dev_close(dev);
8821 else
8822 ret = __dev_open(dev, extack);
8823 }
8824
8825 if ((flags ^ dev->gflags) & IFF_PROMISC) {
8826 int inc = (flags & IFF_PROMISC) ? 1 : -1;
8827 unsigned int old_flags = dev->flags;
8828
8829 dev->gflags ^= IFF_PROMISC;
8830
8831 if (__dev_set_promiscuity(dev, inc, false) >= 0)
8832 if (dev->flags != old_flags)
8833 dev_set_rx_mode(dev);
8834 }
8835
8836
8837
8838
8839
8840 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
8841 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
8842
8843 dev->gflags ^= IFF_ALLMULTI;
8844 __dev_set_allmulti(dev, inc, false);
8845 }
8846
8847 return ret;
8848}
8849
8850void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
8851 unsigned int gchanges)
8852{
8853 unsigned int changes = dev->flags ^ old_flags;
8854
8855 if (gchanges)
8856 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
8857
8858 if (changes & IFF_UP) {
8859 if (dev->flags & IFF_UP)
8860 call_netdevice_notifiers(NETDEV_UP, dev);
8861 else
8862 call_netdevice_notifiers(NETDEV_DOWN, dev);
8863 }
8864
8865 if (dev->flags & IFF_UP &&
8866 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
8867 struct netdev_notifier_change_info change_info = {
8868 .info = {
8869 .dev = dev,
8870 },
8871 .flags_changed = changes,
8872 };
8873
8874 call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info);
8875 }
8876}
8877
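/**
 * dev_change_flags - change device settings
 * @dev: device
 * @flags: device state flags
 * @extack: netlink extended ack
 *
 * Change settings on device based state flags. The flags are
 * in the userspace exported format.
 */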
8887int dev_change_flags(struct net_device *dev, unsigned int flags,
8888 struct netlink_ext_ack *extack)
8889{
8890 int ret;
8891 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
8892
8893 ret = __dev_change_flags(dev, flags, extack);
8894 if (ret < 0)
8895 return ret;
8896
8897 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
8898 __dev_notify_flags(dev, old_flags, changes);
8899 return ret;
8900}
8901EXPORT_SYMBOL(dev_change_flags);
8902
8903int __dev_set_mtu(struct net_device *dev, int new_mtu)
8904{
8905 const struct net_device_ops *ops = dev->netdev_ops;
8906
8907 if (ops->ndo_change_mtu)
8908 return ops->ndo_change_mtu(dev, new_mtu);
8909
8910
8911 WRITE_ONCE(dev->mtu, new_mtu);
8912 return 0;
8913}
8914EXPORT_SYMBOL(__dev_set_mtu);
8915
8916int dev_validate_mtu(struct net_device *dev, int new_mtu,
8917 struct netlink_ext_ack *extack)
8918{
8919
8920 if (new_mtu < 0 || new_mtu < dev->min_mtu) {
8921 NL_SET_ERR_MSG(extack, "mtu less than device minimum");
8922 return -EINVAL;
8923 }
8924
8925 if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
8926 NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
8927 return -EINVAL;
8928 }
8929 return 0;
8930}
8931
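/**
 * dev_set_mtu_ext - Change maximum transfer unit
 * @dev: device
 * @new_mtu: new transfer unit
 * @extack: netlink extended ack
 *
 * Change the maximum transfer size of the network device.
 */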
8940int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
8941 struct netlink_ext_ack *extack)
8942{
8943 int err, orig_mtu;
8944
8945 if (new_mtu == dev->mtu)
8946 return 0;
8947
8948 err = dev_validate_mtu(dev, new_mtu, extack);
8949 if (err)
8950 return err;
8951
8952 if (!netif_device_present(dev))
8953 return -ENODEV;
8954
8955 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
8956 err = notifier_to_errno(err);
8957 if (err)
8958 return err;
8959
8960 orig_mtu = dev->mtu;
8961 err = __dev_set_mtu(dev, new_mtu);
8962
8963 if (!err) {
8964 err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
8965 orig_mtu);
8966 err = notifier_to_errno(err);
8967 if (err) {
8968
8969
8970
8971 __dev_set_mtu(dev, orig_mtu);
8972 call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
8973 new_mtu);
8974 }
8975 }
8976 return err;
8977}
8978
8979int dev_set_mtu(struct net_device *dev, int new_mtu)
8980{
8981 struct netlink_ext_ack extack;
8982 int err;
8983
8984 memset(&extack, 0, sizeof(extack));
8985 err = dev_set_mtu_ext(dev, new_mtu, &extack);
8986 if (err && extack._msg)
8987 net_err_ratelimited("%s: %s\n", dev->name, extack._msg);
8988 return err;
8989}
8990EXPORT_SYMBOL(dev_set_mtu);
8991
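/**
 * dev_change_tx_queue_len - Change TX queue length of a netdevice
 * @dev: device
 * @new_len: new tx queue length
 */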
8997int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
8998{
8999 unsigned int orig_len = dev->tx_queue_len;
9000 int res;
9001
9002 if (new_len != (unsigned int)new_len)
9003 return -ERANGE;
9004
9005 if (new_len != orig_len) {
9006 dev->tx_queue_len = new_len;
9007 res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
9008 res = notifier_to_errno(res);
9009 if (res)
9010 goto err_rollback;
9011 res = dev_qdisc_change_tx_queue_len(dev);
9012 if (res)
9013 goto err_rollback;
9014 }
9015
9016 return 0;
9017
9018err_rollback:
9019 netdev_err(dev, "refused to change device tx_queue_len\n");
9020 dev->tx_queue_len = orig_len;
9021 return res;
9022}
9023
9024
9025
9026
9027
9028
9029void dev_set_group(struct net_device *dev, int new_group)
9030{
9031 dev->group = new_group;
9032}
9033EXPORT_SYMBOL(dev_set_group);
9034
9035
9036
9037
9038
9039
9040
9041int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
9042 struct netlink_ext_ack *extack)
9043{
9044 struct netdev_notifier_pre_changeaddr_info info = {
9045 .info.dev = dev,
9046 .info.extack = extack,
9047 .dev_addr = addr,
9048 };
9049 int rc;
9050
9051 rc = call_netdevice_notifiers_info(NETDEV_PRE_CHANGEADDR, &info.info);
9052 return notifier_to_errno(rc);
9053}
9054EXPORT_SYMBOL(dev_pre_changeaddr_notify);
9055
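/**
 * dev_set_mac_address - Change Media Access Control Address
 * @dev: device
 * @sa: new address
 * @extack: netlink extended ack
 *
 * Change the hardware (MAC) address of the device
 */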
9064int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
9065 struct netlink_ext_ack *extack)
9066{
9067 const struct net_device_ops *ops = dev->netdev_ops;
9068 int err;
9069
9070 if (!ops->ndo_set_mac_address)
9071 return -EOPNOTSUPP;
9072 if (sa->sa_family != dev->type)
9073 return -EINVAL;
9074 if (!netif_device_present(dev))
9075 return -ENODEV;
9076 err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack);
9077 if (err)
9078 return err;
9079 err = ops->ndo_set_mac_address(dev, sa);
9080 if (err)
9081 return err;
9082 dev->addr_assign_type = NET_ADDR_SET;
9083 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
9084 add_device_randomness(dev->dev_addr, dev->addr_len);
9085 return 0;
9086}
9087EXPORT_SYMBOL(dev_set_mac_address);
9088
9089static DECLARE_RWSEM(dev_addr_sem);
9090
9091int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
9092 struct netlink_ext_ack *extack)
9093{
9094 int ret;
9095
9096 down_write(&dev_addr_sem);
9097 ret = dev_set_mac_address(dev, sa, extack);
9098 up_write(&dev_addr_sem);
9099 return ret;
9100}
9101EXPORT_SYMBOL(dev_set_mac_address_user);
9102
9103int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name)
9104{
9105 size_t size = sizeof(sa->sa_data);
9106 struct net_device *dev;
9107 int ret = 0;
9108
9109 down_read(&dev_addr_sem);
9110 rcu_read_lock();
9111
9112 dev = dev_get_by_name_rcu(net, dev_name);
9113 if (!dev) {
9114 ret = -ENODEV;
9115 goto unlock;
9116 }
9117 if (!dev->addr_len)
9118 memset(sa->sa_data, 0, size);
9119 else
9120 memcpy(sa->sa_data, dev->dev_addr,
9121 min_t(size_t, size, dev->addr_len));
9122 sa->sa_family = dev->type;
9123
9124unlock:
9125 rcu_read_unlock();
9126 up_read(&dev_addr_sem);
9127 return ret;
9128}
9129EXPORT_SYMBOL(dev_get_mac_address);
9130
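/**
 * dev_change_carrier - Change device carrier
 * @dev: device
 * @new_carrier: new value
 *
 * Change device carrier
 */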
9138int dev_change_carrier(struct net_device *dev, bool new_carrier)
9139{
9140 const struct net_device_ops *ops = dev->netdev_ops;
9141
9142 if (!ops->ndo_change_carrier)
9143 return -EOPNOTSUPP;
9144 if (!netif_device_present(dev))
9145 return -ENODEV;
9146 return ops->ndo_change_carrier(dev, new_carrier);
9147}
9148EXPORT_SYMBOL(dev_change_carrier);
9149
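/**
 * dev_get_phys_port_id - Get device physical port ID
 * @dev: device
 * @ppid: port ID
 *
 * Get device physical port ID
 */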
9157int dev_get_phys_port_id(struct net_device *dev,
9158 struct netdev_phys_item_id *ppid)
9159{
9160 const struct net_device_ops *ops = dev->netdev_ops;
9161
9162 if (!ops->ndo_get_phys_port_id)
9163 return -EOPNOTSUPP;
9164 return ops->ndo_get_phys_port_id(dev, ppid);
9165}
9166EXPORT_SYMBOL(dev_get_phys_port_id);
9167
9168
9169
9170
9171
9172
9173
9174
9175
9176int dev_get_phys_port_name(struct net_device *dev,
9177 char *name, size_t len)
9178{
9179 const struct net_device_ops *ops = dev->netdev_ops;
9180 int err;
9181
9182 if (ops->ndo_get_phys_port_name) {
9183 err = ops->ndo_get_phys_port_name(dev, name, len);
9184 if (err != -EOPNOTSUPP)
9185 return err;
9186 }
9187 return devlink_compat_phys_port_name_get(dev, name, len);
9188}
9189EXPORT_SYMBOL(dev_get_phys_port_name);
9190
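/**
 * dev_get_port_parent_id - Get the device's port parent identifier
 * @dev: network device
 * @ppid: pointer to a storage for the port's parent identifier
 * @recurse: allow/disallow recursion to lower devices
 *
 * Get the device's port parent identifier
 */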
9199int dev_get_port_parent_id(struct net_device *dev,
9200 struct netdev_phys_item_id *ppid,
9201 bool recurse)
9202{
9203 const struct net_device_ops *ops = dev->netdev_ops;
9204 struct netdev_phys_item_id first = { };
9205 struct net_device *lower_dev;
9206 struct list_head *iter;
9207 int err;
9208
9209 if (ops->ndo_get_port_parent_id) {
9210 err = ops->ndo_get_port_parent_id(dev, ppid);
9211 if (err != -EOPNOTSUPP)
9212 return err;
9213 }
9214
9215 err = devlink_compat_switch_id_get(dev, ppid);
9216 if (!err || err != -EOPNOTSUPP)
9217 return err;
9218
9219 if (!recurse)
9220 return -EOPNOTSUPP;
9221
9222 netdev_for_each_lower_dev(dev, lower_dev, iter) {
9223 err = dev_get_port_parent_id(lower_dev, ppid, recurse);
9224 if (err)
9225 break;
9226 if (!first.id_len)
9227 first = *ppid;
9228 else if (memcmp(&first, ppid, sizeof(*ppid)))
9229 return -EOPNOTSUPP;
9230 }
9231
9232 return err;
9233}
9234EXPORT_SYMBOL(dev_get_port_parent_id);
9235
9236
9237
9238
9239
9240
9241
9242bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b)
9243{
9244 struct netdev_phys_item_id a_id = { };
9245 struct netdev_phys_item_id b_id = { };
9246
9247 if (dev_get_port_parent_id(a, &a_id, true) ||
9248 dev_get_port_parent_id(b, &b_id, true))
9249 return false;
9250
9251 return netdev_phys_item_id_same(&a_id, &b_id);
9252}
9253EXPORT_SYMBOL(netdev_port_same_parent_id);
9254
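/**
 * dev_change_proto_down - update protocol port state information
 * @dev: device
 * @proto_down: new value
 *
 * This info can be used by switch drivers to set the phys state of the
 * port.
 */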
9263int dev_change_proto_down(struct net_device *dev, bool proto_down)
9264{
9265 const struct net_device_ops *ops = dev->netdev_ops;
9266
9267 if (!ops->ndo_change_proto_down)
9268 return -EOPNOTSUPP;
9269 if (!netif_device_present(dev))
9270 return -ENODEV;
9271 return ops->ndo_change_proto_down(dev, proto_down);
9272}
9273EXPORT_SYMBOL(dev_change_proto_down);
9274
9275
9276
9277
9278
9279
9280
9281
9282
9283int dev_change_proto_down_generic(struct net_device *dev, bool proto_down)
9284{
9285 if (proto_down)
9286 netif_carrier_off(dev);
9287 else
9288 netif_carrier_on(dev);
9289 dev->proto_down = proto_down;
9290 return 0;
9291}
9292EXPORT_SYMBOL(dev_change_proto_down_generic);
9293
9294
9295
9296
9297
9298
9299
9300
9301void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask,
9302 u32 value)
9303{
9304 int b;
9305
9306 if (!mask) {
9307 dev->proto_down_reason = value;
9308 } else {
9309 for_each_set_bit(b, &mask, 32) {
9310 if (value & (1 << b))
9311 dev->proto_down_reason |= BIT(b);
9312 else
9313 dev->proto_down_reason &= ~BIT(b);
9314 }
9315 }
9316}
9317EXPORT_SYMBOL(dev_change_proto_down_reason);
9318
9319struct bpf_xdp_link {
9320 struct bpf_link link;
9321 struct net_device *dev;
9322 int flags;
9323};
9324
9325static enum bpf_xdp_mode dev_xdp_mode(struct net_device *dev, u32 flags)
9326{
9327 if (flags & XDP_FLAGS_HW_MODE)
9328 return XDP_MODE_HW;
9329 if (flags & XDP_FLAGS_DRV_MODE)
9330 return XDP_MODE_DRV;
9331 if (flags & XDP_FLAGS_SKB_MODE)
9332 return XDP_MODE_SKB;
9333 return dev->netdev_ops->ndo_bpf ? XDP_MODE_DRV : XDP_MODE_SKB;
9334}
9335
9336static bpf_op_t dev_xdp_bpf_op(struct net_device *dev, enum bpf_xdp_mode mode)
9337{
9338 switch (mode) {
9339 case XDP_MODE_SKB:
9340 return generic_xdp_install;
9341 case XDP_MODE_DRV:
9342 case XDP_MODE_HW:
9343 return dev->netdev_ops->ndo_bpf;
9344 default:
9345 return NULL;
9346 }
9347}
9348
9349static struct bpf_xdp_link *dev_xdp_link(struct net_device *dev,
9350 enum bpf_xdp_mode mode)
9351{
9352 return dev->xdp_state[mode].link;
9353}
9354
9355static struct bpf_prog *dev_xdp_prog(struct net_device *dev,
9356 enum bpf_xdp_mode mode)
9357{
9358 struct bpf_xdp_link *link = dev_xdp_link(dev, mode);
9359
9360 if (link)
9361 return link->link.prog;
9362 return dev->xdp_state[mode].prog;
9363}
9364
9365static u8 dev_xdp_prog_count(struct net_device *dev)
9366{
9367 u8 count = 0;
9368 int i;
9369
9370 for (i = 0; i < __MAX_XDP_MODE; i++)
9371 if (dev->xdp_state[i].prog || dev->xdp_state[i].link)
9372 count++;
9373 return count;
9374}
9375
9376u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode)
9377{
9378 struct bpf_prog *prog = dev_xdp_prog(dev, mode);
9379
9380 return prog ? prog->aux->id : 0;
9381}
9382
9383static void dev_xdp_set_link(struct net_device *dev, enum bpf_xdp_mode mode,
9384 struct bpf_xdp_link *link)
9385{
9386 dev->xdp_state[mode].link = link;
9387 dev->xdp_state[mode].prog = NULL;
9388}
9389
9390static void dev_xdp_set_prog(struct net_device *dev, enum bpf_xdp_mode mode,
9391 struct bpf_prog *prog)
9392{
9393 dev->xdp_state[mode].link = NULL;
9394 dev->xdp_state[mode].prog = prog;
9395}
9396
9397static int dev_xdp_install(struct net_device *dev, enum bpf_xdp_mode mode,
9398 bpf_op_t bpf_op, struct netlink_ext_ack *extack,
9399 u32 flags, struct bpf_prog *prog)
9400{
9401 struct netdev_bpf xdp;
9402 int err;
9403
9404 memset(&xdp, 0, sizeof(xdp));
9405 xdp.command = mode == XDP_MODE_HW ? XDP_SETUP_PROG_HW : XDP_SETUP_PROG;
9406 xdp.extack = extack;
9407 xdp.flags = flags;
9408 xdp.prog = prog;
9409
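 /* The driver op takes ownership of one program reference: grab it
  * before the call and put it back if the install fails.
  */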
9416 if (prog)
9417 bpf_prog_inc(prog);
9418 err = bpf_op(dev, &xdp);
9419 if (err) {
9420 if (prog)
9421 bpf_prog_put(prog);
9422 return err;
9423 }
9424
9425 if (mode != XDP_MODE_HW)
9426 bpf_prog_change_xdp(dev_xdp_prog(dev, mode), prog);
9427
9428 return 0;
9429}
9430
9431static void dev_xdp_uninstall(struct net_device *dev)
9432{
9433 struct bpf_xdp_link *link;
9434 struct bpf_prog *prog;
9435 enum bpf_xdp_mode mode;
9436 bpf_op_t bpf_op;
9437
9438 ASSERT_RTNL();
9439
9440 for (mode = XDP_MODE_SKB; mode < __MAX_XDP_MODE; mode++) {
9441 prog = dev_xdp_prog(dev, mode);
9442 if (!prog)
9443 continue;
9444
9445 bpf_op = dev_xdp_bpf_op(dev, mode);
9446 if (!bpf_op)
9447 continue;
9448
9449 WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL));
9450
9451
9452 link = dev_xdp_link(dev, mode);
9453 if (link)
9454 link->dev = NULL;
9455 else
9456 bpf_prog_put(prog);
9457
9458 dev_xdp_set_link(dev, mode, NULL);
9459 }
9460}
9461
9462static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack,
9463 struct bpf_xdp_link *link, struct bpf_prog *new_prog,
9464 struct bpf_prog *old_prog, u32 flags)
9465{
9466 unsigned int num_modes = hweight32(flags & XDP_FLAGS_MODES);
9467 struct bpf_prog *cur_prog;
9468 enum bpf_xdp_mode mode;
9469 bpf_op_t bpf_op;
9470 int err;
9471
9472 ASSERT_RTNL();
9473
9474
9475 if (link && (new_prog || old_prog))
9476 return -EINVAL;
9477
9478 if (link && (flags & ~XDP_FLAGS_MODES)) {
9479 NL_SET_ERR_MSG(extack, "Invalid XDP flags for BPF link attachment");
9480 return -EINVAL;
9481 }
9482
9483 if (num_modes > 1) {
9484 NL_SET_ERR_MSG(extack, "Only one XDP mode flag can be set");
9485 return -EINVAL;
9486 }
9487
9488 if (!num_modes && dev_xdp_prog_count(dev) > 1) {
9489 NL_SET_ERR_MSG(extack,
9490 "More than one program loaded, unset mode is ambiguous");
9491 return -EINVAL;
9492 }
9493
9494 if (old_prog && !(flags & XDP_FLAGS_REPLACE)) {
9495 NL_SET_ERR_MSG(extack, "XDP_FLAGS_REPLACE is not specified");
9496 return -EINVAL;
9497 }
9498
9499 mode = dev_xdp_mode(dev, flags);
9500
9501 if (dev_xdp_link(dev, mode)) {
9502 NL_SET_ERR_MSG(extack, "Can't replace active BPF XDP link");
9503 return -EBUSY;
9504 }
9505
9506 cur_prog = dev_xdp_prog(dev, mode);
9507
9508 if (link && cur_prog) {
9509 NL_SET_ERR_MSG(extack, "Can't replace active XDP program with BPF link");
9510 return -EBUSY;
9511 }
9512 if ((flags & XDP_FLAGS_REPLACE) && cur_prog != old_prog) {
9513 NL_SET_ERR_MSG(extack, "Active program does not match expected");
9514 return -EEXIST;
9515 }
9516
9517
9518 if (link)
9519 new_prog = link->link.prog;
9520
9521 if (new_prog) {
9522 bool offload = mode == XDP_MODE_HW;
9523 enum bpf_xdp_mode other_mode = mode == XDP_MODE_SKB
9524 ? XDP_MODE_DRV : XDP_MODE_SKB;
9525
9526 if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && cur_prog) {
9527 NL_SET_ERR_MSG(extack, "XDP program already attached");
9528 return -EBUSY;
9529 }
9530 if (!offload && dev_xdp_prog(dev, other_mode)) {
9531 NL_SET_ERR_MSG(extack, "Native and generic XDP can't be active at the same time");
9532 return -EEXIST;
9533 }
9534 if (!offload && bpf_prog_is_dev_bound(new_prog->aux)) {
9535 NL_SET_ERR_MSG(extack, "Using device-bound program without HW_MODE flag is not supported");
9536 return -EINVAL;
9537 }
9538 if (new_prog->expected_attach_type == BPF_XDP_DEVMAP) {
9539 NL_SET_ERR_MSG(extack, "BPF_XDP_DEVMAP programs can not be attached to a device");
9540 return -EINVAL;
9541 }
9542 if (new_prog->expected_attach_type == BPF_XDP_CPUMAP) {
9543 NL_SET_ERR_MSG(extack, "BPF_XDP_CPUMAP programs can not be attached to a device");
9544 return -EINVAL;
9545 }
9546 }
9547
9548
9549 if (new_prog != cur_prog) {
9550 bpf_op = dev_xdp_bpf_op(dev, mode);
9551 if (!bpf_op) {
9552 NL_SET_ERR_MSG(extack, "Underlying driver does not support XDP in native mode");
9553 return -EOPNOTSUPP;
9554 }
9555
9556 err = dev_xdp_install(dev, mode, bpf_op, extack, flags, new_prog);
9557 if (err)
9558 return err;
9559 }
9560
9561 if (link)
9562 dev_xdp_set_link(dev, mode, link);
9563 else
9564 dev_xdp_set_prog(dev, mode, new_prog);
9565 if (cur_prog)
9566 bpf_prog_put(cur_prog);
9567
9568 return 0;
9569}
9570
9571static int dev_xdp_attach_link(struct net_device *dev,
9572 struct netlink_ext_ack *extack,
9573 struct bpf_xdp_link *link)
9574{
9575 return dev_xdp_attach(dev, extack, link, NULL, NULL, link->flags);
9576}
9577
9578static int dev_xdp_detach_link(struct net_device *dev,
9579 struct netlink_ext_ack *extack,
9580 struct bpf_xdp_link *link)
9581{
9582 enum bpf_xdp_mode mode;
9583 bpf_op_t bpf_op;
9584
9585 ASSERT_RTNL();
9586
9587 mode = dev_xdp_mode(dev, link->flags);
9588 if (dev_xdp_link(dev, mode) != link)
9589 return -EINVAL;
9590
9591 bpf_op = dev_xdp_bpf_op(dev, mode);
9592 WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL));
9593 dev_xdp_set_link(dev, mode, NULL);
9594 return 0;
9595}
9596
9597static void bpf_xdp_link_release(struct bpf_link *link)
9598{
9599 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9600
9601 rtnl_lock();
9602
9603
9604
9605
9606 if (xdp_link->dev) {
9607 WARN_ON(dev_xdp_detach_link(xdp_link->dev, NULL, xdp_link));
9608 xdp_link->dev = NULL;
9609 }
9610
9611 rtnl_unlock();
9612}
9613
9614static int bpf_xdp_link_detach(struct bpf_link *link)
9615{
9616 bpf_xdp_link_release(link);
9617 return 0;
9618}
9619
9620static void bpf_xdp_link_dealloc(struct bpf_link *link)
9621{
9622 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9623
9624 kfree(xdp_link);
9625}
9626
9627static void bpf_xdp_link_show_fdinfo(const struct bpf_link *link,
9628 struct seq_file *seq)
9629{
9630 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9631 u32 ifindex = 0;
9632
9633 rtnl_lock();
9634 if (xdp_link->dev)
9635 ifindex = xdp_link->dev->ifindex;
9636 rtnl_unlock();
9637
9638 seq_printf(seq, "ifindex:\t%u\n", ifindex);
9639}
9640
9641static int bpf_xdp_link_fill_link_info(const struct bpf_link *link,
9642 struct bpf_link_info *info)
9643{
9644 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9645 u32 ifindex = 0;
9646
9647 rtnl_lock();
9648 if (xdp_link->dev)
9649 ifindex = xdp_link->dev->ifindex;
9650 rtnl_unlock();
9651
9652 info->xdp.ifindex = ifindex;
9653 return 0;
9654}
9655
9656static int bpf_xdp_link_update(struct bpf_link *link, struct bpf_prog *new_prog,
9657 struct bpf_prog *old_prog)
9658{
9659 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9660 enum bpf_xdp_mode mode;
9661 bpf_op_t bpf_op;
9662 int err = 0;
9663
9664 rtnl_lock();
9665
9666
9667 if (!xdp_link->dev) {
9668 err = -ENOLINK;
9669 goto out_unlock;
9670 }
9671
9672 if (old_prog && link->prog != old_prog) {
9673 err = -EPERM;
9674 goto out_unlock;
9675 }
9676 old_prog = link->prog;
9677 if (old_prog == new_prog) {
9678
9679 bpf_prog_put(new_prog);
9680 goto out_unlock;
9681 }
9682
9683 mode = dev_xdp_mode(xdp_link->dev, xdp_link->flags);
9684 bpf_op = dev_xdp_bpf_op(xdp_link->dev, mode);
9685 err = dev_xdp_install(xdp_link->dev, mode, bpf_op, NULL,
9686 xdp_link->flags, new_prog);
9687 if (err)
9688 goto out_unlock;
9689
9690 old_prog = xchg(&link->prog, new_prog);
9691 bpf_prog_put(old_prog);
9692
9693out_unlock:
9694 rtnl_unlock();
9695 return err;
9696}
9697
9698static const struct bpf_link_ops bpf_xdp_link_lops = {
9699 .release = bpf_xdp_link_release,
9700 .dealloc = bpf_xdp_link_dealloc,
9701 .detach = bpf_xdp_link_detach,
9702 .show_fdinfo = bpf_xdp_link_show_fdinfo,
9703 .fill_link_info = bpf_xdp_link_fill_link_info,
9704 .update_prog = bpf_xdp_link_update,
9705};
9706
9707int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
9708{
9709 struct net *net = current->nsproxy->net_ns;
9710 struct bpf_link_primer link_primer;
9711 struct bpf_xdp_link *link;
9712 struct net_device *dev;
9713 int err, fd;
9714
9715 rtnl_lock();
9716 dev = dev_get_by_index(net, attr->link_create.target_ifindex);
9717 if (!dev) {
9718 rtnl_unlock();
9719 return -EINVAL;
9720 }
9721
9722 link = kzalloc(sizeof(*link), GFP_USER);
9723 if (!link) {
9724 err = -ENOMEM;
9725 goto unlock;
9726 }
9727
9728 bpf_link_init(&link->link, BPF_LINK_TYPE_XDP, &bpf_xdp_link_lops, prog);
9729 link->dev = dev;
9730 link->flags = attr->link_create.flags;
9731
9732 err = bpf_link_prime(&link->link, &link_primer);
9733 if (err) {
9734 kfree(link);
9735 goto unlock;
9736 }
9737
9738 err = dev_xdp_attach_link(dev, NULL, link);
9739 rtnl_unlock();
9740
9741 if (err) {
9742 link->dev = NULL;
9743 bpf_link_cleanup(&link_primer);
9744 goto out_put_dev;
9745 }
9746
9747 fd = bpf_link_settle(&link_primer);
9748
9749 dev_put(dev);
9750 return fd;
9751
9752unlock:
9753 rtnl_unlock();
9754
9755out_put_dev:
9756 dev_put(dev);
9757 return err;
9758}
9759
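/**
 * dev_change_xdp_fd - set or clear a bpf program for a device rx path
 * @dev: device
 * @extack: netlink extended ack
 * @fd: new program fd or negative value to clear
 * @expected_fd: old program fd that userspace expects to replace or clear
 * @flags: xdp-related flags
 *
 * Set or clear a bpf program for a device
 */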
9770int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
9771 int fd, int expected_fd, u32 flags)
9772{
9773 enum bpf_xdp_mode mode = dev_xdp_mode(dev, flags);
9774 struct bpf_prog *new_prog = NULL, *old_prog = NULL;
9775 int err;
9776
9777 ASSERT_RTNL();
9778
9779 if (fd >= 0) {
9780 new_prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
9781 mode != XDP_MODE_SKB);
9782 if (IS_ERR(new_prog))
9783 return PTR_ERR(new_prog);
9784 }
9785
9786 if (expected_fd >= 0) {
9787 old_prog = bpf_prog_get_type_dev(expected_fd, BPF_PROG_TYPE_XDP,
9788 mode != XDP_MODE_SKB);
9789 if (IS_ERR(old_prog)) {
9790 err = PTR_ERR(old_prog);
9791 old_prog = NULL;
9792 goto err_out;
9793 }
9794 }
9795
9796 err = dev_xdp_attach(dev, extack, NULL, new_prog, old_prog, flags);
9797
9798err_out:
9799 if (err && new_prog)
9800 bpf_prog_put(new_prog);
9801 if (old_prog)
9802 bpf_prog_put(old_prog);
9803 return err;
9804}
9805
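/**
 * dev_new_index - allocate an ifindex
 * @net: the applicable net namespace
 *
 * Returns a suitable unique value for a new device interface
 * number.  The caller must hold the rtnl semaphore or the
 * dev_base_lock to be sure it remains unique.
 */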
9814static int dev_new_index(struct net *net)
9815{
9816 int ifindex = net->ifindex;
9817
9818 for (;;) {
9819 if (++ifindex <= 0)
9820 ifindex = 1;
9821 if (!__dev_get_by_index(net, ifindex))
9822 return net->ifindex = ifindex;
9823 }
9824}
9825
9826
9827static LIST_HEAD(net_todo_list);
9828DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
9829
9830static void net_set_todo(struct net_device *dev)
9831{
9832 list_add_tail(&dev->todo_list, &net_todo_list);
9833 dev_net(dev)->dev_unreg_count++;
9834}
9835
9836static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
9837 struct net_device *upper, netdev_features_t features)
9838{
9839 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
9840 netdev_features_t feature;
9841 int feature_bit;
9842
9843 for_each_netdev_feature(upper_disables, feature_bit) {
9844 feature = __NETIF_F_BIT(feature_bit);
9845 if (!(upper->wanted_features & feature)
9846 && (features & feature)) {
9847 netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
9848 &feature, upper->name);
9849 features &= ~feature;
9850 }
9851 }
9852
9853 return features;
9854}
9855
9856static void netdev_sync_lower_features(struct net_device *upper,
9857 struct net_device *lower, netdev_features_t features)
9858{
9859 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
9860 netdev_features_t feature;
9861 int feature_bit;
9862
9863 for_each_netdev_feature(upper_disables, feature_bit) {
9864 feature = __NETIF_F_BIT(feature_bit);
9865 if (!(features & feature) && (lower->features & feature)) {
9866 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
9867 &feature, lower->name);
9868 lower->wanted_features &= ~feature;
9869 __netdev_update_features(lower);
9870
9871 if (unlikely(lower->features & feature))
9872 netdev_WARN(upper, "failed to disable %pNF on %s!\n",
9873 &feature, lower->name);
9874 else
9875 netdev_features_change(lower);
9876 }
9877 }
9878}
9879
9880static netdev_features_t netdev_fix_features(struct net_device *dev,
9881 netdev_features_t features)
9882{
9883
9884 if ((features & NETIF_F_HW_CSUM) &&
9885 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
9886 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
9887 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
9888 }
9889
9890
9891 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
9892 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
9893 features &= ~NETIF_F_ALL_TSO;
9894 }
9895
9896 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
9897 !(features & NETIF_F_IP_CSUM)) {
9898 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
9899 features &= ~NETIF_F_TSO;
9900 features &= ~NETIF_F_TSO_ECN;
9901 }
9902
9903 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
9904 !(features & NETIF_F_IPV6_CSUM)) {
9905 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
9906 features &= ~NETIF_F_TSO6;
9907 }
9908
9909
9910 if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
9911 features &= ~NETIF_F_TSO_MANGLEID;
9912
9913
9914 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
9915 features &= ~NETIF_F_TSO_ECN;
9916
9917
9918 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
9919 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
9920 features &= ~NETIF_F_GSO;
9921 }
9922
9923
9924 if ((features & dev->gso_partial_features) &&
9925 !(features & NETIF_F_GSO_PARTIAL)) {
9926 netdev_dbg(dev,
9927 "Dropping partially supported GSO features since no GSO partial.\n");
9928 features &= ~dev->gso_partial_features;
9929 }
9930
9931 if (!(features & NETIF_F_RXCSUM)) {
9932
9933
9934
9935
9936
9937 if (features & NETIF_F_GRO_HW) {
9938 netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n");
9939 features &= ~NETIF_F_GRO_HW;
9940 }
9941 }
9942
9943
9944 if (features & NETIF_F_RXFCS) {
9945 if (features & NETIF_F_LRO) {
9946 netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n");
9947 features &= ~NETIF_F_LRO;
9948 }
9949
9950 if (features & NETIF_F_GRO_HW) {
9951 netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n");
9952 features &= ~NETIF_F_GRO_HW;
9953 }
9954 }
9955
9956 if (features & NETIF_F_HW_TLS_TX) {
9957 bool ip_csum = (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) ==
9958 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
9959 bool hw_csum = features & NETIF_F_HW_CSUM;
9960
9961 if (!ip_csum && !hw_csum) {
9962 netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n");
9963 features &= ~NETIF_F_HW_TLS_TX;
9964 }
9965 }
9966
9967 if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) {
9968 netdev_dbg(dev, "Dropping TLS RX HW offload feature since no RXCSUM feature.\n");
9969 features &= ~NETIF_F_HW_TLS_RX;
9970 }
9971
9972 return features;
9973}
9974
9975int __netdev_update_features(struct net_device *dev)
9976{
9977 struct net_device *upper, *lower;
9978 netdev_features_t features;
9979 struct list_head *iter;
9980 int err = -1;
9981
9982 ASSERT_RTNL();
9983
9984 features = netdev_get_wanted_features(dev);
9985
9986 if (dev->netdev_ops->ndo_fix_features)
9987 features = dev->netdev_ops->ndo_fix_features(dev, features);
9988
9989
9990 features = netdev_fix_features(dev, features);
9991
9992
9993 netdev_for_each_upper_dev_rcu(dev, upper, iter)
9994 features = netdev_sync_upper_features(dev, upper, features);
9995
9996 if (dev->features == features)
9997 goto sync_lower;
9998
9999 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
10000 &dev->features, &features);
10001
10002 if (dev->netdev_ops->ndo_set_features)
10003 err = dev->netdev_ops->ndo_set_features(dev, features);
10004 else
10005 err = 0;
10006
10007 if (unlikely(err < 0)) {
10008 netdev_err(dev,
10009 "set_features() failed (%d); wanted %pNF, left %pNF\n",
10010 err, &features, &dev->features);
10011
10012
10013
10014 return -1;
10015 }
10016
10017sync_lower:
10018
10019
10020
10021 netdev_for_each_lower_dev(dev, lower, iter)
10022 netdev_sync_lower_features(dev, lower, features);
10023
10024 if (!err) {
10025 netdev_features_t diff = features ^ dev->features;
10026
10027 if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) {
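 /* udp_tunnel_{get,drop}_rx_info both need
  * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the
  * device, or they won't do anything.
  * Thus we need to update dev->features
  * *before* calling udp_tunnel_get_rx_info,
  * but *after* calling udp_tunnel_drop_rx_info.
  */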
10035 if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
10036 dev->features = features;
10037 udp_tunnel_get_rx_info(dev);
10038 } else {
10039 udp_tunnel_drop_rx_info(dev);
10040 }
10041 }
10042
10043 if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) {
10044 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
10045 dev->features = features;
10046 err |= vlan_get_rx_ctag_filter_info(dev);
10047 } else {
10048 vlan_drop_rx_ctag_filter_info(dev);
10049 }
10050 }
10051
10052 if (diff & NETIF_F_HW_VLAN_STAG_FILTER) {
10053 if (features & NETIF_F_HW_VLAN_STAG_FILTER) {
10054 dev->features = features;
10055 err |= vlan_get_rx_stag_filter_info(dev);
10056 } else {
10057 vlan_drop_rx_stag_filter_info(dev);
10058 }
10059 }
10060
10061 dev->features = features;
10062 }
10063
10064 return err < 0 ? 0 : 1;
10065}
10066
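/**
 * netdev_update_features - recalculate device features
 * @dev: the device to check
 *
 * Recalculate dev->features set and send notifications if it
 * has changed. Should be called after driver or hardware dependent
 * conditions might have changed that influence the features.
 */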
10075void netdev_update_features(struct net_device *dev)
10076{
10077 if (__netdev_update_features(dev))
10078 netdev_features_change(dev);
10079}
10080EXPORT_SYMBOL(netdev_update_features);
10081
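/**
 * netdev_change_features - recalculate device features
 * @dev: the device to check
 *
 * Recalculate dev->features set and send notifications even
 * if they have not changed. Should be called instead of
 * netdev_update_features() if also dev->vlan_features might
 * have changed to allow the changes to be propagated to stacked
 * VLAN devices.
 */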
10092void netdev_change_features(struct net_device *dev)
10093{
10094 __netdev_update_features(dev);
10095 netdev_features_change(dev);
10096}
10097EXPORT_SYMBOL(netdev_change_features);
10098
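/**
 * netif_stacked_transfer_operstate - transfer operstate
 * @rootdev: the root or lower level device to transfer state from
 * @dev: the device to transfer operstate to
 *
 * Transfer operational state (dormant, testing and carrier) from root
 * to device. This is normally called when a stacking relationship
 * exists between the root device and the device (a leaf device).
 */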
10108void netif_stacked_transfer_operstate(const struct net_device *rootdev,
10109 struct net_device *dev)
10110{
10111 if (rootdev->operstate == IF_OPER_DORMANT)
10112 netif_dormant_on(dev);
10113 else
10114 netif_dormant_off(dev);
10115
10116 if (rootdev->operstate == IF_OPER_TESTING)
10117 netif_testing_on(dev);
10118 else
10119 netif_testing_off(dev);
10120
10121 if (netif_carrier_ok(rootdev))
10122 netif_carrier_on(dev);
10123 else
10124 netif_carrier_off(dev);
10125}
10126EXPORT_SYMBOL(netif_stacked_transfer_operstate);
10127
10128static int netif_alloc_rx_queues(struct net_device *dev)
10129{
10130 unsigned int i, count = dev->num_rx_queues;
10131 struct netdev_rx_queue *rx;
10132 size_t sz = count * sizeof(*rx);
10133 int err = 0;
10134
10135 BUG_ON(count < 1);
10136
10137 rx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
10138 if (!rx)
10139 return -ENOMEM;
10140
10141 dev->_rx = rx;
10142
10143 for (i = 0; i < count; i++) {
10144 rx[i].dev = dev;
10145
10146
10147 err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i, 0);
10148 if (err < 0)
10149 goto err_rxq_info;
10150 }
10151 return 0;
10152
10153err_rxq_info:
10154
10155 while (i--)
10156 xdp_rxq_info_unreg(&rx[i].xdp_rxq);
10157 kvfree(dev->_rx);
10158 dev->_rx = NULL;
10159 return err;
10160}
10161
10162static void netif_free_rx_queues(struct net_device *dev)
10163{
10164 unsigned int i, count = dev->num_rx_queues;
10165
10166
10167 if (!dev->_rx)
10168 return;
10169
10170 for (i = 0; i < count; i++)
10171 xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq);
10172
10173 kvfree(dev->_rx);
10174}
10175
10176static void netdev_init_one_queue(struct net_device *dev,
10177 struct netdev_queue *queue, void *_unused)
10178{
10179
10180 spin_lock_init(&queue->_xmit_lock);
10181 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
10182 queue->xmit_lock_owner = -1;
10183 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
10184 queue->dev = dev;
10185#ifdef CONFIG_BQL
10186 dql_init(&queue->dql, HZ);
10187#endif
10188}
10189
10190static void netif_free_tx_queues(struct net_device *dev)
10191{
10192 kvfree(dev->_tx);
10193}
10194
10195static int netif_alloc_netdev_queues(struct net_device *dev)
10196{
10197 unsigned int count = dev->num_tx_queues;
10198 struct netdev_queue *tx;
10199 size_t sz = count * sizeof(*tx);
10200
10201 if (count < 1 || count > 0xffff)
10202 return -EINVAL;
10203
10204 tx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
10205 if (!tx)
10206 return -ENOMEM;
10207
10208 dev->_tx = tx;
10209
10210 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
10211 spin_lock_init(&dev->tx_global_lock);
10212
10213 return 0;
10214}
10215
10216void netif_tx_stop_all_queues(struct net_device *dev)
10217{
10218 unsigned int i;
10219
10220 for (i = 0; i < dev->num_tx_queues; i++) {
10221 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
10222
10223 netif_tx_stop_queue(txq);
10224 }
10225}
10226EXPORT_SYMBOL(netif_tx_stop_all_queues);
10227
/**
 *	register_netdevice	- register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	Callers must hold the rtnl semaphore. You may want
 *	register_netdev() instead of this.
 *
 *	BUGS:
 *	The locking appears insufficient to guarantee two parallel registers
 *	will not get the same name.
 */
10245int register_netdevice(struct net_device *dev)
10246{
10247 int ret;
10248 struct net *net = dev_net(dev);
10249
10250 BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE <
10251 NETDEV_FEATURE_COUNT);
10252 BUG_ON(dev_boot_phase);
10253 ASSERT_RTNL();
10254
10255 might_sleep();
10256
10257
10258 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
10259 BUG_ON(!net);
10260
10261 ret = ethtool_check_ops(dev->ethtool_ops);
10262 if (ret)
10263 return ret;
10264
10265 spin_lock_init(&dev->addr_list_lock);
10266 netdev_set_addr_lockdep_class(dev);
10267
10268 ret = dev_get_valid_name(net, dev, dev->name);
10269 if (ret < 0)
10270 goto out;
10271
10272 ret = -ENOMEM;
10273 dev->name_node = netdev_name_node_head_alloc(dev);
10274 if (!dev->name_node)
10275 goto out;
10276
10277
10278 if (dev->netdev_ops->ndo_init) {
10279 ret = dev->netdev_ops->ndo_init(dev);
10280 if (ret) {
10281 if (ret > 0)
10282 ret = -EIO;
10283 goto err_free_name;
10284 }
10285 }
10286
10287 if (((dev->hw_features | dev->features) &
10288 NETIF_F_HW_VLAN_CTAG_FILTER) &&
10289 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
10290 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
10291 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
10292 ret = -EINVAL;
10293 goto err_uninit;
10294 }
10295
10296 ret = -EBUSY;
10297 if (!dev->ifindex)
10298 dev->ifindex = dev_new_index(net);
10299 else if (__dev_get_by_index(net, dev->ifindex))
10300 goto err_uninit;
10301
	/* Transfer changeable features to wanted_features and enable
	 * software offloads (GSO and GRO).
	 */
10305 dev->hw_features |= (NETIF_F_SOFT_FEATURES | NETIF_F_SOFT_FEATURES_OFF);
10306 dev->features |= NETIF_F_SOFT_FEATURES;
10307
10308 if (dev->udp_tunnel_nic_info) {
10309 dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
10310 dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
10311 }
10312
10313 dev->wanted_features = dev->features & dev->hw_features;
10314
10315 if (!(dev->flags & IFF_LOOPBACK))
10316 dev->hw_features |= NETIF_F_NOCACHE_COPY;
10317
	/* If IPv4 TCP segmentation offload is supported we should also
	 * allow the device to enable segmenting the frame with the option
	 * of ignoring a static IP ID value.  This doesn't enable the
	 * feature itself but allows the user to enable it later.
	 */
10323 if (dev->hw_features & NETIF_F_TSO)
10324 dev->hw_features |= NETIF_F_TSO_MANGLEID;
10325 if (dev->vlan_features & NETIF_F_TSO)
10326 dev->vlan_features |= NETIF_F_TSO_MANGLEID;
10327 if (dev->mpls_features & NETIF_F_TSO)
10328 dev->mpls_features |= NETIF_F_TSO_MANGLEID;
10329 if (dev->hw_enc_features & NETIF_F_TSO)
10330 dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
10331
	/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
	 */
10334 dev->vlan_features |= NETIF_F_HIGHDMA;
10335
	/* Make NETIF_F_SG inheritable to tunnel devices.
	 */
10338 dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;
10339
	/* Make NETIF_F_SG inheritable to MPLS.
	 */
10342 dev->mpls_features |= NETIF_F_SG;
10343
10344 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
10345 ret = notifier_to_errno(ret);
10346 if (ret)
10347 goto err_uninit;
10348
10349 ret = netdev_register_kobject(dev);
10350 if (ret) {
10351 dev->reg_state = NETREG_UNREGISTERED;
10352 goto err_uninit;
10353 }
10354 dev->reg_state = NETREG_REGISTERED;
10355
10356 __netdev_update_features(dev);
10357
	/*
	 *	Default initial state at registry is that the
	 *	device is present.
	 */

10363 set_bit(__LINK_STATE_PRESENT, &dev->state);
10364
10365 linkwatch_init_dev(dev);
10366
10367 dev_init_scheduler(dev);
10368 dev_hold(dev);
10369 list_netdevice(dev);
10370 add_device_randomness(dev->dev_addr, dev->addr_len);
10371
	/* If the device has permanent device address, driver should
	 * set dev_addr and also addr_assign_type should be set to
	 * NET_ADDR_PERM (default value).
	 */
10376 if (dev->addr_assign_type == NET_ADDR_PERM)
10377 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
10378
10379
10380 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
10381 ret = notifier_to_errno(ret);
10382 if (ret) {
10383
10384 dev->needs_free_netdev = false;
10385 unregister_netdevice_queue(dev, NULL);
10386 goto out;
10387 }
10388
	/* Prevent userspace races by waiting until the network
	 * device is fully setup before sending notifications.
	 */
10392 if (!dev->rtnl_link_ops ||
10393 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
10394 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
10395
10396out:
10397 return ret;
10398
10399err_uninit:
10400 if (dev->netdev_ops->ndo_uninit)
10401 dev->netdev_ops->ndo_uninit(dev);
10402 if (dev->priv_destructor)
10403 dev->priv_destructor(dev);
10404err_free_name:
10405 netdev_name_node_free(dev->name_node);
10406 goto out;
10407}
10408EXPORT_SYMBOL(register_netdevice);
10409
/**
 *	init_dummy_netdev	- init a dummy network device for NAPI
 *	@dev: device to init
 *
 *	This takes a network device structure and initializes the minimum
 *	amount of fields so it can be used to schedule NAPI polls without
 *	registering a full blown interface. This is to be used by drivers
 *	that need to tie several hardware interfaces to a single NAPI
 *	poll scheduler due to HW limitations.
 */
10420int init_dummy_netdev(struct net_device *dev)
10421{
	/* Clear everything. Note we don't initialize spinlocks
	 * as they aren't supposed to be taken by any of the
	 * NAPI code and this dummy netdev is supposed to be
	 * only ever used for NAPI polls
	 */
10427 memset(dev, 0, sizeof(struct net_device));
10428
	/* make sure we BUG if trying to hit standard
	 * register/unregister code path
	 */
10432 dev->reg_state = NETREG_DUMMY;
10433
10434
10435 INIT_LIST_HEAD(&dev->napi_list);
10436
10437
10438 set_bit(__LINK_STATE_PRESENT, &dev->state);
10439 set_bit(__LINK_STATE_START, &dev->state);
10440
10441
10442 dev_net_set(dev, &init_net);
10443
	/* Note : We don't allocate pcpu_refcnt for dummy devices,
	 * because users of this 'device' don't need to change
	 * its refcount.
	 */

10449 return 0;
10450}
10451EXPORT_SYMBOL_GPL(init_dummy_netdev);
10452
/**
 *	register_netdev	- register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	This is a wrapper around register_netdevice() that takes the rtnl
 *	semaphore and expands the device name if you passed a format string
 *	to alloc_netdev.
 */
10467int register_netdev(struct net_device *dev)
10468{
10469 int err;
10470
10471 if (rtnl_lock_killable())
10472 return -EINTR;
10473 err = register_netdevice(dev);
10474 rtnl_unlock();
10475 return err;
10476}
10477EXPORT_SYMBOL(register_netdev);
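
/* Usage sketch (illustrative; struct my_priv, my_setup and my_netdev_ops are
 * hypothetical driver symbols): a typical probe path allocates the device,
 * fills in its ops and registers it, freeing it again on failure.
 *
 *	dev = alloc_netdev(sizeof(struct my_priv), "myif%d", NET_NAME_UNKNOWN,
 *			   my_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	dev->netdev_ops = &my_netdev_ops;
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 */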
10478
10479int netdev_refcnt_read(const struct net_device *dev)
10480{
10481#ifdef CONFIG_PCPU_DEV_REFCNT
10482 int i, refcnt = 0;
10483
10484 for_each_possible_cpu(i)
10485 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
10486 return refcnt;
10487#else
10488 return refcount_read(&dev->dev_refcnt);
10489#endif
10490}
10491EXPORT_SYMBOL(netdev_refcnt_read);
10492
10493int netdev_unregister_timeout_secs __read_mostly = 10;
10494
10495#define WAIT_REFS_MIN_MSECS 1
10496#define WAIT_REFS_MAX_MSECS 250
10497
/* netdev_wait_allrefs - wait until all references are gone.
 *
 * This is called when unregistering network devices.
 *
 * Any protocol or device that holds a reference should register
 * for netdevice notification, and cleanup and put back the
 * reference if they receive an UNREGISTER event.
 * We can get stuck here if buggy protocols don't correctly
 * call dev_put.
 */
10509static void netdev_wait_allrefs(struct net_device *dev)
10510{
10511 unsigned long rebroadcast_time, warning_time;
10512 int wait = 0, refcnt;
10513
10514 linkwatch_forget_dev(dev);
10515
10516 rebroadcast_time = warning_time = jiffies;
10517 refcnt = netdev_refcnt_read(dev);
10518
10519 while (refcnt != 1) {
10520 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
10521 rtnl_lock();
10522
10523
10524 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
10525
10526 __rtnl_unlock();
10527 rcu_barrier();
10528 rtnl_lock();
10529
10530 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
10531 &dev->state)) {
				/* We must not have linkwatch events
				 * pending on unregister. If this
				 * happens, we simply run the queue
				 * unscheduled, resulting in a noop
				 * for this device.
				 */
10538 linkwatch_run_queue();
10539 }
10540
10541 __rtnl_unlock();
10542
10543 rebroadcast_time = jiffies;
10544 }
10545
10546 if (!wait) {
10547 rcu_barrier();
10548 wait = WAIT_REFS_MIN_MSECS;
10549 } else {
10550 msleep(wait);
10551 wait = min(wait << 1, WAIT_REFS_MAX_MSECS);
10552 }
10553
10554 refcnt = netdev_refcnt_read(dev);
10555
10556 if (refcnt != 1 &&
10557 time_after(jiffies, warning_time +
10558 netdev_unregister_timeout_secs * HZ)) {
10559 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
10560 dev->name, refcnt);
10561 warning_time = jiffies;
10562 }
10563 }
10564}
10565
/* The sequence is:
 *
 *	rtnl_lock();
 *	...
 *	register_netdevice(x1);
 *	register_netdevice(x2);
 *	...
 *	unregister_netdevice(y1);
 *	unregister_netdevice(y2);
 *	...
 *	rtnl_unlock();
 *	free_netdev(y1);
 *	free_netdev(y2);
 *
 * We are invoked by rtnl_unlock().
 * This allows us to deal with problems:
 * 1) We can delete sysfs objects which invoke hotplug
 *    without deadlocking with linkwatch via keventd.
 * 2) Since we run with the RTNL semaphore not held, we can sleep
 *    safely in order to wait for the netdev refcnt to drop to zero.
 *
 * We must not return until all unregister events added during
 * the interval the lock was held have been completed.
 */
10590void netdev_run_todo(void)
10591{
10592 struct list_head list;
10593#ifdef CONFIG_LOCKDEP
10594 struct list_head unlink_list;
10595
10596 list_replace_init(&net_unlink_list, &unlink_list);
10597
10598 while (!list_empty(&unlink_list)) {
10599 struct net_device *dev = list_first_entry(&unlink_list,
10600 struct net_device,
10601 unlink_list);
10602 list_del_init(&dev->unlink_list);
10603 dev->nested_level = dev->lower_level - 1;
10604 }
10605#endif
10606
10607
10608 list_replace_init(&net_todo_list, &list);
10609
10610 __rtnl_unlock();
10611
10612
10613
10614 if (!list_empty(&list))
10615 rcu_barrier();
10616
10617 while (!list_empty(&list)) {
10618 struct net_device *dev
10619 = list_first_entry(&list, struct net_device, todo_list);
10620 list_del(&dev->todo_list);
10621
10622 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
10623 pr_err("network todo '%s' but state %d\n",
10624 dev->name, dev->reg_state);
10625 dump_stack();
10626 continue;
10627 }
10628
10629 dev->reg_state = NETREG_UNREGISTERED;
10630
10631 netdev_wait_allrefs(dev);
10632
10633
10634 BUG_ON(netdev_refcnt_read(dev) != 1);
10635 BUG_ON(!list_empty(&dev->ptype_all));
10636 BUG_ON(!list_empty(&dev->ptype_specific));
10637 WARN_ON(rcu_access_pointer(dev->ip_ptr));
10638 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
10639#if IS_ENABLED(CONFIG_DECNET)
10640 WARN_ON(dev->dn_ptr);
10641#endif
10642 if (dev->priv_destructor)
10643 dev->priv_destructor(dev);
10644 if (dev->needs_free_netdev)
10645 free_netdev(dev);
10646
10647
10648 rtnl_lock();
10649 dev_net(dev)->dev_unreg_count--;
10650 __rtnl_unlock();
10651 wake_up(&netdev_unregistering_wq);
10652
10653
10654 kobject_put(&dev->dev.kobj);
10655 }
10656}
10657
/* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
 * all the same fields in the same order as net_device_stats, with only
 * the type differing, but rtnl_link_stats64 may have additional fields
 * at the end for newer counters.
 */
10663void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
10664 const struct net_device_stats *netdev_stats)
10665{
10666#if BITS_PER_LONG == 64
10667 BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
10668 memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
10669
10670 memset((char *)stats64 + sizeof(*netdev_stats), 0,
10671 sizeof(*stats64) - sizeof(*netdev_stats));
10672#else
10673 size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
10674 const unsigned long *src = (const unsigned long *)netdev_stats;
10675 u64 *dst = (u64 *)stats64;
10676
10677 BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
10678 for (i = 0; i < n; i++)
10679 dst[i] = src[i];
10680
10681 memset((char *)stats64 + n * sizeof(u64), 0,
10682 sizeof(*stats64) - n * sizeof(u64));
10683#endif
10684}
10685EXPORT_SYMBOL(netdev_stats_to_stats64);
10686
/**
 *	dev_get_stats	- get network device statistics
 *	@dev: device to get statistics from
 *	@storage: place to store stats
 *
 *	Get network statistics from device. Return @storage.
 *	The device driver may provide its own method by setting
 *	dev->netdev_ops->ndo_get_stats64 or dev->netdev_ops->ndo_get_stats;
 *	otherwise the internal statistics structure is used.
 */
10697struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
10698 struct rtnl_link_stats64 *storage)
10699{
10700 const struct net_device_ops *ops = dev->netdev_ops;
10701
10702 if (ops->ndo_get_stats64) {
10703 memset(storage, 0, sizeof(*storage));
10704 ops->ndo_get_stats64(dev, storage);
10705 } else if (ops->ndo_get_stats) {
10706 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
10707 } else {
10708 netdev_stats_to_stats64(storage, &dev->stats);
10709 }
10710 storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
10711 storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
10712 storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
10713 return storage;
10714}
10715EXPORT_SYMBOL(dev_get_stats);
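
/* Usage sketch (illustrative): a caller that wants a consistent 64-bit
 * snapshot fills a local structure and lets dev_get_stats() pick whichever
 * source the driver provides:
 *
 *	struct rtnl_link_stats64 stats;
 *
 *	dev_get_stats(dev, &stats);
 *	pr_debug("%s: rx_packets=%llu\n", dev->name, stats.rx_packets);
 */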
10716
/**
 *	dev_fetch_sw_netstats - get per-cpu network device statistics
 *	@s: place to store stats
 *	@netstats: per-cpu network stats to read from
 *
 *	Read per-cpu network statistics and populate the related fields in @s.
 */
10724void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
10725 const struct pcpu_sw_netstats __percpu *netstats)
10726{
10727 int cpu;
10728
10729 for_each_possible_cpu(cpu) {
10730 const struct pcpu_sw_netstats *stats;
10731 struct pcpu_sw_netstats tmp;
10732 unsigned int start;
10733
10734 stats = per_cpu_ptr(netstats, cpu);
10735 do {
10736 start = u64_stats_fetch_begin_irq(&stats->syncp);
10737 tmp.rx_packets = stats->rx_packets;
10738 tmp.rx_bytes = stats->rx_bytes;
10739 tmp.tx_packets = stats->tx_packets;
10740 tmp.tx_bytes = stats->tx_bytes;
10741 } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
10742
10743 s->rx_packets += tmp.rx_packets;
10744 s->rx_bytes += tmp.rx_bytes;
10745 s->tx_packets += tmp.tx_packets;
10746 s->tx_bytes += tmp.tx_bytes;
10747 }
10748}
10749EXPORT_SYMBOL_GPL(dev_fetch_sw_netstats);
10750
/**
 *	dev_get_tstats64 - ndo_get_stats64 implementation
 *	@dev: device to get statistics from
 *	@s: place to store stats
 *
 *	Populate @s from dev->stats and dev->tstats. Can be used as the
 *	ndo_get_stats64() callback of drivers that account their traffic
 *	in dev->tstats.
 */
10759void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s)
10760{
10761 netdev_stats_to_stats64(s, &dev->stats);
10762 dev_fetch_sw_netstats(s, dev->tstats);
10763}
10764EXPORT_SYMBOL_GPL(dev_get_tstats64);
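
/* Usage sketch (illustrative): a driver that accounts traffic in dev->tstats
 * allocates the per-cpu counters at init time and can then point its
 * ndo_get_stats64 callback at dev_get_tstats64():
 *
 *	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
 *	if (!dev->tstats)
 *		return -ENOMEM;
 *	...
 *	.ndo_get_stats64 = dev_get_tstats64,
 */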
10765
10766struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
10767{
10768 struct netdev_queue *queue = dev_ingress_queue(dev);
10769
10770#ifdef CONFIG_NET_CLS_ACT
10771 if (queue)
10772 return queue;
10773 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
10774 if (!queue)
10775 return NULL;
10776 netdev_init_one_queue(dev, queue, NULL);
10777 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
10778 queue->qdisc_sleeping = &noop_qdisc;
10779 rcu_assign_pointer(dev->ingress_queue, queue);
10780#endif
10781 return queue;
10782}
10783
10784static const struct ethtool_ops default_ethtool_ops;
10785
10786void netdev_set_default_ethtool_ops(struct net_device *dev,
10787 const struct ethtool_ops *ops)
10788{
10789 if (dev->ethtool_ops == &default_ethtool_ops)
10790 dev->ethtool_ops = ops;
10791}
10792EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
10793
10794void netdev_freemem(struct net_device *dev)
10795{
10796 char *addr = (char *)dev - dev->padded;
10797
10798 kvfree(addr);
10799}
10800
/**
 * alloc_netdev_mqs - allocate network device
 * @sizeof_priv: size of private data to allocate space for
 * @name: device name format string
 * @name_assign_type: origin of device name
 * @setup: callback to initialize device
 * @txqs: the number of TX subqueues to allocate
 * @rxqs: the number of RX subqueues to allocate
 *
 * Allocates a struct net_device with private data area for driver use
 * and performs basic initialization.  Also allocates subqueue structs
 * for each queue on the device.
 */
10814struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
10815 unsigned char name_assign_type,
10816 void (*setup)(struct net_device *),
10817 unsigned int txqs, unsigned int rxqs)
10818{
10819 struct net_device *dev;
10820 unsigned int alloc_size;
10821 struct net_device *p;
10822
10823 BUG_ON(strlen(name) >= sizeof(dev->name));
10824
10825 if (txqs < 1) {
10826 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
10827 return NULL;
10828 }
10829
10830 if (rxqs < 1) {
10831 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
10832 return NULL;
10833 }
10834
10835 alloc_size = sizeof(struct net_device);
10836 if (sizeof_priv) {
10837
10838 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
10839 alloc_size += sizeof_priv;
10840 }
10841
10842 alloc_size += NETDEV_ALIGN - 1;
10843
10844 p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
10845 if (!p)
10846 return NULL;
10847
10848 dev = PTR_ALIGN(p, NETDEV_ALIGN);
10849 dev->padded = (char *)dev - (char *)p;
10850
10851#ifdef CONFIG_PCPU_DEV_REFCNT
10852 dev->pcpu_refcnt = alloc_percpu(int);
10853 if (!dev->pcpu_refcnt)
10854 goto free_dev;
10855 dev_hold(dev);
10856#else
10857 refcount_set(&dev->dev_refcnt, 1);
10858#endif
10859
10860 if (dev_addr_init(dev))
10861 goto free_pcpu;
10862
10863 dev_mc_init(dev);
10864 dev_uc_init(dev);
10865
10866 dev_net_set(dev, &init_net);
10867
10868 dev->gso_max_size = GSO_MAX_SIZE;
10869 dev->gso_max_segs = GSO_MAX_SEGS;
10870 dev->upper_level = 1;
10871 dev->lower_level = 1;
10872#ifdef CONFIG_LOCKDEP
10873 dev->nested_level = 0;
10874 INIT_LIST_HEAD(&dev->unlink_list);
10875#endif
10876
10877 INIT_LIST_HEAD(&dev->napi_list);
10878 INIT_LIST_HEAD(&dev->unreg_list);
10879 INIT_LIST_HEAD(&dev->close_list);
10880 INIT_LIST_HEAD(&dev->link_watch_list);
10881 INIT_LIST_HEAD(&dev->adj_list.upper);
10882 INIT_LIST_HEAD(&dev->adj_list.lower);
10883 INIT_LIST_HEAD(&dev->ptype_all);
10884 INIT_LIST_HEAD(&dev->ptype_specific);
10885 INIT_LIST_HEAD(&dev->net_notifier_list);
10886#ifdef CONFIG_NET_SCHED
10887 hash_init(dev->qdisc_hash);
10888#endif
10889 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
10890 setup(dev);
10891
10892 if (!dev->tx_queue_len) {
10893 dev->priv_flags |= IFF_NO_QUEUE;
10894 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
10895 }
10896
10897 dev->num_tx_queues = txqs;
10898 dev->real_num_tx_queues = txqs;
10899 if (netif_alloc_netdev_queues(dev))
10900 goto free_all;
10901
10902 dev->num_rx_queues = rxqs;
10903 dev->real_num_rx_queues = rxqs;
10904 if (netif_alloc_rx_queues(dev))
10905 goto free_all;
10906
10907 strcpy(dev->name, name);
10908 dev->name_assign_type = name_assign_type;
10909 dev->group = INIT_NETDEV_GROUP;
10910 if (!dev->ethtool_ops)
10911 dev->ethtool_ops = &default_ethtool_ops;
10912
10913 nf_hook_ingress_init(dev);
10914
10915 return dev;
10916
10917free_all:
10918 free_netdev(dev);
10919 return NULL;
10920
10921free_pcpu:
10922#ifdef CONFIG_PCPU_DEV_REFCNT
10923 free_percpu(dev->pcpu_refcnt);
10924free_dev:
10925#endif
10926 netdev_freemem(dev);
10927 return NULL;
10928}
10929EXPORT_SYMBOL(alloc_netdev_mqs);
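
/* Usage sketch (illustrative; my_setup and struct my_priv are hypothetical):
 * a multiqueue driver asking for 8 TX and 8 RX queues plus private state
 * reachable through netdev_priv():
 *
 *	dev = alloc_netdev_mqs(sizeof(struct my_priv), "myif%d", NET_NAME_ENUM,
 *			       my_setup, 8, 8);
 *	if (!dev)
 *		return -ENOMEM;
 *	priv = netdev_priv(dev);
 */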
10930
/**
 * free_netdev - free network device
 * @dev: device
 *
 * This function does the last stage of destroying an allocated device
 * interface. The reference to the device object is released. If this
 * is the last reference then it will be freed. Must be called in process
 * context.
 */
10940void free_netdev(struct net_device *dev)
10941{
10942 struct napi_struct *p, *n;
10943
10944 might_sleep();
10945
	/* When called immediately after register_netdevice() failed the unwind
	 * handling may still be dismantling the device. Handle that case by
	 * deferring the free.
	 */
10950 if (dev->reg_state == NETREG_UNREGISTERING) {
10951 ASSERT_RTNL();
10952 dev->needs_free_netdev = true;
10953 return;
10954 }
10955
10956 netif_free_tx_queues(dev);
10957 netif_free_rx_queues(dev);
10958
10959 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
10960
10961
10962 dev_addr_flush(dev);
10963
10964 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
10965 netif_napi_del(p);
10966
10967#ifdef CONFIG_PCPU_DEV_REFCNT
10968 free_percpu(dev->pcpu_refcnt);
10969 dev->pcpu_refcnt = NULL;
10970#endif
10971 free_percpu(dev->xdp_bulkq);
10972 dev->xdp_bulkq = NULL;
10973
10974
10975 if (dev->reg_state == NETREG_UNINITIALIZED) {
10976 netdev_freemem(dev);
10977 return;
10978 }
10979
10980 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
10981 dev->reg_state = NETREG_RELEASED;
10982
10983
10984 put_device(&dev->dev);
10985}
10986EXPORT_SYMBOL(free_netdev);
10987
/**
 *	synchronize_net -  Synchronize with packet receive processing
 *
 *	Wait for packets currently being received to be done.
 *	Does not block later packets from starting.
 */
10994void synchronize_net(void)
10995{
10996 might_sleep();
10997 if (rtnl_is_locked())
10998 synchronize_rcu_expedited();
10999 else
11000 synchronize_rcu();
11001}
11002EXPORT_SYMBOL(synchronize_net);
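
/* Usage sketch (illustrative): after unpublishing an RCU-protected pointer
 * that receive paths may still be dereferencing, wait for in-flight packet
 * processing before freeing the old object (priv->cfg and old_cfg are
 * hypothetical):
 *
 *	RCU_INIT_POINTER(priv->cfg, NULL);
 *	synchronize_net();
 *	kfree(old_cfg);
 */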
11003
/**
 *	unregister_netdevice_queue - remove device from the kernel
 *	@dev: device
 *	@head: list
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *	If head not NULL, device is queued to be unregistered later.
 *
 *	Callers must hold the rtnl semaphore.  You may want
 *	unregister_netdev() instead of this.
 */
11017void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
11018{
11019 ASSERT_RTNL();
11020
11021 if (head) {
11022 list_move_tail(&dev->unreg_list, head);
11023 } else {
11024 LIST_HEAD(single);
11025
11026 list_add(&dev->unreg_list, &single);
11027 unregister_netdevice_many(&single);
11028 }
11029}
11030EXPORT_SYMBOL(unregister_netdevice_queue);
11031
/**
 *	unregister_netdevice_many - unregister many devices
 *	@head: list of devices
 *
 *	Note: As most callers use a stack allocated list_head,
 *	we force a list_del() to make sure stack won't be corrupted later.
 */
11039void unregister_netdevice_many(struct list_head *head)
11040{
11041 struct net_device *dev, *tmp;
11042 LIST_HEAD(close_head);
11043
11044 BUG_ON(dev_boot_phase);
11045 ASSERT_RTNL();
11046
11047 if (list_empty(head))
11048 return;
11049
11050 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
		/* Some devices call without registering
		 * for initialization unwind. Remove those
		 * devices and proceed with the remaining.
		 */
11055 if (dev->reg_state == NETREG_UNINITIALIZED) {
11056 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
11057 dev->name, dev);
11058
11059 WARN_ON(1);
11060 list_del(&dev->unreg_list);
11061 continue;
11062 }
11063 dev->dismantle = true;
11064 BUG_ON(dev->reg_state != NETREG_REGISTERED);
11065 }
11066
11067
11068 list_for_each_entry(dev, head, unreg_list)
11069 list_add_tail(&dev->close_list, &close_head);
11070 dev_close_many(&close_head, true);
11071
11072 list_for_each_entry(dev, head, unreg_list) {
11073
11074 unlist_netdevice(dev);
11075
11076 dev->reg_state = NETREG_UNREGISTERING;
11077 }
11078 flush_all_backlogs();
11079
11080 synchronize_net();
11081
11082 list_for_each_entry(dev, head, unreg_list) {
11083 struct sk_buff *skb = NULL;
11084
11085
11086 dev_shutdown(dev);
11087
11088 dev_xdp_uninstall(dev);
11089

		/* Notify protocols, that we are about to destroy
		 * this device. They should clean all the things.
		 */
11093 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
11094
11095 if (!dev->rtnl_link_ops ||
11096 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
11097 skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
11098 GFP_KERNEL, NULL, 0);
11099
		/*
		 *	Flush the unicast and multicast chains
		 */
11103 dev_uc_flush(dev);
11104 dev_mc_flush(dev);
11105
11106 netdev_name_node_alt_flush(dev);
11107 netdev_name_node_free(dev->name_node);
11108
11109 if (dev->netdev_ops->ndo_uninit)
11110 dev->netdev_ops->ndo_uninit(dev);
11111
11112 if (skb)
11113 rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
11114
11115
11116 WARN_ON(netdev_has_any_upper_dev(dev));
11117 WARN_ON(netdev_has_any_lower_dev(dev));
11118
11119
11120 netdev_unregister_kobject(dev);
11121#ifdef CONFIG_XPS
11122
11123 netif_reset_xps_queues_gt(dev, 0);
11124#endif
11125 }
11126
11127 synchronize_net();
11128
11129 list_for_each_entry(dev, head, unreg_list) {
11130 dev_put(dev);
11131 net_set_todo(dev);
11132 }
11133
11134 list_del(head);
11135}
11136EXPORT_SYMBOL(unregister_netdevice_many);
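
/* Usage sketch (illustrative): callers tearing down several devices batch
 * them on a stack list under RTNL so the expensive notifier and
 * synchronize_net() phases run once per batch instead of once per device:
 *
 *	LIST_HEAD(kill_list);
 *
 *	ASSERT_RTNL();
 *	unregister_netdevice_queue(dev1, &kill_list);
 *	unregister_netdevice_queue(dev2, &kill_list);
 *	unregister_netdevice_many(&kill_list);
 */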
11137
/**
 *	unregister_netdev - remove device from the kernel
 *	@dev: device
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *
 *	This is just a wrapper for unregister_netdevice that takes
 *	the rtnl semaphore.  In general you want to use this and not
 *	unregister_netdevice.
 */
11149void unregister_netdev(struct net_device *dev)
11150{
11151 rtnl_lock();
11152 unregister_netdevice(dev);
11153 rtnl_unlock();
11154}
11155EXPORT_SYMBOL(unregister_netdev);
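
/* Usage sketch (illustrative): the mirror image of register_netdev() in a
 * driver remove path; free_netdev() may only run once the device has been
 * fully unregistered:
 *
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 */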
11156
/**
 *	__dev_change_net_namespace - move device to different nethost namespace
 *	@dev: device
 *	@net: network namespace
 *	@pat: If not NULL name pattern to try if the current device name
 *	      is already taken in the destination network namespace.
 *	@new_ifindex: If not zero, specifies device index in the target
 *	              namespace.
 *
 *	This function shuts down a device interface and moves it
 *	to a new network namespace. On success 0 is returned, on
 *	a failure a negative errno code is returned.
 *
 *	Callers must hold the rtnl semaphore.
 */
11173int __dev_change_net_namespace(struct net_device *dev, struct net *net,
11174 const char *pat, int new_ifindex)
11175{
11176 struct net *net_old = dev_net(dev);
11177 int err, new_nsid;
11178
11179 ASSERT_RTNL();
11180
11181
11182 err = -EINVAL;
11183 if (dev->features & NETIF_F_NETNS_LOCAL)
11184 goto out;
11185
11186
11187 if (dev->reg_state != NETREG_REGISTERED)
11188 goto out;
11189
11190
11191 err = 0;
11192 if (net_eq(net_old, net))
11193 goto out;
11194
	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
11198 err = -EEXIST;
11199 if (__dev_get_by_name(net, dev->name)) {
11200
11201 if (!pat)
11202 goto out;
11203 err = dev_get_valid_name(net, dev, pat);
11204 if (err < 0)
11205 goto out;
11206 }
11207
11208
11209 err = -EBUSY;
11210 if (new_ifindex && __dev_get_by_index(net, new_ifindex))
11211 goto out;
11212
	/*
	 * And now a mini version of register_netdevice unregister_netdevice.
	 */

	/* If device is running close it first. */
11218 dev_close(dev);
11219
11220
11221 unlist_netdevice(dev);
11222
11223 synchronize_net();
11224
11225
11226 dev_shutdown(dev);
11227
	/* Notify protocols, that we are about to destroy
	 * this device. They should clean all the things.
	 *
	 * Note that dev->reg_state stays at NETREG_REGISTERED.
	 * This is wanted because this way 8021q and macvlan know
	 * the device is just moving and can keep their slaves up.
	 */
11235 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
11236 rcu_barrier();
11237
11238 new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL);
11239
11240 if (!new_ifindex) {
11241 if (__dev_get_by_index(net, dev->ifindex))
11242 new_ifindex = dev_new_index(net);
11243 else
11244 new_ifindex = dev->ifindex;
11245 }
11246
11247 rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid,
11248 new_ifindex);
11249
	/*
	 *	Flush the unicast and multicast chains
	 */
11253 dev_uc_flush(dev);
11254 dev_mc_flush(dev);
11255
11256
11257 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
11258 netdev_adjacent_del_links(dev);
11259
11260
11261 move_netdevice_notifiers_dev_net(dev, net);
11262
11263
11264 dev_net_set(dev, net);
11265 dev->ifindex = new_ifindex;
11266
11267
11268 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
11269 netdev_adjacent_add_links(dev);
11270
11271
11272 err = device_rename(&dev->dev, dev->name);
11273 WARN_ON(err);
11274
	/* Adapt owner in case owning user namespace of target network
	 * namespace is different from the original one.
	 */
11278 err = netdev_change_owner(dev, net_old, net);
11279 WARN_ON(err);
11280
11281
11282 list_netdevice(dev);
11283
11284
11285 call_netdevice_notifiers(NETDEV_REGISTER, dev);
11286
	/*
	 *	Prevent userspace races by waiting until the network
	 *	device is fully setup before sending notifications.
	 */
11291 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
11292
11293 synchronize_net();
11294 err = 0;
11295out:
11296 return err;
11297}
11298EXPORT_SYMBOL_GPL(__dev_change_net_namespace);
11299
11300static int dev_cpu_dead(unsigned int oldcpu)
11301{
11302 struct sk_buff **list_skb;
11303 struct sk_buff *skb;
11304 unsigned int cpu;
11305 struct softnet_data *sd, *oldsd, *remsd = NULL;
11306
11307 local_irq_disable();
11308 cpu = smp_processor_id();
11309 sd = &per_cpu(softnet_data, cpu);
11310 oldsd = &per_cpu(softnet_data, oldcpu);
11311
11312
11313 list_skb = &sd->completion_queue;
11314 while (*list_skb)
11315 list_skb = &(*list_skb)->next;
11316
11317 *list_skb = oldsd->completion_queue;
11318 oldsd->completion_queue = NULL;
11319
11320
11321 if (oldsd->output_queue) {
11322 *sd->output_queue_tailp = oldsd->output_queue;
11323 sd->output_queue_tailp = oldsd->output_queue_tailp;
11324 oldsd->output_queue = NULL;
11325 oldsd->output_queue_tailp = &oldsd->output_queue;
11326 }
	/* Append NAPI poll list from offline CPU, with one exception :
	 * process_backlog() must be called by cpu owning percpu backlog.
	 * We properly handle process_queue & input_pkt_queue later.
	 */
11331 while (!list_empty(&oldsd->poll_list)) {
11332 struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
11333 struct napi_struct,
11334 poll_list);
11335
11336 list_del_init(&napi->poll_list);
11337 if (napi->poll == process_backlog)
11338 napi->state = 0;
11339 else
11340 ____napi_schedule(sd, napi);
11341 }
11342
11343 raise_softirq_irqoff(NET_TX_SOFTIRQ);
11344 local_irq_enable();
11345
11346#ifdef CONFIG_RPS
11347 remsd = oldsd->rps_ipi_list;
11348 oldsd->rps_ipi_list = NULL;
11349#endif
11350
11351 net_rps_send_ipi(remsd);
11352
11353
11354 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
11355 netif_rx_ni(skb);
11356 input_queue_head_incr(oldsd);
11357 }
11358 while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
11359 netif_rx_ni(skb);
11360 input_queue_head_incr(oldsd);
11361 }
11362
11363 return 0;
11364}
11365
/**
 *	netdev_increment_features - increment feature set by one
 *	@all: current feature set
 *	@one: new feature set
 *	@mask: mask feature set
 *
 *	Computes a new feature set after adding a device with feature set
 *	@one to the master device with current feature set @all.  Will not
 *	enable anything that is off in @mask. Returns the new feature set.
 */
11376netdev_features_t netdev_increment_features(netdev_features_t all,
11377 netdev_features_t one, netdev_features_t mask)
11378{
11379 if (mask & NETIF_F_HW_CSUM)
11380 mask |= NETIF_F_CSUM_MASK;
11381 mask |= NETIF_F_VLAN_CHALLENGED;
11382
11383 all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
11384 all &= one | ~NETIF_F_ALL_FOR_ALL;
11385
11386
11387 if (all & NETIF_F_HW_CSUM)
11388 all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);
11389
11390 return all;
11391}
11392EXPORT_SYMBOL(netdev_increment_features);
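
/* Usage sketch (illustrative; slaves, slave->dev and UPPER_FEATURE_MASK are
 * hypothetical): an upper device such as a bond or bridge can fold each
 * lower device's feature set into its own:
 *
 *	netdev_features_t features = UPPER_FEATURE_MASK & NETIF_F_ALL_FOR_ALL;
 *
 *	list_for_each_entry(slave, &slaves, list)
 *		features = netdev_increment_features(features,
 *						     slave->dev->features,
 *						     UPPER_FEATURE_MASK);
 */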
11393
11394static struct hlist_head * __net_init netdev_create_hash(void)
11395{
11396 int i;
11397 struct hlist_head *hash;
11398
11399 hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL);
11400 if (hash != NULL)
11401 for (i = 0; i < NETDEV_HASHENTRIES; i++)
11402 INIT_HLIST_HEAD(&hash[i]);
11403
11404 return hash;
11405}
11406
11407
11408static int __net_init netdev_init(struct net *net)
11409{
11410 BUILD_BUG_ON(GRO_HASH_BUCKETS >
11411 8 * sizeof_field(struct napi_struct, gro_bitmask));
11412
11413 if (net != &init_net)
11414 INIT_LIST_HEAD(&net->dev_base_head);
11415
11416 net->dev_name_head = netdev_create_hash();
11417 if (net->dev_name_head == NULL)
11418 goto err_name;
11419
11420 net->dev_index_head = netdev_create_hash();
11421 if (net->dev_index_head == NULL)
11422 goto err_idx;
11423
11424 RAW_INIT_NOTIFIER_HEAD(&net->netdev_chain);
11425
11426 return 0;
11427
11428err_idx:
11429 kfree(net->dev_name_head);
11430err_name:
11431 return -ENOMEM;
11432}
11433
/**
 *	netdev_drivername - network driver for the device
 *	@dev: network device
 *
 *	Determine network driver for device.
 */
11440const char *netdev_drivername(const struct net_device *dev)
11441{
11442 const struct device_driver *driver;
11443 const struct device *parent;
11444 const char *empty = "";
11445
11446 parent = dev->dev.parent;
11447 if (!parent)
11448 return empty;
11449
11450 driver = parent->driver;
11451 if (driver && driver->name)
11452 return driver->name;
11453 return empty;
11454}
11455
11456static void __netdev_printk(const char *level, const struct net_device *dev,
11457 struct va_format *vaf)
11458{
11459 if (dev && dev->dev.parent) {
11460 dev_printk_emit(level[1] - '0',
11461 dev->dev.parent,
11462 "%s %s %s%s: %pV",
11463 dev_driver_string(dev->dev.parent),
11464 dev_name(dev->dev.parent),
11465 netdev_name(dev), netdev_reg_state(dev),
11466 vaf);
11467 } else if (dev) {
11468 printk("%s%s%s: %pV",
11469 level, netdev_name(dev), netdev_reg_state(dev), vaf);
11470 } else {
11471 printk("%s(NULL net_device): %pV", level, vaf);
11472 }
11473}
11474
11475void netdev_printk(const char *level, const struct net_device *dev,
11476 const char *format, ...)
11477{
11478 struct va_format vaf;
11479 va_list args;
11480
11481 va_start(args, format);
11482
11483 vaf.fmt = format;
11484 vaf.va = &args;
11485
11486 __netdev_printk(level, dev, &vaf);
11487
11488 va_end(args);
11489}
11490EXPORT_SYMBOL(netdev_printk);
11491
11492#define define_netdev_printk_level(func, level) \
11493void func(const struct net_device *dev, const char *fmt, ...) \
11494{ \
11495 struct va_format vaf; \
11496 va_list args; \
11497 \
11498 va_start(args, fmt); \
11499 \
11500 vaf.fmt = fmt; \
11501 vaf.va = &args; \
11502 \
11503 __netdev_printk(level, dev, &vaf); \
11504 \
11505 va_end(args); \
11506} \
11507EXPORT_SYMBOL(func);
11508
11509define_netdev_printk_level(netdev_emerg, KERN_EMERG);
11510define_netdev_printk_level(netdev_alert, KERN_ALERT);
11511define_netdev_printk_level(netdev_crit, KERN_CRIT);
11512define_netdev_printk_level(netdev_err, KERN_ERR);
11513define_netdev_printk_level(netdev_warn, KERN_WARNING);
11514define_netdev_printk_level(netdev_notice, KERN_NOTICE);
11515define_netdev_printk_level(netdev_info, KERN_INFO);
11516
11517static void __net_exit netdev_exit(struct net *net)
11518{
11519 kfree(net->dev_name_head);
11520 kfree(net->dev_index_head);
11521 if (net != &init_net)
11522 WARN_ON_ONCE(!list_empty(&net->dev_base_head));
11523}
11524
11525static struct pernet_operations __net_initdata netdev_net_ops = {
11526 .init = netdev_init,
11527 .exit = netdev_exit,
11528};
11529
11530static void __net_exit default_device_exit(struct net *net)
11531{
11532 struct net_device *dev, *aux;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
11537 rtnl_lock();
11538 for_each_netdev_safe(net, dev, aux) {
11539 int err;
11540 char fb_name[IFNAMSIZ];
11541
11542
11543 if (dev->features & NETIF_F_NETNS_LOCAL)
11544 continue;
11545
11546
11547 if (dev->rtnl_link_ops && !dev->rtnl_link_ops->netns_refund)
11548 continue;
11549
11550
11551 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
11552 if (__dev_get_by_name(&init_net, fb_name))
11553 snprintf(fb_name, IFNAMSIZ, "dev%%d");
11554 err = dev_change_net_namespace(dev, &init_net, fb_name);
11555 if (err) {
11556 pr_emerg("%s: failed to move %s to init_net: %d\n",
11557 __func__, dev->name, err);
11558 BUG();
11559 }
11560 }
11561 rtnl_unlock();
11562}
11563
11564static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
11565{
	/* Return with the rtnl_lock held when there are no network
	 * devices unregistering in any network namespace in net_list.
	 */
11569 struct net *net;
11570 bool unregistering;
11571 DEFINE_WAIT_FUNC(wait, woken_wake_function);
11572
11573 add_wait_queue(&netdev_unregistering_wq, &wait);
11574 for (;;) {
11575 unregistering = false;
11576 rtnl_lock();
11577 list_for_each_entry(net, net_list, exit_list) {
11578 if (net->dev_unreg_count > 0) {
11579 unregistering = true;
11580 break;
11581 }
11582 }
11583 if (!unregistering)
11584 break;
11585 __rtnl_unlock();
11586
11587 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
11588 }
11589 remove_wait_queue(&netdev_unregistering_wq, &wait);
11590}
11591
11592static void __net_exit default_device_exit_batch(struct list_head *net_list)
11593{
	/* At exit all network devices must be removed from a network
	 * namespace.  Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
11599 struct net_device *dev;
11600 struct net *net;
11601 LIST_HEAD(dev_kill_list);
11602
	/* To prevent network device cleanup code from dereferencing
	 * loopback devices or network devices that have been freed
	 * wait here for all pending unregistrations to complete,
	 * before unregistering the loopback device and allowing the
	 * network namespace to be freed.
	 *
	 * The netdev todo list containing all network devices
	 * unregistrations that happen in default_device_exit_batch
	 * will run in the rtnl_unlock() at the end of
	 * default_device_exit_batch.
	 */
11614 rtnl_lock_unregistering(net_list);
11615 list_for_each_entry(net, net_list, exit_list) {
11616 for_each_netdev_reverse(net, dev) {
11617 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
11618 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
11619 else
11620 unregister_netdevice_queue(dev, &dev_kill_list);
11621 }
11622 }
11623 unregister_netdevice_many(&dev_kill_list);
11624 rtnl_unlock();
11625}
11626
11627static struct pernet_operations __net_initdata default_device_ops = {
11628 .exit = default_device_exit,
11629 .exit_batch = default_device_exit_batch,
11630};
11631
/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 *	This is called single threaded during boot, so no need
 *	to take the rtnl semaphore.
 */
11643static int __init net_dev_init(void)
11644{
11645 int i, rc = -ENOMEM;
11646
11647 BUG_ON(!dev_boot_phase);
11648
11649 if (dev_proc_init())
11650 goto out;
11651
11652 if (netdev_kobject_init())
11653 goto out;
11654
11655 INIT_LIST_HEAD(&ptype_all);
11656 for (i = 0; i < PTYPE_HASH_SIZE; i++)
11657 INIT_LIST_HEAD(&ptype_base[i]);
11658
11659 INIT_LIST_HEAD(&offload_base);
11660
11661 if (register_pernet_subsys(&netdev_net_ops))
11662 goto out;
11663
	/*
	 *	Initialise the packet receive queues.
	 */
11667
11668 for_each_possible_cpu(i) {
11669 struct work_struct *flush = per_cpu_ptr(&flush_works, i);
11670 struct softnet_data *sd = &per_cpu(softnet_data, i);
11671
11672 INIT_WORK(flush, flush_backlog);
11673
11674 skb_queue_head_init(&sd->input_pkt_queue);
11675 skb_queue_head_init(&sd->process_queue);
11676#ifdef CONFIG_XFRM_OFFLOAD
11677 skb_queue_head_init(&sd->xfrm_backlog);
11678#endif
11679 INIT_LIST_HEAD(&sd->poll_list);
11680 sd->output_queue_tailp = &sd->output_queue;
11681#ifdef CONFIG_RPS
11682 INIT_CSD(&sd->csd, rps_trigger_softirq, sd);
11683 sd->cpu = i;
11684#endif
11685
11686 init_gro_hash(&sd->backlog);
11687 sd->backlog.poll = process_backlog;
11688 sd->backlog.weight = weight_p;
11689 }
11690
11691 dev_boot_phase = 0;
11692
	/* The loopback device is special if any other network devices
	 * is present in a network namespace the loopback device must
	 * be present. Since we now dynamically allocate and free the
	 * loopback device ensure this invariant is maintained by
	 * keeping the loopback device as the first device on the
	 * list of network devices.  Ensuring the loopback devices
	 * is the first device that appears and the last network device
	 * that disappears.
	 */
11702 if (register_pernet_device(&loopback_net_ops))
11703 goto out;
11704
11705 if (register_pernet_device(&default_device_ops))
11706 goto out;
11707
11708 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
11709 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
11710
11711 rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
11712 NULL, dev_cpu_dead);
11713 WARN_ON(rc < 0);
11714 rc = 0;
11715out:
11716 return rc;
11717}
11718
11719subsys_initcall(net_dev_init);
11720