/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/timer.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/prefetch.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/workqueue.h>
#include <linux/dynamic_queue_limits.h>

#include <linux/ethtool.h>
#include <net/net_namespace.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif
#include <net/netprio_cgroup.h>
#include <net/xdp.h>

#include <linux/netdev_features.h>
#include <linux/neighbour.h>
#include <uapi/linux/netdevice.h>
#include <uapi/linux/if_bonding.h>
#include <uapi/linux/pkt_cls.h>
#include <linux/hashtable.h>

struct netpoll_info;
struct device;
struct phy_device;
struct dsa_port;
struct ip_tunnel_parm;
struct macsec_context;
struct macsec_ops;

struct sfp_bus;
/* 802.11 specific */
struct wireless_dev;
/* 802.15.4 specific */
struct wpan_dev;
struct mpls_dev;
/* UDP Tunnel offloads */
struct udp_tunnel_info;
struct bpf_prog;
struct xdp_buff;

void netdev_set_default_ethtool_ops(struct net_device *dev,
				    const struct ethtool_ops *ops);

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */

#define MAX_NEST_DEV 8
/*
 * Transmit return codes: transmit return codes originate from three different
 * namespaces:
 *
 * - qdisc return codes
 * - driver transmit return codes
 * - errno values
 *
 * Drivers are allowed to return any one of those in their hard_start_xmit()
 * routine and therefore dev_queue_xmit() can return any of them later.
 */
/* qdisc ->enqueue() return codes. */
#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_DROP		0x01	/* skb dropped			*/
#define NET_XMIT_CN		0x02	/* congestion notification	*/
#define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority; prompting us to send less aggressively.
 */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)

/* Driver transmit return codes */
#define NETDEV_TX_MASK		0xf0

enum netdev_tx {
	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy */
};
typedef enum netdev_tx netdev_tx_t;

/*
 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
 * hard_start_xmit() must not return NET_XMIT_* or negative errno values.
 */
static inline bool dev_xmit_complete(int rc)
{
	/*
	 * Positive cases with an skb consumed by a driver:
	 * - successful transmission (rc == NETDEV_TX_OK)
	 * - error while transmitting (rc < 0)
	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
	 */
	if (likely(rc < NET_XMIT_MASK))
		return true;

	return false;
}
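
/*
 * Illustrative sketch (not part of this header) of how these codes are
 * typically consumed; everything other than the helpers above is hypothetical
 * and simplified:
 *
 *	// Stack side: dev_queue_xmit() returns NET_XMIT_* / errno values.
 *	err = net_xmit_eval(dev_queue_xmit(skb));	// 0 unless dropped
 *
 *	// Driver side: ndo_start_xmit() returns NETDEV_TX_* codes.
 *	rc = netdev_start_xmit(skb, dev, txq, false);
 *	if (!dev_xmit_complete(rc))
 *		;	// NETDEV_TX_BUSY - skb not consumed, caller keeps it
 */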

/*
 *	Compute the worst-case header length according to the protocols
 *	used.
 */
#if defined(CONFIG_HYPERV_NET)
# define LL_MAX_HEADER 128
#elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#else
# define LL_MAX_HEADER 32
#endif

#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
    !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

/*
 *	Old network device statistics. Fields are native words
 *	(unsigned long) so they can be read and written atomically.
 *	Modern drivers should report stats through ndo_get_stats64 /
 *	struct rtnl_link_stats64 instead.
 */
struct net_device_stats {
	unsigned long	rx_packets;
	unsigned long	tx_packets;
	unsigned long	rx_bytes;
	unsigned long	tx_bytes;
	unsigned long	rx_errors;
	unsigned long	tx_errors;
	unsigned long	rx_dropped;
	unsigned long	tx_dropped;
	unsigned long	multicast;
	unsigned long	collisions;
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;
	unsigned long	rx_crc_errors;
	unsigned long	rx_frame_errors;
	unsigned long	rx_fifo_errors;
	unsigned long	rx_missed_errors;
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};

#include <linux/cache.h>
#include <linux/skbuff.h>

#ifdef CONFIG_RPS
#include <linux/static_key.h>
extern struct static_key_false rps_needed;
extern struct static_key_false rfs_needed;
#endif

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netdev_hw_addr {
	struct list_head	list;
	unsigned char		addr[MAX_ADDR_LEN];
	unsigned char		type;
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_SLAVE		3
#define NETDEV_HW_ADDR_T_UNICAST	4
#define NETDEV_HW_ADDR_T_MULTICAST	5
	bool			global_use;
	int			sync_cnt;
	int			refcount;
	int			synced;
	struct rcu_head		rcu_head;
};

struct netdev_hw_addr_list {
	struct list_head	list;
	int			count;
};

#define netdev_hw_addr_list_count(l) ((l)->count)
#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
#define netdev_hw_addr_list_for_each(ha, l) \
	list_for_each_entry(ha, &(l)->list, list)

#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->uc)

#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->mc)
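
/*
 * A minimal sketch of walking the multicast list from an ndo_set_rx_mode()
 * implementation (runs with dev->addr_list_lock held); "my_set_rx_mode" and
 * "my_hw_add_mc_filter" are hypothetical driver functions:
 *
 *	static void my_set_rx_mode(struct net_device *dev)
 *	{
 *		struct netdev_hw_addr *ha;
 *
 *		if (netdev_mc_empty(dev))
 *			return;
 *		netdev_for_each_mc_addr(ha, dev)
 *			my_hw_add_mc_filter(dev, ha->addr);
 *	}
 */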

struct hh_cache {
	unsigned int	hh_len;
	seqlock_t	hh_lock;

	/* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
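
/*
 * Typical usage sketch (not part of this header): when building an outgoing
 * packet by hand, reserve link-layer headroom before filling in the payload:
 *
 *	skb = alloc_skb(len + LL_RESERVED_SPACE(dev) + dev->needed_tailroom,
 *			GFP_ATOMIC);
 *	if (skb)
 *		skb_reserve(skb, LL_RESERVED_SPACE(dev));
 */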

struct header_ops {
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned int len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
	bool	(*validate)(const char *ll_header, unsigned int len);
	__be16	(*parse_protocol)(const struct sk_buff *skb);
};

/* These flag bits are private to the generic network queueing
 * layer; they may not be explicitly referenced by any other
 * code.
 */
enum netdev_state_t {
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
	__LINK_STATE_TESTING,
};

/*
 * This structure holds boot-time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

int __init netdev_boot_setup(char *str);

struct gro_list {
	struct list_head	list;
	int			count;
};

/*
 * Size of GRO hash buckets; must be less than the bit width of
 * napi_struct::gro_bitmask.
 */
#define GRO_HASH_BUCKETS	8

/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit.  This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-CPU poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	int			defer_hard_irqs_count;
	unsigned long		gro_bitmask;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	int			poll_owner;
#endif
	struct net_device	*dev;
	struct gro_list		gro_hash[GRO_HASH_BUCKETS];
	struct sk_buff		*skb;
	struct list_head	rx_list;	/* Pending GRO_NORMAL skbs */
	int			rx_count;	/* length of rx_list */
	struct hrtimer		timer;
	struct list_head	dev_list;
	struct hlist_node	napi_hash_node;
	unsigned int		napi_id;
};

enum {
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_MISSED,	/* reschedule a napi */
	NAPI_STATE_DISABLE,	/* Disable pending */
	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
	NAPI_STATE_HASHED,	/* In NAPI hash (busy polling possible) */
	NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */
	NAPI_STATE_IN_BUSY_POLL,/* sk_busy_loop() owns this NAPI */
};

enum {
	NAPIF_STATE_SCHED	 = BIT(NAPI_STATE_SCHED),
	NAPIF_STATE_MISSED	 = BIT(NAPI_STATE_MISSED),
	NAPIF_STATE_DISABLE	 = BIT(NAPI_STATE_DISABLE),
	NAPIF_STATE_NPSVC	 = BIT(NAPI_STATE_NPSVC),
	NAPIF_STATE_HASHED	 = BIT(NAPI_STATE_HASHED),
	NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL),
	NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL),
};

enum gro_result {
	GRO_MERGED,
	GRO_MERGED_FREE,
	GRO_HELD,
	GRO_NORMAL,
	GRO_DROP,
	GRO_CONSUMED,
};
typedef enum gro_result gro_result_t;

/**
 * enum rx_handler_result - Possible return values for rx_handlers.
 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
 * further.
 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
 * case skb->dev was changed by rx_handler.
 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
 *
 * rx_handlers are functions called from inside __netif_receive_skb(), to do
 * special processing of the skb, prior to delivery to protocol handlers.
 *
 * If the rx_handler consumed the skb in some way, it should return
 * RX_HANDLER_CONSUMED. If the rx_handler changed skb->dev, to divert the skb
 * to another net_device, it should return RX_HANDLER_ANOTHER; the rx_handler
 * of the new device will then be called if one is registered.
 *
 * If the rx_handler decides the skb should only reach protocol handlers that
 * are registered on the exact device (ptype->dev == skb->dev), it should
 * return RX_HANDLER_EXACT. If the rx_handler wants the skb to be delivered
 * normally, it should return RX_HANDLER_PASS; a device without a registered
 * rx_handler behaves as if RX_HANDLER_PASS was returned.
 */
enum rx_handler_result {
	RX_HANDLER_CONSUMED,
	RX_HANDLER_ANOTHER,
	RX_HANDLER_EXACT,
	RX_HANDLER_PASS,
};
typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);

void __napi_schedule(struct napi_struct *n);
void __napi_schedule_irqoff(struct napi_struct *n);

static inline bool napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

bool napi_schedule_prep(struct napi_struct *n);

/**
 *	napi_schedule - schedule NAPI poll
 *	@n: NAPI context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}

/**
 *	napi_schedule_irqoff - schedule NAPI poll
 *	@n: NAPI context
 *
 * Variant of napi_schedule(), assuming hard irqs are masked.
 */
static inline void napi_schedule_irqoff(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule_irqoff(n);
}

/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
static inline bool napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return true;
	}
	return false;
}

bool napi_complete_done(struct napi_struct *n, int work_done);

/**
 *	napi_complete - NAPI processing complete
 *	@n: NAPI context
 *
 * Mark NAPI processing as complete.
 * Consider using napi_complete_done() instead.
 * Return false if device should avoid rearming interrupts.
 */
static inline bool napi_complete(struct napi_struct *n)
{
	return napi_complete_done(n, 0);
}

/**
 *	napi_hash_del - remove a NAPI from global table
 *	@napi: NAPI context
 *
 * Warning: caller must observe RCU grace period
 * before freeing memory containing @napi, if
 * this function returns true.
 * Note: core networking stack automatically calls it
 * from netif_napi_del().
 * Drivers might want to call this helper to combine all
 * the needed RCU grace periods into a single one.
 */
bool napi_hash_del(struct napi_struct *napi);

/**
 *	napi_disable - prevent NAPI from scheduling
 *	@n: NAPI context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
void napi_disable(struct napi_struct *n);

/**
 *	napi_enable - enable NAPI scheduling
 *	@n: NAPI context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable().
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_atomic();
	clear_bit(NAPI_STATE_SCHED, &n->state);
	clear_bit(NAPI_STATE_NPSVC, &n->state);
}
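
/*
 * A hedged sketch of the canonical driver poll routine these helpers are
 * built around; "my_rx_clean" and "my_irq_enable" are hypothetical driver
 * code:
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work_done = my_rx_clean(napi, budget);
 *
 *		if (work_done < budget &&
 *		    napi_complete_done(napi, work_done))
 *			my_irq_enable(napi->dev);
 *		return work_done;
 *	}
 *
 * The interrupt handler then simply masks the device interrupt and calls
 * napi_schedule() on the corresponding context.
 */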

/**
 *	napi_synchronize - wait until NAPI is not running
 *	@n: NAPI context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	if (IS_ENABLED(CONFIG_SMP))
		while (test_bit(NAPI_STATE_SCHED, &n->state))
			msleep(1);
	else
		barrier();
}

/**
 *	napi_if_scheduled_mark_missed - if napi is running, set the
 *	NAPIF_STATE_MISSED
 *	@n: NAPI context
 *
 * If napi is running, set the NAPIF_STATE_MISSED, and return true if
 * NAPI is scheduled.
 **/
static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n)
{
	unsigned long val, new;

	do {
		val = READ_ONCE(n->state);
		if (val & NAPIF_STATE_DISABLE)
			return true;

		if (!(val & NAPIF_STATE_SCHED))
			return false;

		new = val | NAPIF_STATE_MISSED;
	} while (cmpxchg(&n->state, val, new) != val);

	return true;
}

enum netdev_queue_state_t {
	__QUEUE_STATE_DRV_XOFF,
	__QUEUE_STATE_STACK_XOFF,
	__QUEUE_STATE_FROZEN,
};

#define QUEUE_STATE_DRV_XOFF	(1 << __QUEUE_STATE_DRV_XOFF)
#define QUEUE_STATE_STACK_XOFF	(1 << __QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_FROZEN	(1 << __QUEUE_STATE_FROZEN)

#define QUEUE_STATE_ANY_XOFF	(QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
					QUEUE_STATE_FROZEN)
#define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \
					QUEUE_STATE_FROZEN)

/*
 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue.  The
 * netif_tx_* functions below are used to manipulate this flag.  The
 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
 * queue independently.  The netif_xmit_*stopped functions below are called
 * to test the combined driver/stack state.
 */

struct netdev_queue {
/*
 * read-mostly part
 */
	struct net_device	*dev;
	struct Qdisc __rcu	*qdisc;
	struct Qdisc		*qdisc_sleeping;
#ifdef CONFIG_SYSFS
	struct kobject		kobj;
#endif
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	int			numa_node;
#endif
	unsigned long		tx_maxrate;
	/*
	 * Number of TX timeouts for this queue
	 * (/sys/class/net/DEV/Q/trans_timeout)
	 */
	unsigned long		trans_timeout;

	/* Subordinate device that the queue has been assigned to */
	struct net_device	*sb_dev;
#ifdef CONFIG_XDP_SOCKETS
	struct xdp_umem		*umem;
#endif
/*
 * write-mostly part
 */
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	int			xmit_lock_owner;
	/*
	 * Time (in jiffies) of last Tx
	 */
	unsigned long		trans_start;

	unsigned long		state;

#ifdef CONFIG_BQL
	struct dql		dql;
#endif
} ____cacheline_aligned_in_smp;

extern int sysctl_fb_tunnels_only_for_init_net;
extern int sysctl_devconf_inherit_init_net;

static inline bool net_has_fallback_tunnels(const struct net *net)
{
	return net == &init_net ||
	       !IS_ENABLED(CONFIG_SYSCTL) ||
	       !sysctl_fb_tunnels_only_for_init_net;
}

static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	return q->numa_node;
#else
	return NUMA_NO_NODE;
#endif
}

static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	q->numa_node = node;
#endif
}

#ifdef CONFIG_RPS
/*
 * This structure holds an RPS map which can be of variable length.  The
 * map is an array of CPUs.
 */
struct rps_map {
	unsigned int len;
	struct rcu_head rcu;
	u16 cpus[];
};
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))

/*
 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
 * tail pointer for that CPU's input queue at the time of last enqueue, and
 * a hardware filter index.
 */
struct rps_dev_flow {
	u16 cpu;
	u16 filter;
	unsigned int last_qtail;
};
#define RPS_NO_FILTER 0xffff

/*
 * The rps_dev_flow_table structure contains a table of flow mappings.
 */
struct rps_dev_flow_table {
	unsigned int mask;
	struct rcu_head rcu;
	struct rps_dev_flow flows[];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
    ((_num) * sizeof(struct rps_dev_flow)))

/*
 * The rps_sock_flow_table contains mappings of flows to the last CPU
 * on which they were processed by the application (set in recvmsg).
 * Each entry is a 32-bit value: the upper part holds the high-order
 * bits of the flow hash, the lower part the CPU number.
 * rps_cpu_mask is used to partition the space, depending on the number
 * of possible CPUs.
 */
struct rps_sock_flow_table {
	u32	mask;

	u32	ents[] ____cacheline_aligned_in_smp;
};
#define	RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))

#define RPS_NO_CPU 0xffff

extern u32 rps_cpu_mask;
extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;

static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
					u32 hash)
{
	if (table && hash) {
		unsigned int index = hash & table->mask;
		u32 val = hash & ~rps_cpu_mask;

		/* We only give a hint, preemption can change CPU under us */
		val |= raw_smp_processor_id();

		if (table->ents[index] != val)
			table->ents[index] = val;
	}
}

#ifdef CONFIG_RFS_ACCEL
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
			 u16 filter_id);
#endif
#endif /* CONFIG_RPS */

/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
#ifdef CONFIG_RPS
	struct rps_map __rcu		*rps_map;
	struct rps_dev_flow_table __rcu	*rps_flow_table;
#endif
	struct kobject			kobj;
	struct net_device		*dev;
	struct xdp_rxq_info		xdp_rxq;
#ifdef CONFIG_XDP_SOCKETS
	struct xdp_umem			*umem;
#endif
} ____cacheline_aligned_in_smp;

/*
 * RX queue sysfs structures and functions.
 */
struct rx_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_rx_queue *queue, char *buf);
	ssize_t (*store)(struct netdev_rx_queue *queue,
			 const char *buf, size_t len);
};

#ifdef CONFIG_XPS
/*
 * This structure holds an XPS map which can be of variable length.  The
 * map is an array of queues.
 */
struct xps_map {
	unsigned int len;
	unsigned int alloc_len;
	struct rcu_head rcu;
	u16 queues[];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
       - sizeof(struct xps_map)) / sizeof(u16))

/*
 * This structure holds all XPS maps for device.  Maps are indexed by CPU
 * (or by RX queue for the rxqs map).
 */
struct xps_dev_maps {
	struct rcu_head rcu;
	struct xps_map __rcu *attr_map[]; /* Either CPUs map or RXQs map */
};

#define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) +	\
	(nr_cpu_ids * (_tcs) * sizeof(struct xps_map *)))

#define XPS_RXQ_DEV_MAPS_SIZE(_tcs, _rxqs) (sizeof(struct xps_dev_maps) +\
	(_rxqs * (_tcs) * sizeof(struct xps_map *)))

#endif /* CONFIG_XPS */

#define TC_MAX_QUEUE	16
#define TC_BITMASK	15
/* HW offloaded queuing disciplines txq count and offset maps */
struct netdev_tc_txq {
	u16 count;
	u16 offset;
};
804
805#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
806
807
808
809
810struct netdev_fcoe_hbainfo {
811 char manufacturer[64];
812 char serial_number[64];
813 char hardware_version[64];
814 char driver_version[64];
815 char optionrom_version[64];
816 char firmware_version[64];
817 char model[256];
818 char model_description[256];
819};
820#endif
821
822#define MAX_PHYS_ITEM_ID_LEN 32
823
824
825
826
827struct netdev_phys_item_id {
828 unsigned char id[MAX_PHYS_ITEM_ID_LEN];
829 unsigned char id_len;
830};
831
832static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
833 struct netdev_phys_item_id *b)
834{
835 return a->id_len == b->id_len &&
836 memcmp(a->id, b->id, a->id_len) == 0;
837}
838
839typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
840 struct sk_buff *skb,
841 struct net_device *sb_dev);
842
843enum tc_setup_type {
844 TC_SETUP_QDISC_MQPRIO,
845 TC_SETUP_CLSU32,
846 TC_SETUP_CLSFLOWER,
847 TC_SETUP_CLSMATCHALL,
848 TC_SETUP_CLSBPF,
849 TC_SETUP_BLOCK,
850 TC_SETUP_QDISC_CBS,
851 TC_SETUP_QDISC_RED,
852 TC_SETUP_QDISC_PRIO,
853 TC_SETUP_QDISC_MQ,
854 TC_SETUP_QDISC_ETF,
855 TC_SETUP_ROOT_QDISC,
856 TC_SETUP_QDISC_GRED,
857 TC_SETUP_QDISC_TAPRIO,
858 TC_SETUP_FT,
859 TC_SETUP_QDISC_ETS,
860 TC_SETUP_QDISC_TBF,
861 TC_SETUP_QDISC_FIFO,
862};
863
864
865
866
867enum bpf_netdev_command {
868
869
870
871
872
873
874
875 XDP_SETUP_PROG,
876 XDP_SETUP_PROG_HW,
877 XDP_QUERY_PROG,
878 XDP_QUERY_PROG_HW,
879
880 BPF_OFFLOAD_MAP_ALLOC,
881 BPF_OFFLOAD_MAP_FREE,
882 XDP_SETUP_XSK_UMEM,
883};
884
885struct bpf_prog_offload_ops;
886struct netlink_ext_ack;
887struct xdp_umem;
888struct xdp_dev_bulk_queue;
889
890struct netdev_bpf {
891 enum bpf_netdev_command command;
892 union {
893
894 struct {
895 u32 flags;
896 struct bpf_prog *prog;
897 struct netlink_ext_ack *extack;
898 };
899
900 struct {
901 u32 prog_id;
902
903 u32 prog_flags;
904 };
905
906 struct {
907 struct bpf_offloaded_map *offmap;
908 };
909
910 struct {
911 struct xdp_umem *umem;
912 u16 queue_id;
913 } xsk;
914 };
915};
916
917
918#define XDP_WAKEUP_RX (1 << 0)
919#define XDP_WAKEUP_TX (1 << 1)
920
921#ifdef CONFIG_XFRM_OFFLOAD
922struct xfrmdev_ops {
923 int (*xdo_dev_state_add) (struct xfrm_state *x);
924 void (*xdo_dev_state_delete) (struct xfrm_state *x);
925 void (*xdo_dev_state_free) (struct xfrm_state *x);
926 bool (*xdo_dev_offload_ok) (struct sk_buff *skb,
927 struct xfrm_state *x);
928 void (*xdo_dev_state_advance_esn) (struct xfrm_state *x);
929};
930#endif
931
932struct dev_ifalias {
933 struct rcu_head rcuhead;
934 char ifalias[];
935};
936
937struct devlink;
938struct tlsdev_ops;
939
940struct netdev_name_node {
941 struct hlist_node hlist;
942 struct list_head list;
943 struct net_device *dev;
944 const char *name;
945};
946
947int netdev_name_node_alt_create(struct net_device *dev, const char *name);
948int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);
949
950struct netdev_net_notifier {
951 struct list_head list;
952 struct notifier_block *nb;
953};

/*
 * This structure defines the management hooks for network devices.
 * The following hooks can be defined; unless noted otherwise, they are
 * optional and can be filled with a NULL pointer.  See the users of each
 * op in net/core for the exact semantics and locking expected of the
 * individual callbacks.
 */
1282struct net_device_ops {
1283 int (*ndo_init)(struct net_device *dev);
1284 void (*ndo_uninit)(struct net_device *dev);
1285 int (*ndo_open)(struct net_device *dev);
1286 int (*ndo_stop)(struct net_device *dev);
1287 netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
1288 struct net_device *dev);
1289 netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
1290 struct net_device *dev,
1291 netdev_features_t features);
1292 u16 (*ndo_select_queue)(struct net_device *dev,
1293 struct sk_buff *skb,
1294 struct net_device *sb_dev);
1295 void (*ndo_change_rx_flags)(struct net_device *dev,
1296 int flags);
1297 void (*ndo_set_rx_mode)(struct net_device *dev);
1298 int (*ndo_set_mac_address)(struct net_device *dev,
1299 void *addr);
1300 int (*ndo_validate_addr)(struct net_device *dev);
1301 int (*ndo_do_ioctl)(struct net_device *dev,
1302 struct ifreq *ifr, int cmd);
1303 int (*ndo_set_config)(struct net_device *dev,
1304 struct ifmap *map);
1305 int (*ndo_change_mtu)(struct net_device *dev,
1306 int new_mtu);
1307 int (*ndo_neigh_setup)(struct net_device *dev,
1308 struct neigh_parms *);
1309 void (*ndo_tx_timeout) (struct net_device *dev,
1310 unsigned int txqueue);
1311
1312 void (*ndo_get_stats64)(struct net_device *dev,
1313 struct rtnl_link_stats64 *storage);
1314 bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id);
1315 int (*ndo_get_offload_stats)(int attr_id,
1316 const struct net_device *dev,
1317 void *attr_data);
1318 struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
1319
1320 int (*ndo_vlan_rx_add_vid)(struct net_device *dev,
1321 __be16 proto, u16 vid);
1322 int (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
1323 __be16 proto, u16 vid);
1324#ifdef CONFIG_NET_POLL_CONTROLLER
1325 void (*ndo_poll_controller)(struct net_device *dev);
1326 int (*ndo_netpoll_setup)(struct net_device *dev,
1327 struct netpoll_info *info);
1328 void (*ndo_netpoll_cleanup)(struct net_device *dev);
1329#endif
1330 int (*ndo_set_vf_mac)(struct net_device *dev,
1331 int queue, u8 *mac);
1332 int (*ndo_set_vf_vlan)(struct net_device *dev,
1333 int queue, u16 vlan,
1334 u8 qos, __be16 proto);
1335 int (*ndo_set_vf_rate)(struct net_device *dev,
1336 int vf, int min_tx_rate,
1337 int max_tx_rate);
1338 int (*ndo_set_vf_spoofchk)(struct net_device *dev,
1339 int vf, bool setting);
1340 int (*ndo_set_vf_trust)(struct net_device *dev,
1341 int vf, bool setting);
1342 int (*ndo_get_vf_config)(struct net_device *dev,
1343 int vf,
1344 struct ifla_vf_info *ivf);
1345 int (*ndo_set_vf_link_state)(struct net_device *dev,
1346 int vf, int link_state);
1347 int (*ndo_get_vf_stats)(struct net_device *dev,
1348 int vf,
1349 struct ifla_vf_stats
1350 *vf_stats);
1351 int (*ndo_set_vf_port)(struct net_device *dev,
1352 int vf,
1353 struct nlattr *port[]);
1354 int (*ndo_get_vf_port)(struct net_device *dev,
1355 int vf, struct sk_buff *skb);
1356 int (*ndo_get_vf_guid)(struct net_device *dev,
1357 int vf,
1358 struct ifla_vf_guid *node_guid,
1359 struct ifla_vf_guid *port_guid);
1360 int (*ndo_set_vf_guid)(struct net_device *dev,
1361 int vf, u64 guid,
1362 int guid_type);
1363 int (*ndo_set_vf_rss_query_en)(
1364 struct net_device *dev,
1365 int vf, bool setting);
1366 int (*ndo_setup_tc)(struct net_device *dev,
1367 enum tc_setup_type type,
1368 void *type_data);
1369#if IS_ENABLED(CONFIG_FCOE)
1370 int (*ndo_fcoe_enable)(struct net_device *dev);
1371 int (*ndo_fcoe_disable)(struct net_device *dev);
1372 int (*ndo_fcoe_ddp_setup)(struct net_device *dev,
1373 u16 xid,
1374 struct scatterlist *sgl,
1375 unsigned int sgc);
1376 int (*ndo_fcoe_ddp_done)(struct net_device *dev,
1377 u16 xid);
1378 int (*ndo_fcoe_ddp_target)(struct net_device *dev,
1379 u16 xid,
1380 struct scatterlist *sgl,
1381 unsigned int sgc);
1382 int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
1383 struct netdev_fcoe_hbainfo *hbainfo);
1384#endif
1385
1386#if IS_ENABLED(CONFIG_LIBFCOE)
1387#define NETDEV_FCOE_WWNN 0
1388#define NETDEV_FCOE_WWPN 1
1389 int (*ndo_fcoe_get_wwn)(struct net_device *dev,
1390 u64 *wwn, int type);
1391#endif
1392
1393#ifdef CONFIG_RFS_ACCEL
1394 int (*ndo_rx_flow_steer)(struct net_device *dev,
1395 const struct sk_buff *skb,
1396 u16 rxq_index,
1397 u32 flow_id);
1398#endif
1399 int (*ndo_add_slave)(struct net_device *dev,
1400 struct net_device *slave_dev,
1401 struct netlink_ext_ack *extack);
1402 int (*ndo_del_slave)(struct net_device *dev,
1403 struct net_device *slave_dev);
1404 struct net_device* (*ndo_get_xmit_slave)(struct net_device *dev,
1405 struct sk_buff *skb,
1406 bool all_slaves);
1407 netdev_features_t (*ndo_fix_features)(struct net_device *dev,
1408 netdev_features_t features);
1409 int (*ndo_set_features)(struct net_device *dev,
1410 netdev_features_t features);
1411 int (*ndo_neigh_construct)(struct net_device *dev,
1412 struct neighbour *n);
1413 void (*ndo_neigh_destroy)(struct net_device *dev,
1414 struct neighbour *n);
1415
1416 int (*ndo_fdb_add)(struct ndmsg *ndm,
1417 struct nlattr *tb[],
1418 struct net_device *dev,
1419 const unsigned char *addr,
1420 u16 vid,
1421 u16 flags,
1422 struct netlink_ext_ack *extack);
1423 int (*ndo_fdb_del)(struct ndmsg *ndm,
1424 struct nlattr *tb[],
1425 struct net_device *dev,
1426 const unsigned char *addr,
1427 u16 vid);
1428 int (*ndo_fdb_dump)(struct sk_buff *skb,
1429 struct netlink_callback *cb,
1430 struct net_device *dev,
1431 struct net_device *filter_dev,
1432 int *idx);
1433 int (*ndo_fdb_get)(struct sk_buff *skb,
1434 struct nlattr *tb[],
1435 struct net_device *dev,
1436 const unsigned char *addr,
1437 u16 vid, u32 portid, u32 seq,
1438 struct netlink_ext_ack *extack);
1439 int (*ndo_bridge_setlink)(struct net_device *dev,
1440 struct nlmsghdr *nlh,
1441 u16 flags,
1442 struct netlink_ext_ack *extack);
1443 int (*ndo_bridge_getlink)(struct sk_buff *skb,
1444 u32 pid, u32 seq,
1445 struct net_device *dev,
1446 u32 filter_mask,
1447 int nlflags);
1448 int (*ndo_bridge_dellink)(struct net_device *dev,
1449 struct nlmsghdr *nlh,
1450 u16 flags);
1451 int (*ndo_change_carrier)(struct net_device *dev,
1452 bool new_carrier);
1453 int (*ndo_get_phys_port_id)(struct net_device *dev,
1454 struct netdev_phys_item_id *ppid);
1455 int (*ndo_get_port_parent_id)(struct net_device *dev,
1456 struct netdev_phys_item_id *ppid);
1457 int (*ndo_get_phys_port_name)(struct net_device *dev,
1458 char *name, size_t len);
1459 void (*ndo_udp_tunnel_add)(struct net_device *dev,
1460 struct udp_tunnel_info *ti);
1461 void (*ndo_udp_tunnel_del)(struct net_device *dev,
1462 struct udp_tunnel_info *ti);
1463 void* (*ndo_dfwd_add_station)(struct net_device *pdev,
1464 struct net_device *dev);
1465 void (*ndo_dfwd_del_station)(struct net_device *pdev,
1466 void *priv);
1467
1468 int (*ndo_set_tx_maxrate)(struct net_device *dev,
1469 int queue_index,
1470 u32 maxrate);
1471 int (*ndo_get_iflink)(const struct net_device *dev);
1472 int (*ndo_change_proto_down)(struct net_device *dev,
1473 bool proto_down);
1474 int (*ndo_fill_metadata_dst)(struct net_device *dev,
1475 struct sk_buff *skb);
1476 void (*ndo_set_rx_headroom)(struct net_device *dev,
1477 int needed_headroom);
1478 int (*ndo_bpf)(struct net_device *dev,
1479 struct netdev_bpf *bpf);
1480 int (*ndo_xdp_xmit)(struct net_device *dev, int n,
1481 struct xdp_frame **xdp,
1482 u32 flags);
1483 int (*ndo_xsk_wakeup)(struct net_device *dev,
1484 u32 queue_id, u32 flags);
1485 struct devlink_port * (*ndo_get_devlink_port)(struct net_device *dev);
1486 int (*ndo_tunnel_ctl)(struct net_device *dev,
1487 struct ip_tunnel_parm *p, int cmd);
1488};
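
/*
 * A minimal, hedged example of how a driver typically instantiates this
 * structure; my_open/my_stop/my_start_xmit are hypothetical driver functions:
 *
 *	static const struct net_device_ops my_netdev_ops = {
 *		.ndo_open		= my_open,
 *		.ndo_stop		= my_stop,
 *		.ndo_start_xmit		= my_start_xmit,
 *		.ndo_set_mac_address	= eth_mac_addr,
 *		.ndo_validate_addr	= eth_validate_addr,
 *	};
 *
 * and assigns it before registration: dev->netdev_ops = &my_netdev_ops;
 */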

/**
 * enum netdev_priv_flags - &struct net_device priv_flags
 *
 * These are the &struct net_device private flags; they are only set
 * internally by drivers and used inside the kernel.  They are invisible
 * to userspace, so their values and ordering can change in any kernel
 * release.  You should have a pretty good reason to be extending them.
 */
1536enum netdev_priv_flags {
1537 IFF_802_1Q_VLAN = 1<<0,
1538 IFF_EBRIDGE = 1<<1,
1539 IFF_BONDING = 1<<2,
1540 IFF_ISATAP = 1<<3,
1541 IFF_WAN_HDLC = 1<<4,
1542 IFF_XMIT_DST_RELEASE = 1<<5,
1543 IFF_DONT_BRIDGE = 1<<6,
1544 IFF_DISABLE_NETPOLL = 1<<7,
1545 IFF_MACVLAN_PORT = 1<<8,
1546 IFF_BRIDGE_PORT = 1<<9,
1547 IFF_OVS_DATAPATH = 1<<10,
1548 IFF_TX_SKB_SHARING = 1<<11,
1549 IFF_UNICAST_FLT = 1<<12,
1550 IFF_TEAM_PORT = 1<<13,
1551 IFF_SUPP_NOFCS = 1<<14,
1552 IFF_LIVE_ADDR_CHANGE = 1<<15,
1553 IFF_MACVLAN = 1<<16,
1554 IFF_XMIT_DST_RELEASE_PERM = 1<<17,
1555 IFF_L3MDEV_MASTER = 1<<18,
1556 IFF_NO_QUEUE = 1<<19,
1557 IFF_OPENVSWITCH = 1<<20,
1558 IFF_L3MDEV_SLAVE = 1<<21,
1559 IFF_TEAM = 1<<22,
1560 IFF_RXFH_CONFIGURED = 1<<23,
1561 IFF_PHONY_HEADROOM = 1<<24,
1562 IFF_MACSEC = 1<<25,
1563 IFF_NO_RX_HANDLER = 1<<26,
1564 IFF_FAILOVER = 1<<27,
1565 IFF_FAILOVER_SLAVE = 1<<28,
1566 IFF_L3MDEV_RX_HANDLER = 1<<29,
1567 IFF_LIVE_RENAME_OK = 1<<30,
1568};
1569
1570#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
1571#define IFF_EBRIDGE IFF_EBRIDGE
1572#define IFF_BONDING IFF_BONDING
1573#define IFF_ISATAP IFF_ISATAP
1574#define IFF_WAN_HDLC IFF_WAN_HDLC
1575#define IFF_XMIT_DST_RELEASE IFF_XMIT_DST_RELEASE
1576#define IFF_DONT_BRIDGE IFF_DONT_BRIDGE
1577#define IFF_DISABLE_NETPOLL IFF_DISABLE_NETPOLL
1578#define IFF_MACVLAN_PORT IFF_MACVLAN_PORT
1579#define IFF_BRIDGE_PORT IFF_BRIDGE_PORT
1580#define IFF_OVS_DATAPATH IFF_OVS_DATAPATH
1581#define IFF_TX_SKB_SHARING IFF_TX_SKB_SHARING
1582#define IFF_UNICAST_FLT IFF_UNICAST_FLT
1583#define IFF_TEAM_PORT IFF_TEAM_PORT
1584#define IFF_SUPP_NOFCS IFF_SUPP_NOFCS
1585#define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE
1586#define IFF_MACVLAN IFF_MACVLAN
1587#define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM
1588#define IFF_L3MDEV_MASTER IFF_L3MDEV_MASTER
1589#define IFF_NO_QUEUE IFF_NO_QUEUE
1590#define IFF_OPENVSWITCH IFF_OPENVSWITCH
1591#define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE
1592#define IFF_TEAM IFF_TEAM
1593#define IFF_RXFH_CONFIGURED IFF_RXFH_CONFIGURED
1594#define IFF_MACSEC IFF_MACSEC
1595#define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER
1596#define IFF_FAILOVER IFF_FAILOVER
1597#define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE
1598#define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER
1599#define IFF_LIVE_RENAME_OK IFF_LIVE_RENAME_OK

/**
 *	struct net_device - The DEVICE structure.
 *
 *	Actually, this whole structure is a big mistake.  It mixes I/O
 *	data with strictly "high-level" data, and it has to know about
 *	almost every data structure used in the INET module.
 *
 *	FIXME: cleanup struct net_device such that network protocol info
 *	moves out.
 */
1843struct net_device {
1844 char name[IFNAMSIZ];
1845 struct netdev_name_node *name_node;
1846 struct dev_ifalias __rcu *ifalias;
1847
1848
1849
1850
1851 unsigned long mem_end;
1852 unsigned long mem_start;
1853 unsigned long base_addr;
1854 int irq;
1855
1856
1857
1858
1859
1860
1861
1862 unsigned long state;
1863
1864 struct list_head dev_list;
1865 struct list_head napi_list;
1866 struct list_head unreg_list;
1867 struct list_head close_list;
1868 struct list_head ptype_all;
1869 struct list_head ptype_specific;
1870
1871 struct {
1872 struct list_head upper;
1873 struct list_head lower;
1874 } adj_list;
1875
1876 netdev_features_t features;
1877 netdev_features_t hw_features;
1878 netdev_features_t wanted_features;
1879 netdev_features_t vlan_features;
1880 netdev_features_t hw_enc_features;
1881 netdev_features_t mpls_features;
1882 netdev_features_t gso_partial_features;
1883
1884 int ifindex;
1885 int group;
1886
1887 struct net_device_stats stats;
1888
1889 atomic_long_t rx_dropped;
1890 atomic_long_t tx_dropped;
1891 atomic_long_t rx_nohandler;
1892
1893
1894 atomic_t carrier_up_count;
1895 atomic_t carrier_down_count;
1896
1897#ifdef CONFIG_WIRELESS_EXT
1898 const struct iw_handler_def *wireless_handlers;
1899 struct iw_public_data *wireless_data;
1900#endif
1901 const struct net_device_ops *netdev_ops;
1902 const struct ethtool_ops *ethtool_ops;
1903#ifdef CONFIG_NET_L3_MASTER_DEV
1904 const struct l3mdev_ops *l3mdev_ops;
1905#endif
1906#if IS_ENABLED(CONFIG_IPV6)
1907 const struct ndisc_ops *ndisc_ops;
1908#endif
1909
1910#ifdef CONFIG_XFRM_OFFLOAD
1911 const struct xfrmdev_ops *xfrmdev_ops;
1912#endif
1913
1914#if IS_ENABLED(CONFIG_TLS_DEVICE)
1915 const struct tlsdev_ops *tlsdev_ops;
1916#endif
1917
1918 const struct header_ops *header_ops;
1919
1920 unsigned int flags;
1921 unsigned int priv_flags;
1922
1923 unsigned short gflags;
1924 unsigned short padded;
1925
1926 unsigned char operstate;
1927 unsigned char link_mode;
1928
1929 unsigned char if_port;
1930 unsigned char dma;
1931
1932
1933
1934
1935
1936
1937 unsigned int mtu;
1938 unsigned int min_mtu;
1939 unsigned int max_mtu;
1940 unsigned short type;
1941 unsigned short hard_header_len;
1942 unsigned char min_header_len;
1943
1944 unsigned short needed_headroom;
1945 unsigned short needed_tailroom;
1946
1947
1948 unsigned char perm_addr[MAX_ADDR_LEN];
1949 unsigned char addr_assign_type;
1950 unsigned char addr_len;
1951 unsigned char upper_level;
1952 unsigned char lower_level;
1953 unsigned short neigh_priv_len;
1954 unsigned short dev_id;
1955 unsigned short dev_port;
1956 spinlock_t addr_list_lock;
1957 unsigned char name_assign_type;
1958 bool uc_promisc;
1959 struct netdev_hw_addr_list uc;
1960 struct netdev_hw_addr_list mc;
1961 struct netdev_hw_addr_list dev_addrs;
1962
1963#ifdef CONFIG_SYSFS
1964 struct kset *queues_kset;
1965#endif
1966 unsigned int promiscuity;
1967 unsigned int allmulti;
1968
1969
1970
1971
1972#if IS_ENABLED(CONFIG_VLAN_8021Q)
1973 struct vlan_info __rcu *vlan_info;
1974#endif
1975#if IS_ENABLED(CONFIG_NET_DSA)
1976 struct dsa_port *dsa_ptr;
1977#endif
1978#if IS_ENABLED(CONFIG_TIPC)
1979 struct tipc_bearer __rcu *tipc_ptr;
1980#endif
1981#if IS_ENABLED(CONFIG_IRDA) || IS_ENABLED(CONFIG_ATALK)
1982 void *atalk_ptr;
1983#endif
1984 struct in_device __rcu *ip_ptr;
1985#if IS_ENABLED(CONFIG_DECNET)
1986 struct dn_dev __rcu *dn_ptr;
1987#endif
1988 struct inet6_dev __rcu *ip6_ptr;
1989#if IS_ENABLED(CONFIG_AX25)
1990 void *ax25_ptr;
1991#endif
1992 struct wireless_dev *ieee80211_ptr;
1993 struct wpan_dev *ieee802154_ptr;
1994#if IS_ENABLED(CONFIG_MPLS_ROUTING)
1995 struct mpls_dev __rcu *mpls_ptr;
1996#endif
1997
1998
1999
2000
2001
2002 unsigned char *dev_addr;
2003
2004 struct netdev_rx_queue *_rx;
2005 unsigned int num_rx_queues;
2006 unsigned int real_num_rx_queues;
2007
2008 struct bpf_prog __rcu *xdp_prog;
2009 unsigned long gro_flush_timeout;
2010 int napi_defer_hard_irqs;
2011 rx_handler_func_t __rcu *rx_handler;
2012 void __rcu *rx_handler_data;
2013
2014#ifdef CONFIG_NET_CLS_ACT
2015 struct mini_Qdisc __rcu *miniq_ingress;
2016#endif
2017 struct netdev_queue __rcu *ingress_queue;
2018#ifdef CONFIG_NETFILTER_INGRESS
2019 struct nf_hook_entries __rcu *nf_hooks_ingress;
2020#endif
2021
2022 unsigned char broadcast[MAX_ADDR_LEN];
2023#ifdef CONFIG_RFS_ACCEL
2024 struct cpu_rmap *rx_cpu_rmap;
2025#endif
2026 struct hlist_node index_hlist;
2027
2028
2029
2030
2031 struct netdev_queue *_tx ____cacheline_aligned_in_smp;
2032 unsigned int num_tx_queues;
2033 unsigned int real_num_tx_queues;
2034 struct Qdisc *qdisc;
2035 unsigned int tx_queue_len;
2036 spinlock_t tx_global_lock;
2037
2038 struct xdp_dev_bulk_queue __percpu *xdp_bulkq;
2039
2040#ifdef CONFIG_XPS
2041 struct xps_dev_maps __rcu *xps_cpus_map;
2042 struct xps_dev_maps __rcu *xps_rxqs_map;
2043#endif
2044#ifdef CONFIG_NET_CLS_ACT
2045 struct mini_Qdisc __rcu *miniq_egress;
2046#endif
2047
2048#ifdef CONFIG_NET_SCHED
2049 DECLARE_HASHTABLE (qdisc_hash, 4);
2050#endif
2051
2052 struct timer_list watchdog_timer;
2053 int watchdog_timeo;
2054
2055 struct list_head todo_list;
2056 int __percpu *pcpu_refcnt;
2057
2058 struct list_head link_watch_list;
2059
2060 enum { NETREG_UNINITIALIZED=0,
2061 NETREG_REGISTERED,
2062 NETREG_UNREGISTERING,
2063 NETREG_UNREGISTERED,
2064 NETREG_RELEASED,
2065 NETREG_DUMMY,
2066 } reg_state:8;
2067
2068 bool dismantle;
2069
2070 enum {
2071 RTNL_LINK_INITIALIZED,
2072 RTNL_LINK_INITIALIZING,
2073 } rtnl_link_state:16;
2074
2075 bool needs_free_netdev;
2076 void (*priv_destructor)(struct net_device *dev);
2077
2078#ifdef CONFIG_NETPOLL
2079 struct netpoll_info __rcu *npinfo;
2080#endif
2081
2082 possible_net_t nd_net;
2083
2084
2085 union {
2086 void *ml_priv;
2087 struct pcpu_lstats __percpu *lstats;
2088 struct pcpu_sw_netstats __percpu *tstats;
2089 struct pcpu_dstats __percpu *dstats;
2090 };
2091
2092#if IS_ENABLED(CONFIG_GARP)
2093 struct garp_port __rcu *garp_port;
2094#endif
2095#if IS_ENABLED(CONFIG_MRP)
2096 struct mrp_port __rcu *mrp_port;
2097#endif
2098
2099 struct device dev;
2100 const struct attribute_group *sysfs_groups[4];
2101 const struct attribute_group *sysfs_rx_queue_group;
2102
2103 const struct rtnl_link_ops *rtnl_link_ops;
2104
2105
2106#define GSO_MAX_SIZE 65536
2107 unsigned int gso_max_size;
2108#define GSO_MAX_SEGS 65535
2109 u16 gso_max_segs;
2110
2111#ifdef CONFIG_DCB
2112 const struct dcbnl_rtnl_ops *dcbnl_ops;
2113#endif
2114 s16 num_tc;
2115 struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
2116 u8 prio_tc_map[TC_BITMASK + 1];
2117
2118#if IS_ENABLED(CONFIG_FCOE)
2119 unsigned int fcoe_ddp_xid;
2120#endif
2121#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
2122 struct netprio_map __rcu *priomap;
2123#endif
2124 struct phy_device *phydev;
2125 struct sfp_bus *sfp_bus;
2126 struct lock_class_key *qdisc_tx_busylock;
2127 struct lock_class_key *qdisc_running_key;
2128 bool proto_down;
2129 unsigned wol_enabled:1;
2130
2131 struct list_head net_notifier_list;
2132
2133#if IS_ENABLED(CONFIG_MACSEC)
2134
2135 const struct macsec_ops *macsec_ops;
2136#endif
2137};
2138#define to_net_dev(d) container_of(d, struct net_device, dev)
2139
2140static inline bool netif_elide_gro(const struct net_device *dev)
2141{
2142 if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog)
2143 return true;
2144 return false;
2145}
2146
2147#define NETDEV_ALIGN 32
2148
2149static inline
2150int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
2151{
2152 return dev->prio_tc_map[prio & TC_BITMASK];
2153}
2154
2155static inline
2156int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
2157{
2158 if (tc >= dev->num_tc)
2159 return -EINVAL;
2160
2161 dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
2162 return 0;
2163}
2164
2165int netdev_txq_to_tc(struct net_device *dev, unsigned int txq);
2166void netdev_reset_tc(struct net_device *dev);
2167int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset);
2168int netdev_set_num_tc(struct net_device *dev, u8 num_tc);
2169
2170static inline
2171int netdev_get_num_tc(struct net_device *dev)
2172{
2173 return dev->num_tc;
2174}
2175
2176void netdev_unbind_sb_channel(struct net_device *dev,
2177 struct net_device *sb_dev);
2178int netdev_bind_sb_channel_queue(struct net_device *dev,
2179 struct net_device *sb_dev,
2180 u8 tc, u16 count, u16 offset);
2181int netdev_set_sb_channel(struct net_device *dev, u16 channel);
2182static inline int netdev_get_sb_channel(struct net_device *dev)
2183{
2184 return max_t(int, -dev->num_tc, 0);
2185}
2186
2187static inline
2188struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
2189 unsigned int index)
2190{
2191 return &dev->_tx[index];
2192}
2193
2194static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
2195 const struct sk_buff *skb)
2196{
2197 return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
2198}
2199
2200static inline void netdev_for_each_tx_queue(struct net_device *dev,
2201 void (*f)(struct net_device *,
2202 struct netdev_queue *,
2203 void *),
2204 void *arg)
2205{
2206 unsigned int i;
2207
2208 for (i = 0; i < dev->num_tx_queues; i++)
2209 f(dev, &dev->_tx[i], arg);
2210}
2211
2212#define netdev_lockdep_set_classes(dev) \
2213{ \
2214 static struct lock_class_key qdisc_tx_busylock_key; \
2215 static struct lock_class_key qdisc_running_key; \
2216 static struct lock_class_key qdisc_xmit_lock_key; \
2217 static struct lock_class_key dev_addr_list_lock_key; \
2218 unsigned int i; \
2219 \
2220 (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \
2221 (dev)->qdisc_running_key = &qdisc_running_key; \
2222 lockdep_set_class(&(dev)->addr_list_lock, \
2223 &dev_addr_list_lock_key); \
2224 for (i = 0; i < (dev)->num_tx_queues; i++) \
2225 lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \
2226 &qdisc_xmit_lock_key); \
2227}
2228
2229u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
2230 struct net_device *sb_dev);
2231struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
2232 struct sk_buff *skb,
2233 struct net_device *sb_dev);
2234
2235
2236
2237
2238static inline unsigned netdev_get_fwd_headroom(struct net_device *dev)
2239{
2240 return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom;
2241}
2242
2243static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr)
2244{
2245 if (dev->netdev_ops->ndo_set_rx_headroom)
2246 dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr);
2247}
2248
2249
2250static inline void netdev_reset_rx_headroom(struct net_device *dev)
2251{
2252 netdev_set_rx_headroom(dev, -1);
2253}
2254
2255
2256
2257
2258static inline
2259struct net *dev_net(const struct net_device *dev)
2260{
2261 return read_pnet(&dev->nd_net);
2262}
2263
2264static inline
2265void dev_net_set(struct net_device *dev, struct net *net)
2266{
2267 write_pnet(&dev->nd_net, net);
2268}
2269
2270
2271
2272
2273
2274
2275
2276static inline void *netdev_priv(const struct net_device *dev)
2277{
2278 return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
2279}
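
/*
 * Hedged usage sketch: private driver state lives directly behind the
 * net_device allocation, so the usual pattern is (struct my_priv is
 * hypothetical):
 *
 *	struct net_device *dev = alloc_etherdev(sizeof(struct my_priv));
 *	struct my_priv *priv = netdev_priv(dev);
 *
 * The same pointer is recovered in any callback that receives the
 * net_device, without extra bookkeeping.
 */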
2280
2281
2282
2283
2284#define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
2285
2286
2287
2288
2289
2290#define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
2291

/* Default NAPI poll() weight
 * Devices are supposed to use only 64 anyway
 */
#define NAPI_POLL_WEIGHT 64

/**
 *	netif_napi_add - initialize a NAPI context
 *	@dev:  network device
 *	@napi: NAPI context
 *	@poll: polling function
 *	@weight: default weight
 *
 * netif_napi_add() must be used to initialize a NAPI context prior to calling
 * *any* of the other NAPI-related functions.
 */
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight);

/**
 *	netif_tx_napi_add - initialize a NAPI context
 *	@dev:  network device
 *	@napi: NAPI context
 *	@poll: polling function
 *	@weight: default weight
 *
 * This variant of netif_napi_add() should be used from drivers using NAPI
 * to exclusively poll a TX queue.  It keeps the context out of the NAPI
 * hash so it is never considered for busy polling.
 */
static inline void netif_tx_napi_add(struct net_device *dev,
				     struct napi_struct *napi,
				     int (*poll)(struct napi_struct *, int),
				     int weight)
{
	set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
	netif_napi_add(dev, napi, poll, weight);
}
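
/*
 * Hedged sketch of wiring NAPI up in a driver; struct my_priv and my_poll
 * are hypothetical (my_poll follows the pattern shown after napi_enable()
 * earlier in this header):
 *
 *	netif_napi_add(dev, &priv->napi, my_poll, NAPI_POLL_WEIGHT);
 *	...
 *	napi_enable(&priv->napi);	// usually from ndo_open()
 *	...
 *	napi_disable(&priv->napi);	// from ndo_stop(), before freeing rings
 *	netif_napi_del(&priv->napi);	// on teardown
 */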

/**
 *	netif_napi_del - remove a NAPI context
 *	@napi: NAPI context
 *
 * netif_napi_del() removes a NAPI context from the network device NAPI list
 */
void netif_napi_del(struct napi_struct *napi);
2337
2338struct napi_gro_cb {
2339
2340 void *frag0;
2341
2342
2343 unsigned int frag0_len;
2344
2345
2346 int data_offset;
2347
2348
2349 u16 flush;
2350
2351
2352 u16 flush_id;
2353
2354
2355 u16 count;
2356
2357
2358 u16 gro_remcsum_start;
2359
2360
2361 unsigned long age;
2362
2363
2364 u16 proto;
2365
2366
2367 u8 same_flow:1;
2368
2369
2370 u8 encap_mark:1;
2371
2372
2373 u8 csum_valid:1;
2374
2375
2376 u8 csum_cnt:3;
2377
2378
2379 u8 free:2;
2380#define NAPI_GRO_FREE 1
2381#define NAPI_GRO_FREE_STOLEN_HEAD 2
2382
2383
2384 u8 is_ipv6:1;
2385
2386
2387 u8 is_fou:1;
2388
2389
2390 u8 is_atomic:1;
2391
2392
2393 u8 recursion_counter:4;
2394
2395
2396 u8 is_flist:1;
2397
2398
2399 __wsum csum;
2400
2401
2402 struct sk_buff *last;
2403};
2404
2405#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
2406
2407#define GRO_RECURSION_LIMIT 15
2408static inline int gro_recursion_inc_test(struct sk_buff *skb)
2409{
2410 return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
2411}
2412
2413typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *);
2414static inline struct sk_buff *call_gro_receive(gro_receive_t cb,
2415 struct list_head *head,
2416 struct sk_buff *skb)
2417{
2418 if (unlikely(gro_recursion_inc_test(skb))) {
2419 NAPI_GRO_CB(skb)->flush |= 1;
2420 return NULL;
2421 }
2422
2423 return cb(head, skb);
2424}
2425
2426typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *,
2427 struct sk_buff *);
2428static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
2429 struct sock *sk,
2430 struct list_head *head,
2431 struct sk_buff *skb)
2432{
2433 if (unlikely(gro_recursion_inc_test(skb))) {
2434 NAPI_GRO_CB(skb)->flush |= 1;
2435 return NULL;
2436 }
2437
2438 return cb(sk, head, skb);
2439}
2440
2441struct packet_type {
2442 __be16 type;
2443 bool ignore_outgoing;
2444 struct net_device *dev;
2445 int (*func) (struct sk_buff *,
2446 struct net_device *,
2447 struct packet_type *,
2448 struct net_device *);
2449 void (*list_func) (struct list_head *,
2450 struct packet_type *,
2451 struct net_device *);
2452 bool (*id_match)(struct packet_type *ptype,
2453 struct sock *sk);
2454 void *af_packet_priv;
2455 struct list_head list;
2456};
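
/*
 * Hedged sketch of registering a protocol handler with the packet core
 * (dev_add_pack()/dev_remove_pack() are declared further down in this
 * header); my_rcv and MY_ETH_P are hypothetical:
 *
 *	static struct packet_type my_packet_type __read_mostly = {
 *		.type = cpu_to_be16(MY_ETH_P),
 *		.func = my_rcv,
 *	};
 *
 *	dev_add_pack(&my_packet_type);		// e.g. from module init
 *	dev_remove_pack(&my_packet_type);	// from module exit
 */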
2457
2458struct offload_callbacks {
2459 struct sk_buff *(*gso_segment)(struct sk_buff *skb,
2460 netdev_features_t features);
2461 struct sk_buff *(*gro_receive)(struct list_head *head,
2462 struct sk_buff *skb);
2463 int (*gro_complete)(struct sk_buff *skb, int nhoff);
2464};
2465
2466struct packet_offload {
2467 __be16 type;
2468 u16 priority;
2469 struct offload_callbacks callbacks;
2470 struct list_head list;
2471};
2472
2473
2474struct pcpu_sw_netstats {
2475 u64 rx_packets;
2476 u64 rx_bytes;
2477 u64 tx_packets;
2478 u64 tx_bytes;
2479 struct u64_stats_sync syncp;
2480} __aligned(4 * sizeof(u64));
2481
2482struct pcpu_lstats {
2483 u64_stats_t packets;
2484 u64_stats_t bytes;
2485 struct u64_stats_sync syncp;
2486} __aligned(2 * sizeof(u64));
2487
2488void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes);
2489
2490static inline void dev_lstats_add(struct net_device *dev, unsigned int len)
2491{
2492 struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats);
2493
2494 u64_stats_update_begin(&lstats->syncp);
2495 u64_stats_add(&lstats->bytes, len);
2496 u64_stats_inc(&lstats->packets);
2497 u64_stats_update_end(&lstats->syncp);
2498}
2499
2500#define __netdev_alloc_pcpu_stats(type, gfp) \
2501({ \
2502 typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
2503 if (pcpu_stats) { \
2504 int __cpu; \
2505 for_each_possible_cpu(__cpu) { \
2506 typeof(type) *stat; \
2507 stat = per_cpu_ptr(pcpu_stats, __cpu); \
2508 u64_stats_init(&stat->syncp); \
2509 } \
2510 } \
2511 pcpu_stats; \
2512})
2513
2514#define netdev_alloc_pcpu_stats(type) \
2515 __netdev_alloc_pcpu_stats(type, GFP_KERNEL)
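
/*
 * Hedged sketch: a driver that keeps per-CPU software counters allocates
 * them with the helper above and bumps them from its hot path (fields as in
 * struct pcpu_sw_netstats):
 *
 *	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
 *	...
 *	struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);
 *
 *	u64_stats_update_begin(&stats->syncp);
 *	stats->rx_packets++;
 *	stats->rx_bytes += skb->len;
 *	u64_stats_update_end(&stats->syncp);
 */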
2516
2517enum netdev_lag_tx_type {
2518 NETDEV_LAG_TX_TYPE_UNKNOWN,
2519 NETDEV_LAG_TX_TYPE_RANDOM,
2520 NETDEV_LAG_TX_TYPE_BROADCAST,
2521 NETDEV_LAG_TX_TYPE_ROUNDROBIN,
2522 NETDEV_LAG_TX_TYPE_ACTIVEBACKUP,
2523 NETDEV_LAG_TX_TYPE_HASH,
2524};
2525
2526enum netdev_lag_hash {
2527 NETDEV_LAG_HASH_NONE,
2528 NETDEV_LAG_HASH_L2,
2529 NETDEV_LAG_HASH_L34,
2530 NETDEV_LAG_HASH_L23,
2531 NETDEV_LAG_HASH_E23,
2532 NETDEV_LAG_HASH_E34,
2533 NETDEV_LAG_HASH_UNKNOWN,
2534};
2535
2536struct netdev_lag_upper_info {
2537 enum netdev_lag_tx_type tx_type;
2538 enum netdev_lag_hash hash_type;
2539};
2540
2541struct netdev_lag_lower_state_info {
2542 u8 link_up : 1,
2543 tx_enabled : 1;
2544};
2545
2546#include <linux/notifier.h>
2547
2548
2549
2550
2551
2552enum netdev_cmd {
2553 NETDEV_UP = 1,
2554 NETDEV_DOWN,
2555 NETDEV_REBOOT,
2556
2557
2558
2559 NETDEV_CHANGE,
2560 NETDEV_REGISTER,
2561 NETDEV_UNREGISTER,
2562 NETDEV_CHANGEMTU,
2563 NETDEV_CHANGEADDR,
2564 NETDEV_PRE_CHANGEADDR,
2565 NETDEV_GOING_DOWN,
2566 NETDEV_CHANGENAME,
2567 NETDEV_FEAT_CHANGE,
2568 NETDEV_BONDING_FAILOVER,
2569 NETDEV_PRE_UP,
2570 NETDEV_PRE_TYPE_CHANGE,
2571 NETDEV_POST_TYPE_CHANGE,
2572 NETDEV_POST_INIT,
2573 NETDEV_RELEASE,
2574 NETDEV_NOTIFY_PEERS,
2575 NETDEV_JOIN,
2576 NETDEV_CHANGEUPPER,
2577 NETDEV_RESEND_IGMP,
2578 NETDEV_PRECHANGEMTU,
2579 NETDEV_CHANGEINFODATA,
2580 NETDEV_BONDING_INFO,
2581 NETDEV_PRECHANGEUPPER,
2582 NETDEV_CHANGELOWERSTATE,
2583 NETDEV_UDP_TUNNEL_PUSH_INFO,
2584 NETDEV_UDP_TUNNEL_DROP_INFO,
2585 NETDEV_CHANGE_TX_QUEUE_LEN,
2586 NETDEV_CVLAN_FILTER_PUSH_INFO,
2587 NETDEV_CVLAN_FILTER_DROP_INFO,
2588 NETDEV_SVLAN_FILTER_PUSH_INFO,
2589 NETDEV_SVLAN_FILTER_DROP_INFO,
2590};
2591const char *netdev_cmd_to_name(enum netdev_cmd cmd);
2592
2593int register_netdevice_notifier(struct notifier_block *nb);
2594int unregister_netdevice_notifier(struct notifier_block *nb);
2595int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb);
2596int unregister_netdevice_notifier_net(struct net *net,
2597 struct notifier_block *nb);
2598int register_netdevice_notifier_dev_net(struct net_device *dev,
2599 struct notifier_block *nb,
2600 struct netdev_net_notifier *nn);
2601int unregister_netdevice_notifier_dev_net(struct net_device *dev,
2602 struct notifier_block *nb,
2603 struct netdev_net_notifier *nn);
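
/*
 * Hedged sketch of a netdevice notifier; my_netdev_event is hypothetical:
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		switch (event) {
 *		case NETDEV_UP:
 *		case NETDEV_DOWN:
 *			// react to dev changing state
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_netdev_event };
 *	register_netdevice_notifier(&my_nb);
 */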
2604
2605struct netdev_notifier_info {
2606 struct net_device *dev;
2607 struct netlink_ext_ack *extack;
2608};
2609
2610struct netdev_notifier_info_ext {
2611 struct netdev_notifier_info info;
2612 union {
2613 u32 mtu;
2614 } ext;
2615};
2616
2617struct netdev_notifier_change_info {
2618 struct netdev_notifier_info info;
2619 unsigned int flags_changed;
2620};
2621
2622struct netdev_notifier_changeupper_info {
2623 struct netdev_notifier_info info;
2624 struct net_device *upper_dev;
2625 bool master;
2626 bool linking;
2627 void *upper_info;
2628};
2629
2630struct netdev_notifier_changelowerstate_info {
2631 struct netdev_notifier_info info;
2632 void *lower_state_info;
2633};
2634
2635struct netdev_notifier_pre_changeaddr_info {
2636 struct netdev_notifier_info info;
2637 const unsigned char *dev_addr;
2638};
2639
2640static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
2641 struct net_device *dev)
2642{
2643 info->dev = dev;
2644 info->extack = NULL;
2645}
2646
2647static inline struct net_device *
2648netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
2649{
2650 return info->dev;
2651}
2652
2653static inline struct netlink_ext_ack *
2654netdev_notifier_info_to_extack(const struct netdev_notifier_info *info)
2655{
2656 return info->extack;
2657}
2658
2659int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
2660
2661
2662extern rwlock_t dev_base_lock;
2663
2664#define for_each_netdev(net, d) \
2665 list_for_each_entry(d, &(net)->dev_base_head, dev_list)
2666#define for_each_netdev_reverse(net, d) \
2667 list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
2668#define for_each_netdev_rcu(net, d) \
2669 list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
2670#define for_each_netdev_safe(net, d, n) \
2671 list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
2672#define for_each_netdev_continue(net, d) \
2673 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
2674#define for_each_netdev_continue_reverse(net, d) \
2675 list_for_each_entry_continue_reverse(d, &(net)->dev_base_head, \
2676 dev_list)
2677#define for_each_netdev_continue_rcu(net, d) \
2678 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
2679#define for_each_netdev_in_bond_rcu(bond, slave) \
2680 for_each_netdev_rcu(&init_net, slave) \
2681 if (netdev_master_upper_dev_get_rcu(slave) == (bond))
2682#define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
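
/*
 * Hedged sketch: walking all devices in a namespace requires either the RTNL
 * lock or RCU, matching the iterator used:
 *
 *	rtnl_lock();
 *	for_each_netdev(net, dev)
 *		pr_info("%s\n", dev->name);
 *	rtnl_unlock();
 *
 * or, without sleeping:
 *
 *	rcu_read_lock();
 *	for_each_netdev_rcu(net, dev)
 *		...;
 *	rcu_read_unlock();
 */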
2683
2684static inline struct net_device *next_net_device(struct net_device *dev)
2685{
2686 struct list_head *lh;
2687 struct net *net;
2688
2689 net = dev_net(dev);
2690 lh = dev->dev_list.next;
2691 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2692}
2693
2694static inline struct net_device *next_net_device_rcu(struct net_device *dev)
2695{
2696 struct list_head *lh;
2697 struct net *net;
2698
2699 net = dev_net(dev);
2700 lh = rcu_dereference(list_next_rcu(&dev->dev_list));
2701 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2702}
2703
2704static inline struct net_device *first_net_device(struct net *net)
2705{
2706 return list_empty(&net->dev_base_head) ? NULL :
2707 net_device_entry(net->dev_base_head.next);
2708}
2709
2710static inline struct net_device *first_net_device_rcu(struct net *net)
2711{
2712 struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
2713
2714 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2715}
2716
2717int netdev_boot_setup_check(struct net_device *dev);
2718unsigned long netdev_boot_base(const char *prefix, int unit);
2719struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
2720 const char *hwaddr);
2721struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
2722struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
2723void dev_add_pack(struct packet_type *pt);
2724void dev_remove_pack(struct packet_type *pt);
2725void __dev_remove_pack(struct packet_type *pt);
2726void dev_add_offload(struct packet_offload *po);
2727void dev_remove_offload(struct packet_offload *po);
2728
2729int dev_get_iflink(const struct net_device *dev);
2730int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
2731struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
2732 unsigned short mask);
2733struct net_device *dev_get_by_name(struct net *net, const char *name);
2734struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
2735struct net_device *__dev_get_by_name(struct net *net, const char *name);
2736int dev_alloc_name(struct net_device *dev, const char *name);
2737int dev_open(struct net_device *dev, struct netlink_ext_ack *extack);
2738void dev_close(struct net_device *dev);
2739void dev_close_many(struct list_head *head, bool unlink);
2740void dev_disable_lro(struct net_device *dev);
2741int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
2742u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
2743 struct net_device *sb_dev);
2744u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
2745 struct net_device *sb_dev);
2746int dev_queue_xmit(struct sk_buff *skb);
2747int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
2748int dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
2749int register_netdevice(struct net_device *dev);
2750void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
2751void unregister_netdevice_many(struct list_head *head);
2752static inline void unregister_netdevice(struct net_device *dev)
2753{
2754 unregister_netdevice_queue(dev, NULL);
2755}
2756
2757int netdev_refcnt_read(const struct net_device *dev);
2758void free_netdev(struct net_device *dev);
2759void netdev_freemem(struct net_device *dev);
2760void synchronize_net(void);
2761int init_dummy_netdev(struct net_device *dev);
2762
2763struct net_device *netdev_get_xmit_slave(struct net_device *dev,
2764 struct sk_buff *skb,
2765 bool all_slaves);
2766struct net_device *dev_get_by_index(struct net *net, int ifindex);
2767struct net_device *__dev_get_by_index(struct net *net, int ifindex);
2768struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
2769struct net_device *dev_get_by_napi_id(unsigned int napi_id);
2770int netdev_get_name(struct net *net, char *name, int ifindex);
2771int dev_restart(struct net_device *dev);
2772int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
2773int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb);
2774
2775static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
2776{
2777 return NAPI_GRO_CB(skb)->data_offset;
2778}
2779
2780static inline unsigned int skb_gro_len(const struct sk_buff *skb)
2781{
2782 return skb->len - NAPI_GRO_CB(skb)->data_offset;
2783}
2784
2785static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
2786{
2787 NAPI_GRO_CB(skb)->data_offset += len;
2788}
2789
2790static inline void *skb_gro_header_fast(struct sk_buff *skb,
2791 unsigned int offset)
2792{
2793 return NAPI_GRO_CB(skb)->frag0 + offset;
2794}
2795
2796static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
2797{
2798 return NAPI_GRO_CB(skb)->frag0_len < hlen;
2799}
2800
2801static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
2802{
2803 NAPI_GRO_CB(skb)->frag0 = NULL;
2804 NAPI_GRO_CB(skb)->frag0_len = 0;
2805}
2806
2807static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
2808 unsigned int offset)
2809{
2810 if (!pskb_may_pull(skb, hlen))
2811 return NULL;
2812
2813 skb_gro_frag0_invalidate(skb);
2814 return skb->data + offset;
2815}
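/*
 * Usage sketch (illustrative only, not part of this header): a protocol's
 * ->gro_receive() callback normally reads its header through the frag0
 * fast path and falls back to the slow path only when the header is not
 * available there.  "uh" and the "out" label are hypothetical local names.
 *
 *	unsigned int off = skb_gro_offset(skb);
 *	unsigned int hlen = off + sizeof(struct udphdr);
 *	struct udphdr *uh = skb_gro_header_fast(skb, off);
 *
 *	if (skb_gro_header_hard(skb, hlen)) {
 *		uh = skb_gro_header_slow(skb, hlen, off);
 *		if (unlikely(!uh))
 *			goto out;
 *	}
 */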
2816
2817static inline void *skb_gro_network_header(struct sk_buff *skb)
2818{
2819 return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
2820 skb_network_offset(skb);
2821}
2822
2823static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
2824 const void *start, unsigned int len)
2825{
2826 if (NAPI_GRO_CB(skb)->csum_valid)
2827 NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
2828 csum_partial(start, len, 0));
2829}
2830
/*
 * Slow path for GRO checksum validation: checksum the remaining GRO data,
 * fold it with the stored pseudo-header checksum and return a non-zero
 * value if validation fails.
 */
2836__sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
2837
2838static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
2839{
2840 return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
2841}
2842
2843static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
2844 bool zero_okay,
2845 __sum16 check)
2846{
2847 return ((skb->ip_summed != CHECKSUM_PARTIAL ||
2848 skb_checksum_start_offset(skb) <
2849 skb_gro_offset(skb)) &&
2850 !skb_at_gro_remcsum_start(skb) &&
2851 NAPI_GRO_CB(skb)->csum_cnt == 0 &&
2852 (!zero_okay || check));
2853}
2854
2855static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
2856 __wsum psum)
2857{
2858 if (NAPI_GRO_CB(skb)->csum_valid &&
2859 !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
2860 return 0;
2861
2862 NAPI_GRO_CB(skb)->csum = psum;
2863
2864 return __skb_gro_checksum_complete(skb);
2865}
2866
2867static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
2868{
2869 if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
		/* Consume a checksum provided as CHECKSUM_UNNECESSARY by hardware */
2871 NAPI_GRO_CB(skb)->csum_cnt--;
2872 } else {
		/* Record the checksum verified during GRO by updating
		 * CHECKSUM_UNNECESSARY/csum_level, so the normal receive
		 * path does not have to verify it again.
		 */
2877 __skb_incr_checksum_unnecessary(skb);
2878 }
2879}
2880
2881#define __skb_gro_checksum_validate(skb, proto, zero_okay, check, \
2882 compute_pseudo) \
2883({ \
2884 __sum16 __ret = 0; \
2885 if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \
2886 __ret = __skb_gro_checksum_validate_complete(skb, \
2887 compute_pseudo(skb, proto)); \
2888 if (!__ret) \
2889 skb_gro_incr_csum_unnecessary(skb); \
2890 __ret; \
2891})
2892
2893#define skb_gro_checksum_validate(skb, proto, compute_pseudo) \
2894 __skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)
2895
2896#define skb_gro_checksum_validate_zero_check(skb, proto, check, \
2897 compute_pseudo) \
2898 __skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)
2899
2900#define skb_gro_checksum_simple_validate(skb) \
2901 __skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
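/*
 * Usage sketch (illustrative only): a tunnel ->gro_receive() handler can
 * validate the outer checksum before aggregating, flushing the packet on
 * failure.  The "flush" label is a hypothetical local name.
 *
 *	if (skb_gro_checksum_simple_validate(skb))
 *		goto flush;
 *
 * Protocols with a pseudo header (e.g. UDP) use skb_gro_checksum_validate()
 * with the matching compute_pseudo helper instead.
 */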
2902
2903static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
2904{
2905 return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
2906 !NAPI_GRO_CB(skb)->csum_valid);
2907}
2908
2909static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
2910 __wsum pseudo)
2911{
2912 NAPI_GRO_CB(skb)->csum = ~pseudo;
2913 NAPI_GRO_CB(skb)->csum_valid = 1;
2914}
2915
2916#define skb_gro_checksum_try_convert(skb, proto, compute_pseudo) \
2917do { \
2918 if (__skb_gro_checksum_convert_check(skb)) \
2919 __skb_gro_checksum_convert(skb, \
2920 compute_pseudo(skb, proto)); \
2921} while (0)
2922
2923struct gro_remcsum {
2924 int offset;
2925 __wsum delta;
2926};
2927
2928static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
2929{
2930 grc->offset = 0;
2931 grc->delta = 0;
2932}
2933
2934static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
2935 unsigned int off, size_t hdrlen,
2936 int start, int offset,
2937 struct gro_remcsum *grc,
2938 bool nopartial)
2939{
2940 __wsum delta;
2941 size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
2942
2943 BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
2944
2945 if (!nopartial) {
2946 NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
2947 return ptr;
2948 }
2949
2950 ptr = skb_gro_header_fast(skb, off);
2951 if (skb_gro_header_hard(skb, off + plen)) {
2952 ptr = skb_gro_header_slow(skb, off + plen, off);
2953 if (!ptr)
2954 return NULL;
2955 }
2956
2957 delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
2958 start, offset);
2959
2960
2961 NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
2962
2963 grc->offset = off + hdrlen + offset;
2964 grc->delta = delta;
2965
2966 return ptr;
2967}
2968
2969static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
2970 struct gro_remcsum *grc)
2971{
2972 void *ptr;
2973 size_t plen = grc->offset + sizeof(u16);
2974
2975 if (!grc->delta)
2976 return;
2977
2978 ptr = skb_gro_header_fast(skb, grc->offset);
2979 if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
2980 ptr = skb_gro_header_slow(skb, plen, grc->offset);
2981 if (!ptr)
2982 return;
2983 }
2984
2985 remcsum_unadjust((__sum16 *)ptr, grc->delta);
2986}
2987
2988#ifdef CONFIG_XFRM_OFFLOAD
2989static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
2990{
2991 if (PTR_ERR(pp) != -EINPROGRESS)
2992 NAPI_GRO_CB(skb)->flush |= flush;
2993}
2994static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
2995 struct sk_buff *pp,
2996 int flush,
2997 struct gro_remcsum *grc)
2998{
2999 if (PTR_ERR(pp) != -EINPROGRESS) {
3000 NAPI_GRO_CB(skb)->flush |= flush;
3001 skb_gro_remcsum_cleanup(skb, grc);
3002 skb->remcsum_offload = 0;
3003 }
3004}
3005#else
3006static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
3007{
3008 NAPI_GRO_CB(skb)->flush |= flush;
3009}
3010static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
3011 struct sk_buff *pp,
3012 int flush,
3013 struct gro_remcsum *grc)
3014{
3015 NAPI_GRO_CB(skb)->flush |= flush;
3016 skb_gro_remcsum_cleanup(skb, grc);
3017 skb->remcsum_offload = 0;
3018}
3019#endif
3020
3021static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
3022 unsigned short type,
3023 const void *daddr, const void *saddr,
3024 unsigned int len)
3025{
3026 if (!dev->header_ops || !dev->header_ops->create)
3027 return 0;
3028
3029 return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
3030}
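/*
 * Usage sketch (illustrative only): callers that build an outgoing frame by
 * hand reserve the link-layer headroom, fill in the payload and then let the
 * device format its own header.  "daddr" and the "error" label are
 * hypothetical local names.
 *
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 *	skb_reset_network_header(skb);
 *	... fill in the network payload ...
 *	if (dev_hard_header(skb, dev, ETH_P_IP, daddr, dev->dev_addr,
 *			    skb->len) < 0)
 *		goto error;
 */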
3031
3032static inline int dev_parse_header(const struct sk_buff *skb,
3033 unsigned char *haddr)
3034{
3035 const struct net_device *dev = skb->dev;
3036
3037 if (!dev->header_ops || !dev->header_ops->parse)
3038 return 0;
3039 return dev->header_ops->parse(skb, haddr);
3040}
3041
3042static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb)
3043{
3044 const struct net_device *dev = skb->dev;
3045
3046 if (!dev->header_ops || !dev->header_ops->parse_protocol)
3047 return 0;
3048 return dev->header_ops->parse_protocol(skb);
3049}
3050
3051
3052static inline bool dev_validate_header(const struct net_device *dev,
3053 char *ll_header, int len)
3054{
3055 if (likely(len >= dev->hard_header_len))
3056 return true;
3057 if (len < dev->min_header_len)
3058 return false;
3059
3060 if (capable(CAP_SYS_RAWIO)) {
3061 memset(ll_header + len, 0, dev->hard_header_len - len);
3062 return true;
3063 }
3064
3065 if (dev->header_ops && dev->header_ops->validate)
3066 return dev->header_ops->validate(ll_header, len);
3067
3068 return false;
3069}
3070
3071typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr,
3072 int len, int size);
3073int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
3074static inline int unregister_gifconf(unsigned int family)
3075{
3076 return register_gifconf(family, NULL);
3077}
3078
3079#ifdef CONFIG_NET_FLOW_LIMIT
3080#define FLOW_LIMIT_HISTORY (1 << 7)
3081struct sd_flow_limit {
3082 u64 count;
3083 unsigned int num_buckets;
3084 unsigned int history_head;
3085 u16 history[FLOW_LIMIT_HISTORY];
3086 u8 buckets[];
3087};
3088
3089extern int netdev_flow_limit_table_len;
3090#endif
3091
/*
 * Per-CPU packet processing state: NAPI poll list, input backlog,
 * qdisc output queue and skb completion queue for this CPU's softirq.
 */
3095struct softnet_data {
3096 struct list_head poll_list;
3097 struct sk_buff_head process_queue;
3098
3099
3100 unsigned int processed;
3101 unsigned int time_squeeze;
3102 unsigned int received_rps;
3103#ifdef CONFIG_RPS
3104 struct softnet_data *rps_ipi_list;
3105#endif
3106#ifdef CONFIG_NET_FLOW_LIMIT
3107 struct sd_flow_limit __rcu *flow_limit;
3108#endif
3109 struct Qdisc *output_queue;
3110 struct Qdisc **output_queue_tailp;
3111 struct sk_buff *completion_queue;
3112#ifdef CONFIG_XFRM_OFFLOAD
3113 struct sk_buff_head xfrm_backlog;
3114#endif
3115
3116 struct {
3117 u16 recursion;
3118 u8 more;
3119 } xmit;
3120#ifdef CONFIG_RPS
3121
3122
3123
3124 unsigned int input_queue_head ____cacheline_aligned_in_smp;
3125
3126
3127 call_single_data_t csd ____cacheline_aligned_in_smp;
3128 struct softnet_data *rps_ipi_next;
3129 unsigned int cpu;
3130 unsigned int input_queue_tail;
3131#endif
3132 unsigned int dropped;
3133 struct sk_buff_head input_pkt_queue;
3134 struct napi_struct backlog;
3135
3136};
3137
3138static inline void input_queue_head_incr(struct softnet_data *sd)
3139{
3140#ifdef CONFIG_RPS
3141 sd->input_queue_head++;
3142#endif
3143}
3144
3145static inline void input_queue_tail_incr_save(struct softnet_data *sd,
3146 unsigned int *qtail)
3147{
3148#ifdef CONFIG_RPS
3149 *qtail = ++sd->input_queue_tail;
3150#endif
3151}
3152
3153DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
3154
3155static inline int dev_recursion_level(void)
3156{
3157 return this_cpu_read(softnet_data.xmit.recursion);
3158}
3159
3160#define XMIT_RECURSION_LIMIT 8
3161static inline bool dev_xmit_recursion(void)
3162{
3163 return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
3164 XMIT_RECURSION_LIMIT);
3165}
3166
3167static inline void dev_xmit_recursion_inc(void)
3168{
3169 __this_cpu_inc(softnet_data.xmit.recursion);
3170}
3171
3172static inline void dev_xmit_recursion_dec(void)
3173{
3174 __this_cpu_dec(softnet_data.xmit.recursion);
3175}
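/*
 * Usage sketch (illustrative only): code that re-enters the transmit path
 * from within the stack (for example packet redirection) brackets
 * dev_queue_xmit() with the recursion helpers so a misconfigured setup
 * cannot loop forever.
 *
 *	if (dev_xmit_recursion()) {
 *		kfree_skb(skb);
 *		return -ENETDOWN;
 *	}
 *	dev_xmit_recursion_inc();
 *	ret = dev_queue_xmit(skb);
 *	dev_xmit_recursion_dec();
 */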
3176
3177void __netif_schedule(struct Qdisc *q);
3178void netif_schedule_queue(struct netdev_queue *txq);
3179
3180static inline void netif_tx_schedule_all(struct net_device *dev)
3181{
3182 unsigned int i;
3183
3184 for (i = 0; i < dev->num_tx_queues; i++)
3185 netif_schedule_queue(netdev_get_tx_queue(dev, i));
3186}
3187
3188static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
3189{
3190 clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
3191}
3192
/**
 *	netif_start_queue - allow transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 */
3199static inline void netif_start_queue(struct net_device *dev)
3200{
3201 netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
3202}
3203
3204static inline void netif_tx_start_all_queues(struct net_device *dev)
3205{
3206 unsigned int i;
3207
3208 for (i = 0; i < dev->num_tx_queues; i++) {
3209 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3210 netif_tx_start_queue(txq);
3211 }
3212}
3213
3214void netif_tx_wake_queue(struct netdev_queue *dev_queue);
3215
/**
 *	netif_wake_queue - restart transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are available.
 */
3223static inline void netif_wake_queue(struct net_device *dev)
3224{
3225 netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
3226}
3227
3228static inline void netif_tx_wake_all_queues(struct net_device *dev)
3229{
3230 unsigned int i;
3231
3232 for (i = 0; i < dev->num_tx_queues; i++) {
3233 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3234 netif_tx_wake_queue(txq);
3235 }
3236}
3237
3238static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
3239{
3240 set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
3241}
3242
/**
 *	netif_stop_queue - stop transmitted packets
 *	@dev: network device
 *
 *	Stop upper layers calling the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are unavailable.
 */
3250static inline void netif_stop_queue(struct net_device *dev)
3251{
3252 netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
3253}
3254
3255void netif_tx_stop_all_queues(struct net_device *dev);
3256
3257static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
3258{
3259 return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
3260}
3261
/**
 *	netif_queue_stopped - test if transmit queue is flowblocked
 *	@dev: network device
 *
 *	Test if transmit queue on device is currently unable to send.
 */
3268static inline bool netif_queue_stopped(const struct net_device *dev)
3269{
3270 return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
3271}
3272
3273static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
3274{
3275 return dev_queue->state & QUEUE_STATE_ANY_XOFF;
3276}
3277
3278static inline bool
3279netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
3280{
3281 return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
3282}
3283
3284static inline bool
3285netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
3286{
3287 return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
3288}
3289
/**
 *	netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
 *	@dev_queue: pointer to transmit queue
 *
 *	BQL enabled drivers might use this helper in their ndo_start_xmit(),
 *	to give appropriate hint to the CPU.
 */
3297static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
3298{
3299#ifdef CONFIG_BQL
3300 prefetchw(&dev_queue->dql.num_queued);
3301#endif
3302}
3303
/**
 *	netdev_txq_bql_complete_prefetchw - prefetch bql data for write
 *	@dev_queue: pointer to transmit queue
 *
 *	BQL enabled drivers might use this helper in their TX completion path,
 *	to give appropriate hint to the CPU.
 */
3311static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
3312{
3313#ifdef CONFIG_BQL
3314 prefetchw(&dev_queue->dql.limit);
3315#endif
3316}
3317
3318static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
3319 unsigned int bytes)
3320{
3321#ifdef CONFIG_BQL
3322 dql_queued(&dev_queue->dql, bytes);
3323
3324 if (likely(dql_avail(&dev_queue->dql) >= 0))
3325 return;
3326
3327 set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
3328
	/*
	 * The XOFF flag must be set before checking the dql_avail below,
	 * because in netdev_tx_completed_queue we update the dql_completed
	 * before checking the XOFF flag.
	 */
3334 smp_mb();
3335
	/* check again in case another CPU has just made room available */
3337 if (unlikely(dql_avail(&dev_queue->dql) >= 0))
3338 clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
3339#endif
3340}
3341
/* Variant of netdev_tx_sent_queue() for drivers that are aware
 * that they should not test BQL status themselves.
 * We do want to change __QUEUE_STATE_STACK_XOFF only for the last
 * skb of a batch.
 * Returns true if the doorbell must be used to kick the NIC.
 */
3348static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
3349 unsigned int bytes,
3350 bool xmit_more)
3351{
3352 if (xmit_more) {
3353#ifdef CONFIG_BQL
3354 dql_queued(&dev_queue->dql, bytes);
3355#endif
3356 return netif_tx_queue_stopped(dev_queue);
3357 }
3358 netdev_tx_sent_queue(dev_queue, bytes);
3359 return true;
3360}
3361
/**
 *	netdev_sent_queue - report the number of bytes queued to hardware
 *	@dev: network device
 *	@bytes: number of bytes queued to the hardware device queue
 *
 *	Report the number of bytes queued for sending/completion to the network
 *	device hardware queue. @bytes should be a good approximation and should
 *	exactly match netdev_completed_queue() @bytes.
 */
3371static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
3372{
3373 netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
3374}
3375
3376static inline bool __netdev_sent_queue(struct net_device *dev,
3377 unsigned int bytes,
3378 bool xmit_more)
3379{
3380 return __netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes,
3381 xmit_more);
3382}
3383
3384static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
3385 unsigned int pkts, unsigned int bytes)
3386{
3387#ifdef CONFIG_BQL
3388 if (unlikely(!bytes))
3389 return;
3390
3391 dql_completed(&dev_queue->dql, bytes);
3392
	/*
	 * Without the memory barrier, there is a small possibility that
	 * netdev_tx_sent_queue will miss the update and cause the queue to
	 * be stopped forever.
	 */
3398 smp_mb();
3399
3400 if (unlikely(dql_avail(&dev_queue->dql) < 0))
3401 return;
3402
3403 if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
3404 netif_schedule_queue(dev_queue);
3405#endif
3406}
3407
/**
 *	netdev_completed_queue - report bytes and packets completed by device
 *	@dev: network device
 *	@pkts: actual number of packets sent over the medium
 *	@bytes: actual number of bytes sent over the medium
 *
 *	Report the number of bytes and packets transmitted by the network device
 *	hardware queue over the physical medium, @bytes must exactly match the
 *	@bytes amount passed to netdev_sent_queue().
 */
3418static inline void netdev_completed_queue(struct net_device *dev,
3419 unsigned int pkts, unsigned int bytes)
3420{
3421 netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
3422}
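/*
 * Usage sketch (illustrative only): a BQL-aware driver pairs the two calls
 * on the same netdev_queue.  "txq", "pkts" and "bytes" are hypothetical
 * driver-local variables.
 *
 *	In ->ndo_start_xmit(), once the descriptor is posted:
 *		netdev_tx_sent_queue(txq, skb->len);
 *
 *	In the TX completion handler, after reclaiming descriptors:
 *		netdev_tx_completed_queue(txq, pkts, bytes);
 *
 *	When the ring is torn down or reset:
 *		netdev_tx_reset_queue(txq);
 */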
3423
3424static inline void netdev_tx_reset_queue(struct netdev_queue *q)
3425{
3426#ifdef CONFIG_BQL
3427 clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
3428 dql_reset(&q->dql);
3429#endif
3430}
3431
/**
 *	netdev_reset_queue - reset the packets and bytes count of a network device
 *	@dev_queue: network device
 *
 *	Reset the bytes and packet count of a network device and clear the
 *	software flow control OFF bit for this network device.
 */
3439static inline void netdev_reset_queue(struct net_device *dev_queue)
3440{
3441 netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
3442}
3443
/**
 *	netdev_cap_txqueue - check if selected tx queue exceeds device queues
 *	@dev: network device
 *	@queue_index: given tx queue index
 *
 *	Returns 0 if given tx queue index >= number of device tx queues,
 *	otherwise returns the originally passed tx queue index.
 */
3452static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
3453{
3454 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
3455 net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
3456 dev->name, queue_index,
3457 dev->real_num_tx_queues);
3458 return 0;
3459 }
3460
3461 return queue_index;
3462}
3463
/**
 *	netif_running - test if up
 *	@dev: network device
 *
 *	Test if the device has been brought up.
 */
3470static inline bool netif_running(const struct net_device *dev)
3471{
3472 return test_bit(__LINK_STATE_START, &dev->state);
3473}
3474
/*
 * Routines to manage the subqueues on a device.  We only need start,
 * stop, and a check if it's stopped.  All other device management is
 * done at the overall netdevice level.
 * Also test the device if we're multiqueue.
 */

/**
 *	netif_start_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 *	Start individual transmit queue of a device with multiple transmit queues.
 */
3489static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
3490{
3491 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3492
3493 netif_tx_start_queue(txq);
3494}
3495
/**
 *	netif_stop_subqueue - stop sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 *	Stop individual transmit queue of a device with multiple transmit queues.
 */
3503static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
3504{
3505 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3506 netif_tx_stop_queue(txq);
3507}
3508
/**
 *	__netif_subqueue_stopped - test status of subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 *	Check individual transmit queue of a device with multiple transmit queues.
 */
3516static inline bool __netif_subqueue_stopped(const struct net_device *dev,
3517 u16 queue_index)
3518{
3519 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3520
3521 return netif_tx_queue_stopped(txq);
3522}
3523
3524static inline bool netif_subqueue_stopped(const struct net_device *dev,
3525 struct sk_buff *skb)
3526{
3527 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
3528}
3529
/**
 *	netif_wake_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 *	Resume individual transmit queue of a device with multiple transmit queues.
 */
3537static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
3538{
3539 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3540
3541 netif_tx_wake_queue(txq);
3542}
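/*
 * Usage sketch (illustrative only): a multiqueue driver applies flow control
 * per subqueue.  ring_space(), "qid" and "wake_thresh" are hypothetical
 * driver-local helpers and variables.
 *
 *	In ->ndo_start_xmit(), when the ring is nearly full:
 *		if (ring_space(ring) < MAX_SKB_FRAGS + 1)
 *			netif_stop_subqueue(dev, qid);
 *
 *	In the TX completion handler, once room is available again:
 *		if (__netif_subqueue_stopped(dev, qid) &&
 *		    ring_space(ring) > wake_thresh)
 *			netif_wake_subqueue(dev, qid);
 */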
3543
3544#ifdef CONFIG_XPS
3545int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
3546 u16 index);
3547int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
3548 u16 index, bool is_rxqs_map);
3549
3550
3551
3552
3553
3554
3555
3556
3557
3558static inline bool netif_attr_test_mask(unsigned long j,
3559 const unsigned long *mask,
3560 unsigned int nr_bits)
3561{
3562 cpu_max_bits_warn(j, nr_bits);
3563 return test_bit(j, mask);
3564}
3565
3566
3567
3568
3569
3570
3571
3572
3573
3574static inline bool netif_attr_test_online(unsigned long j,
3575 const unsigned long *online_mask,
3576 unsigned int nr_bits)
3577{
3578 cpu_max_bits_warn(j, nr_bits);
3579
3580 if (online_mask)
3581 return test_bit(j, online_mask);
3582
3583 return (j < nr_bits);
3584}
3585
3586
3587
3588
3589
3590
3591
3592
3593
3594static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp,
3595 unsigned int nr_bits)
3596{
3597
3598 if (n != -1)
3599 cpu_max_bits_warn(n, nr_bits);
3600
3601 if (srcp)
3602 return find_next_bit(srcp, nr_bits, n + 1);
3603
3604 return n + 1;
3605}
3606
3607
3608
3609
3610
3611
3612
3613
3614
3615
3616static inline int netif_attrmask_next_and(int n, const unsigned long *src1p,
3617 const unsigned long *src2p,
3618 unsigned int nr_bits)
3619{
3620
3621 if (n != -1)
3622 cpu_max_bits_warn(n, nr_bits);
3623
3624 if (src1p && src2p)
3625 return find_next_and_bit(src1p, src2p, nr_bits, n + 1);
3626 else if (src1p)
3627 return find_next_bit(src1p, nr_bits, n + 1);
3628 else if (src2p)
3629 return find_next_bit(src2p, nr_bits, n + 1);
3630
3631 return n + 1;
3632}
3633#else
3634static inline int netif_set_xps_queue(struct net_device *dev,
3635 const struct cpumask *mask,
3636 u16 index)
3637{
3638 return 0;
3639}
3640
3641static inline int __netif_set_xps_queue(struct net_device *dev,
3642 const unsigned long *mask,
3643 u16 index, bool is_rxqs_map)
3644{
3645 return 0;
3646}
3647#endif
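/*
 * Usage sketch (illustrative only): a driver can pin a TX queue to the CPU
 * that services its interrupt so XPS steers locally generated traffic there.
 * "cpu", "queue_index" and "err" are hypothetical driver-local variables.
 *
 *	cpumask_var_t mask;
 *
 *	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_set_cpu(cpu, mask);
 *	err = netif_set_xps_queue(dev, mask, queue_index);
 *	free_cpumask_var(mask);
 */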
3648
/**
 *	netif_is_multiqueue - test if device has multiple transmit queues
 *	@dev: network device
 *
 *	Check if device has multiple transmit queues.
 */
3655static inline bool netif_is_multiqueue(const struct net_device *dev)
3656{
3657 return dev->num_tx_queues > 1;
3658}
3659
3660int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
3661
3662#ifdef CONFIG_SYSFS
3663int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
3664#else
3665static inline int netif_set_real_num_rx_queues(struct net_device *dev,
3666 unsigned int rxqs)
3667{
3668 dev->real_num_rx_queues = rxqs;
3669 return 0;
3670}
3671#endif
3672
3673static inline struct netdev_rx_queue *
3674__netif_get_rx_queue(struct net_device *dev, unsigned int rxq)
3675{
3676 return dev->_rx + rxq;
3677}
3678
3679#ifdef CONFIG_SYSFS
3680static inline unsigned int get_netdev_rx_queue_index(
3681 struct netdev_rx_queue *queue)
3682{
3683 struct net_device *dev = queue->dev;
3684 int index = queue - dev->_rx;
3685
3686 BUG_ON(index >= dev->num_rx_queues);
3687 return index;
3688}
3689#endif
3690
3691#define DEFAULT_MAX_NUM_RSS_QUEUES (8)
3692int netif_get_num_default_rss_queues(void);
3693
3694enum skb_free_reason {
3695 SKB_REASON_CONSUMED,
3696 SKB_REASON_DROPPED,
3697};
3698
3699void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
3700void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
3701
/*
 * It is not allowed to call kfree_skb() or consume_skb() from hardware
 * interrupt context or with hardware interrupts being disabled.
 * (in_irq() || irqs_disabled())
 *
 * We provide four helpers that can be used in the following contexts:
 *
 * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
 *  replacing kfree_skb(skb)
 *
 * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
 *  Typically used in place of consume_skb(skb) in TX completion path
 *
 * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
 *  replacing kfree_skb(skb)
 *
 * dev_consume_skb_any(skb) when caller doesn't know its current irq context,
 *  replacing consume_skb(skb)
 */
3721static inline void dev_kfree_skb_irq(struct sk_buff *skb)
3722{
3723 __dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
3724}
3725
3726static inline void dev_consume_skb_irq(struct sk_buff *skb)
3727{
3728 __dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
3729}
3730
3731static inline void dev_kfree_skb_any(struct sk_buff *skb)
3732{
3733 __dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
3734}
3735
3736static inline void dev_consume_skb_any(struct sk_buff *skb)
3737{
3738 __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
3739}
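/*
 * Usage sketch (illustrative only): a TX completion handler that may run in
 * hard interrupt context frees skbs with the _any variants, distinguishing
 * successful transmissions from drops.  "tx_ok" is a hypothetical status.
 *
 *	if (likely(tx_ok))
 *		dev_consume_skb_any(skb);	// not counted as a drop
 *	else
 *		dev_kfree_skb_any(skb);		// shows up as a drop in tracing
 */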
3740
3741void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog);
3742int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb);
3743int netif_rx(struct sk_buff *skb);
3744int netif_rx_ni(struct sk_buff *skb);
3745int netif_receive_skb(struct sk_buff *skb);
3746int netif_receive_skb_core(struct sk_buff *skb);
3747void netif_receive_skb_list(struct list_head *head);
3748gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
3749void napi_gro_flush(struct napi_struct *napi, bool flush_old);
3750struct sk_buff *napi_get_frags(struct napi_struct *napi);
3751gro_result_t napi_gro_frags(struct napi_struct *napi);
3752struct packet_offload *gro_find_receive_by_type(__be16 type);
3753struct packet_offload *gro_find_complete_by_type(__be16 type);
3754
3755static inline void napi_free_frags(struct napi_struct *napi)
3756{
3757 kfree_skb(napi->skb);
3758 napi->skb = NULL;
3759}
3760
3761bool netdev_is_rx_handler_busy(struct net_device *dev);
3762int netdev_rx_handler_register(struct net_device *dev,
3763 rx_handler_func_t *rx_handler,
3764 void *rx_handler_data);
3765void netdev_rx_handler_unregister(struct net_device *dev);
3766
3767bool dev_valid_name(const char *name);
3768int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
3769 bool *need_copyout);
3770int dev_ifconf(struct net *net, struct ifconf *, int);
3771int dev_ethtool(struct net *net, struct ifreq *);
3772unsigned int dev_get_flags(const struct net_device *);
3773int __dev_change_flags(struct net_device *dev, unsigned int flags,
3774 struct netlink_ext_ack *extack);
3775int dev_change_flags(struct net_device *dev, unsigned int flags,
3776 struct netlink_ext_ack *extack);
3777void __dev_notify_flags(struct net_device *, unsigned int old_flags,
3778 unsigned int gchanges);
3779int dev_change_name(struct net_device *, const char *);
3780int dev_set_alias(struct net_device *, const char *, size_t);
3781int dev_get_alias(const struct net_device *, char *, size_t);
3782int dev_change_net_namespace(struct net_device *, struct net *, const char *);
3783int __dev_set_mtu(struct net_device *, int);
3784int dev_validate_mtu(struct net_device *dev, int mtu,
3785 struct netlink_ext_ack *extack);
3786int dev_set_mtu_ext(struct net_device *dev, int mtu,
3787 struct netlink_ext_ack *extack);
3788int dev_set_mtu(struct net_device *, int);
3789int dev_change_tx_queue_len(struct net_device *, unsigned long);
3790void dev_set_group(struct net_device *, int);
3791int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
3792 struct netlink_ext_ack *extack);
3793int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
3794 struct netlink_ext_ack *extack);
3795int dev_change_carrier(struct net_device *, bool new_carrier);
3796int dev_get_phys_port_id(struct net_device *dev,
3797 struct netdev_phys_item_id *ppid);
3798int dev_get_phys_port_name(struct net_device *dev,
3799 char *name, size_t len);
3800int dev_get_port_parent_id(struct net_device *dev,
3801 struct netdev_phys_item_id *ppid, bool recurse);
3802bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b);
3803int dev_change_proto_down(struct net_device *dev, bool proto_down);
3804int dev_change_proto_down_generic(struct net_device *dev, bool proto_down);
3805struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
3806struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
3807 struct netdev_queue *txq, int *ret);
3808
3809typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
3810int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
3811 int fd, int expected_fd, u32 flags);
3812u32 __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op,
3813 enum bpf_netdev_command cmd);
3814int xdp_umem_query(struct net_device *dev, u16 queue_id);
3815
3816int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
3817int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
3818bool is_skb_forwardable(const struct net_device *dev,
3819 const struct sk_buff *skb);
3820
3821static __always_inline int ____dev_forward_skb(struct net_device *dev,
3822 struct sk_buff *skb)
3823{
3824 if (skb_orphan_frags(skb, GFP_ATOMIC) ||
3825 unlikely(!is_skb_forwardable(dev, skb))) {
3826 atomic_long_inc(&dev->rx_dropped);
3827 kfree_skb(skb);
3828 return NET_RX_DROP;
3829 }
3830
3831 skb_scrub_packet(skb, true);
3832 skb->priority = 0;
3833 return 0;
3834}
3835
3836bool dev_nit_active(struct net_device *dev);
3837void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
3838
3839extern int netdev_budget;
3840extern unsigned int netdev_budget_usecs;
3841
3842
3843void netdev_run_todo(void);
3844
/**
 *	dev_put - release reference to device
 *	@dev: network device
 *
 *	Release reference to device to allow it to be freed.
 */
3851static inline void dev_put(struct net_device *dev)
3852{
3853 this_cpu_dec(*dev->pcpu_refcnt);
3854}
3855
/**
 *	dev_hold - get reference to device
 *	@dev: network device
 *
 *	Hold reference to device to keep it from being freed.
 */
3862static inline void dev_hold(struct net_device *dev)
3863{
3864 this_cpu_inc(*dev->pcpu_refcnt);
3865}
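/*
 * Usage sketch (illustrative only): any code that stores a struct net_device
 * pointer beyond the current RCU read-side section takes a reference first
 * and releases it when the pointer is dropped.
 *
 *	dev_hold(dev);
 *	... use dev from asynchronous context ...
 *	dev_put(dev);
 */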
3866
3867
3868
3869
3870
3871
3872
3873
3874
3875
3876void linkwatch_init_dev(struct net_device *dev);
3877void linkwatch_fire_event(struct net_device *dev);
3878void linkwatch_forget_dev(struct net_device *dev);
3879
/**
 *	netif_carrier_ok - test if carrier present
 *	@dev: network device
 *
 *	Check if carrier is present on device.
 */
3886static inline bool netif_carrier_ok(const struct net_device *dev)
3887{
3888 return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
3889}
3890
3891unsigned long dev_trans_start(struct net_device *dev);
3892
3893void __netdev_watchdog_up(struct net_device *dev);
3894
3895void netif_carrier_on(struct net_device *dev);
3896
3897void netif_carrier_off(struct net_device *dev);
3898
3899
3900
3901
3902
3903
3904
3905
3906
3907
3908
3909
3910
3911static inline void netif_dormant_on(struct net_device *dev)
3912{
3913 if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
3914 linkwatch_fire_event(dev);
3915}
3916
3917
3918
3919
3920
3921
3922
3923static inline void netif_dormant_off(struct net_device *dev)
3924{
3925 if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
3926 linkwatch_fire_event(dev);
3927}
3928
3929
3930
3931
3932
3933
3934
3935static inline bool netif_dormant(const struct net_device *dev)
3936{
3937 return test_bit(__LINK_STATE_DORMANT, &dev->state);
3938}
3939
3940
3941
3942
3943
3944
3945
3946
3947
3948
3949
3950
3951static inline void netif_testing_on(struct net_device *dev)
3952{
3953 if (!test_and_set_bit(__LINK_STATE_TESTING, &dev->state))
3954 linkwatch_fire_event(dev);
3955}
3956
3957
3958
3959
3960
3961
3962
3963static inline void netif_testing_off(struct net_device *dev)
3964{
3965 if (test_and_clear_bit(__LINK_STATE_TESTING, &dev->state))
3966 linkwatch_fire_event(dev);
3967}
3968
3969
3970
3971
3972
3973
3974
3975static inline bool netif_testing(const struct net_device *dev)
3976{
3977 return test_bit(__LINK_STATE_TESTING, &dev->state);
3978}
3979
/**
 *	netif_oper_up - test if device is operational
 *	@dev: network device
 *
 *	Check if carrier is operational.
 */
3987static inline bool netif_oper_up(const struct net_device *dev)
3988{
3989 return (dev->operstate == IF_OPER_UP ||
		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
3991}
3992
/**
 *	netif_device_present - is device available or removed
 *	@dev: network device
 *
 *	Check if device has not been removed from the system.
 */
3999static inline bool netif_device_present(struct net_device *dev)
4000{
4001 return test_bit(__LINK_STATE_PRESENT, &dev->state);
4002}
4003
4004void netif_device_detach(struct net_device *dev);
4005
4006void netif_device_attach(struct net_device *dev);
4007
/*
 * Network interface message level settings
 */
4012enum {
4013 NETIF_MSG_DRV_BIT,
4014 NETIF_MSG_PROBE_BIT,
4015 NETIF_MSG_LINK_BIT,
4016 NETIF_MSG_TIMER_BIT,
4017 NETIF_MSG_IFDOWN_BIT,
4018 NETIF_MSG_IFUP_BIT,
4019 NETIF_MSG_RX_ERR_BIT,
4020 NETIF_MSG_TX_ERR_BIT,
4021 NETIF_MSG_TX_QUEUED_BIT,
4022 NETIF_MSG_INTR_BIT,
4023 NETIF_MSG_TX_DONE_BIT,
4024 NETIF_MSG_RX_STATUS_BIT,
4025 NETIF_MSG_PKTDATA_BIT,
4026 NETIF_MSG_HW_BIT,
4027 NETIF_MSG_WOL_BIT,
4028
4029
4030
4031
4032 NETIF_MSG_CLASS_COUNT,
4033};
4034
4035static_assert(NETIF_MSG_CLASS_COUNT <= 32);
4036
4037#define __NETIF_MSG_BIT(bit) ((u32)1 << (bit))
4038#define __NETIF_MSG(name) __NETIF_MSG_BIT(NETIF_MSG_ ## name ## _BIT)
4039
4040#define NETIF_MSG_DRV __NETIF_MSG(DRV)
4041#define NETIF_MSG_PROBE __NETIF_MSG(PROBE)
4042#define NETIF_MSG_LINK __NETIF_MSG(LINK)
4043#define NETIF_MSG_TIMER __NETIF_MSG(TIMER)
4044#define NETIF_MSG_IFDOWN __NETIF_MSG(IFDOWN)
4045#define NETIF_MSG_IFUP __NETIF_MSG(IFUP)
4046#define NETIF_MSG_RX_ERR __NETIF_MSG(RX_ERR)
4047#define NETIF_MSG_TX_ERR __NETIF_MSG(TX_ERR)
4048#define NETIF_MSG_TX_QUEUED __NETIF_MSG(TX_QUEUED)
4049#define NETIF_MSG_INTR __NETIF_MSG(INTR)
4050#define NETIF_MSG_TX_DONE __NETIF_MSG(TX_DONE)
4051#define NETIF_MSG_RX_STATUS __NETIF_MSG(RX_STATUS)
4052#define NETIF_MSG_PKTDATA __NETIF_MSG(PKTDATA)
4053#define NETIF_MSG_HW __NETIF_MSG(HW)
4054#define NETIF_MSG_WOL __NETIF_MSG(WOL)
4055
4056#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
4057#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
4058#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
4059#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
4060#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
4061#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
4062#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
4063#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
4064#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
4065#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
4066#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
4067#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
4068#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
4069#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
4070#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
4071
4072static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
4073{
4074
4075 if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
4076 return default_msg_enable_bits;
4077 if (debug_value == 0)
4078 return 0;
4079
4080 return (1U << debug_value) - 1;
4081}
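/*
 * Usage sketch (illustrative only): drivers typically expose a "debug" module
 * parameter, convert it with netif_msg_init() and gate their messages with
 * the netif_msg_*() tests.  "debug" and "priv" are hypothetical driver names.
 *
 *	static int debug = -1;		// -1 selects the driver default
 *	module_param(debug, int, 0644);
 *
 *	priv->msg_enable = netif_msg_init(debug,
 *					  NETIF_MSG_DRV | NETIF_MSG_PROBE |
 *					  NETIF_MSG_LINK);
 *
 *	if (netif_msg_link(priv))
 *		netdev_info(dev, "link is up\n");
 */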
4082
4083static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
4084{
4085 spin_lock(&txq->_xmit_lock);
4086 txq->xmit_lock_owner = cpu;
4087}
4088
4089static inline bool __netif_tx_acquire(struct netdev_queue *txq)
4090{
4091 __acquire(&txq->_xmit_lock);
4092 return true;
4093}
4094
4095static inline void __netif_tx_release(struct netdev_queue *txq)
4096{
4097 __release(&txq->_xmit_lock);
4098}
4099
4100static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
4101{
4102 spin_lock_bh(&txq->_xmit_lock);
4103 txq->xmit_lock_owner = smp_processor_id();
4104}
4105
4106static inline bool __netif_tx_trylock(struct netdev_queue *txq)
4107{
4108 bool ok = spin_trylock(&txq->_xmit_lock);
4109 if (likely(ok))
4110 txq->xmit_lock_owner = smp_processor_id();
4111 return ok;
4112}
4113
4114static inline void __netif_tx_unlock(struct netdev_queue *txq)
4115{
4116 txq->xmit_lock_owner = -1;
4117 spin_unlock(&txq->_xmit_lock);
4118}
4119
4120static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
4121{
4122 txq->xmit_lock_owner = -1;
4123 spin_unlock_bh(&txq->_xmit_lock);
4124}
4125
4126static inline void txq_trans_update(struct netdev_queue *txq)
4127{
4128 if (txq->xmit_lock_owner != -1)
4129 txq->trans_start = jiffies;
4130}
4131
4132
4133static inline void netif_trans_update(struct net_device *dev)
4134{
4135 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
4136
4137 if (txq->trans_start != jiffies)
4138 txq->trans_start = jiffies;
4139}
4140
/**
 *	netif_tx_lock - grab network device transmit lock
 *	@dev: network device
 *
 *	Get network device transmit lock.
 */
4147static inline void netif_tx_lock(struct net_device *dev)
4148{
4149 unsigned int i;
4150 int cpu;
4151
4152 spin_lock(&dev->tx_global_lock);
4153 cpu = smp_processor_id();
4154 for (i = 0; i < dev->num_tx_queues; i++) {
4155 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
4156
4157
4158
4159
4160
4161
4162
4163 __netif_tx_lock(txq, cpu);
4164 set_bit(__QUEUE_STATE_FROZEN, &txq->state);
4165 __netif_tx_unlock(txq);
4166 }
4167}
4168
4169static inline void netif_tx_lock_bh(struct net_device *dev)
4170{
4171 local_bh_disable();
4172 netif_tx_lock(dev);
4173}
4174
4175static inline void netif_tx_unlock(struct net_device *dev)
4176{
4177 unsigned int i;
4178
4179 for (i = 0; i < dev->num_tx_queues; i++) {
4180 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
4181
4182
4183
4184
4185
4186 clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
4187 netif_schedule_queue(txq);
4188 }
4189 spin_unlock(&dev->tx_global_lock);
4190}
4191
4192static inline void netif_tx_unlock_bh(struct net_device *dev)
4193{
4194 netif_tx_unlock(dev);
4195 local_bh_enable();
4196}
4197
4198#define HARD_TX_LOCK(dev, txq, cpu) { \
4199 if ((dev->features & NETIF_F_LLTX) == 0) { \
4200 __netif_tx_lock(txq, cpu); \
4201 } else { \
4202 __netif_tx_acquire(txq); \
4203 } \
4204}
4205
4206#define HARD_TX_TRYLOCK(dev, txq) \
4207 (((dev->features & NETIF_F_LLTX) == 0) ? \
4208 __netif_tx_trylock(txq) : \
4209 __netif_tx_acquire(txq))
4210
4211#define HARD_TX_UNLOCK(dev, txq) { \
4212 if ((dev->features & NETIF_F_LLTX) == 0) { \
4213 __netif_tx_unlock(txq); \
4214 } else { \
4215 __netif_tx_release(txq); \
4216 } \
4217}
4218
4219static inline void netif_tx_disable(struct net_device *dev)
4220{
4221 unsigned int i;
4222 int cpu;
4223
4224 local_bh_disable();
4225 cpu = smp_processor_id();
4226 for (i = 0; i < dev->num_tx_queues; i++) {
4227 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
4228
4229 __netif_tx_lock(txq, cpu);
4230 netif_tx_stop_queue(txq);
4231 __netif_tx_unlock(txq);
4232 }
4233 local_bh_enable();
4234}
4235
4236static inline void netif_addr_lock(struct net_device *dev)
4237{
4238 spin_lock(&dev->addr_list_lock);
4239}
4240
4241static inline void netif_addr_lock_nested(struct net_device *dev)
4242{
4243 spin_lock_nested(&dev->addr_list_lock, dev->lower_level);
4244}
4245
4246static inline void netif_addr_lock_bh(struct net_device *dev)
4247{
4248 spin_lock_bh(&dev->addr_list_lock);
4249}
4250
4251static inline void netif_addr_unlock(struct net_device *dev)
4252{
4253 spin_unlock(&dev->addr_list_lock);
4254}
4255
4256static inline void netif_addr_unlock_bh(struct net_device *dev)
4257{
4258 spin_unlock_bh(&dev->addr_list_lock);
4259}
4260
4261
4262
4263
4264
4265#define for_each_dev_addr(dev, ha) \
4266 list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
4267
4268
4269
4270void ether_setup(struct net_device *dev);
4271
4272
4273struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
4274 unsigned char name_assign_type,
4275 void (*setup)(struct net_device *),
4276 unsigned int txqs, unsigned int rxqs);
4277#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
4278 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)
4279
4280#define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \
4281 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \
4282 count)
4283
4284int register_netdev(struct net_device *dev);
4285void unregister_netdev(struct net_device *dev);
4286
4287int devm_register_netdev(struct device *dev, struct net_device *ndev);
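/*
 * Usage sketch (illustrative only): the usual probe/remove sequence for an
 * Ethernet-like device.  struct my_priv and my_netdev_ops are hypothetical.
 *
 *	dev = alloc_netdev_mq(sizeof(struct my_priv), "eth%d",
 *			      NET_NAME_UNKNOWN, ether_setup, 4);
 *	if (!dev)
 *		return -ENOMEM;
 *	dev->netdev_ops = &my_netdev_ops;
 *
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 *
 *	On removal:
 *		unregister_netdev(dev);
 *		free_netdev(dev);
 */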
4288
4289
4290int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
4291 struct netdev_hw_addr_list *from_list, int addr_len);
4292void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
4293 struct netdev_hw_addr_list *from_list, int addr_len);
4294int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
4295 struct net_device *dev,
4296 int (*sync)(struct net_device *, const unsigned char *),
4297 int (*unsync)(struct net_device *,
4298 const unsigned char *));
4299int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list,
4300 struct net_device *dev,
4301 int (*sync)(struct net_device *,
4302 const unsigned char *, int),
4303 int (*unsync)(struct net_device *,
4304 const unsigned char *, int));
4305void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list,
4306 struct net_device *dev,
4307 int (*unsync)(struct net_device *,
4308 const unsigned char *, int));
4309void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
4310 struct net_device *dev,
4311 int (*unsync)(struct net_device *,
4312 const unsigned char *));
4313void __hw_addr_init(struct netdev_hw_addr_list *list);
4314
4315
4316int dev_addr_add(struct net_device *dev, const unsigned char *addr,
4317 unsigned char addr_type);
4318int dev_addr_del(struct net_device *dev, const unsigned char *addr,
4319 unsigned char addr_type);
4320void dev_addr_flush(struct net_device *dev);
4321int dev_addr_init(struct net_device *dev);
4322
4323
4324int dev_uc_add(struct net_device *dev, const unsigned char *addr);
4325int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
4326int dev_uc_del(struct net_device *dev, const unsigned char *addr);
4327int dev_uc_sync(struct net_device *to, struct net_device *from);
4328int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
4329void dev_uc_unsync(struct net_device *to, struct net_device *from);
4330void dev_uc_flush(struct net_device *dev);
4331void dev_uc_init(struct net_device *dev);
4332
/**
 *  __dev_uc_sync - Synchronize device's unicast list
 *  @dev:  device to sync
 *  @sync: function to call if address should be added
 *  @unsync: function to call if address should be removed
 *
 *  Add newly added addresses to the interface, and release
 *  addresses that have been deleted.
 */
4342static inline int __dev_uc_sync(struct net_device *dev,
4343 int (*sync)(struct net_device *,
4344 const unsigned char *),
4345 int (*unsync)(struct net_device *,
4346 const unsigned char *))
4347{
4348 return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
4349}
4350
/**
 *  __dev_uc_unsync - Remove synchronized addresses from device
 *  @dev:  device to sync
 *  @unsync: function to call if address should be removed
 *
 *  Remove all addresses that were added to the device by dev_uc_sync().
 */
4358static inline void __dev_uc_unsync(struct net_device *dev,
4359 int (*unsync)(struct net_device *,
4360 const unsigned char *))
4361{
4362 __hw_addr_unsync_dev(&dev->uc, dev, unsync);
4363}
4364
4365
4366int dev_mc_add(struct net_device *dev, const unsigned char *addr);
4367int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
4368int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
4369int dev_mc_del(struct net_device *dev, const unsigned char *addr);
4370int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
4371int dev_mc_sync(struct net_device *to, struct net_device *from);
4372int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
4373void dev_mc_unsync(struct net_device *to, struct net_device *from);
4374void dev_mc_flush(struct net_device *dev);
4375void dev_mc_init(struct net_device *dev);
4376
/**
 *  __dev_mc_sync - Synchronize device's multicast list
 *  @dev:  device to sync
 *  @sync: function to call if address should be added
 *  @unsync: function to call if address should be removed
 *
 *  Add newly added addresses to the interface, and release
 *  addresses that have been deleted.
 */
4386static inline int __dev_mc_sync(struct net_device *dev,
4387 int (*sync)(struct net_device *,
4388 const unsigned char *),
4389 int (*unsync)(struct net_device *,
4390 const unsigned char *))
4391{
4392 return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
4393}
4394
/**
 *  __dev_mc_unsync - Remove synchronized addresses from device
 *  @dev:  device to sync
 *  @unsync: function to call if address should be removed
 *
 *  Remove all addresses that were added to the device by dev_mc_sync().
 */
4402static inline void __dev_mc_unsync(struct net_device *dev,
4403 int (*unsync)(struct net_device *,
4404 const unsigned char *))
4405{
4406 __hw_addr_unsync_dev(&dev->mc, dev, unsync);
4407}
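/*
 * Usage sketch (illustrative only): an ->ndo_set_rx_mode() implementation
 * keeps the hardware filters in sync with the kernel's address lists.
 * my_add_addr() and my_del_addr() are hypothetical driver callbacks with the
 * int (*)(struct net_device *, const unsigned char *) signature.
 *
 *	static void my_set_rx_mode(struct net_device *dev)
 *	{
 *		__dev_uc_sync(dev, my_add_addr, my_del_addr);
 *		__dev_mc_sync(dev, my_add_addr, my_del_addr);
 *	}
 */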
4408
4409
4410void dev_set_rx_mode(struct net_device *dev);
4411void __dev_set_rx_mode(struct net_device *dev);
4412int dev_set_promiscuity(struct net_device *dev, int inc);
4413int dev_set_allmulti(struct net_device *dev, int inc);
4414void netdev_state_change(struct net_device *dev);
4415void netdev_notify_peers(struct net_device *dev);
4416void netdev_features_change(struct net_device *dev);
4417
4418void dev_load(struct net *net, const char *name);
4419struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
4420 struct rtnl_link_stats64 *storage);
4421void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
4422 const struct net_device_stats *netdev_stats);
4423
4424extern int netdev_max_backlog;
4425extern int netdev_tstamp_prequeue;
4426extern int weight_p;
4427extern int dev_weight_rx_bias;
4428extern int dev_weight_tx_bias;
4429extern int dev_rx_weight;
4430extern int dev_tx_weight;
4431extern int gro_normal_batch;
4432
4433bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
4434struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
4435 struct list_head **iter);
4436struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
4437 struct list_head **iter);
/* iterate through upper list, must be called under RCU read lock */
4440#define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
4441 for (iter = &(dev)->adj_list.upper, \
4442 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
4443 updev; \
4444 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
4445
4446int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
4447 int (*fn)(struct net_device *upper_dev,
4448 void *data),
4449 void *data);
4450
4451bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
4452 struct net_device *upper_dev);
4453
4454bool netdev_has_any_upper_dev(struct net_device *dev);
4455
4456void *netdev_lower_get_next_private(struct net_device *dev,
4457 struct list_head **iter);
4458void *netdev_lower_get_next_private_rcu(struct net_device *dev,
4459 struct list_head **iter);
4460
4461#define netdev_for_each_lower_private(dev, priv, iter) \
4462 for (iter = (dev)->adj_list.lower.next, \
4463 priv = netdev_lower_get_next_private(dev, &(iter)); \
4464 priv; \
4465 priv = netdev_lower_get_next_private(dev, &(iter)))
4466
4467#define netdev_for_each_lower_private_rcu(dev, priv, iter) \
4468 for (iter = &(dev)->adj_list.lower, \
4469 priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
4470 priv; \
4471 priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
4472
4473void *netdev_lower_get_next(struct net_device *dev,
4474 struct list_head **iter);
4475
4476#define netdev_for_each_lower_dev(dev, ldev, iter) \
4477 for (iter = (dev)->adj_list.lower.next, \
4478 ldev = netdev_lower_get_next(dev, &(iter)); \
4479 ldev; \
4480 ldev = netdev_lower_get_next(dev, &(iter)))
4481
4482struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
4483 struct list_head **iter);
4484int netdev_walk_all_lower_dev(struct net_device *dev,
4485 int (*fn)(struct net_device *lower_dev,
4486 void *data),
4487 void *data);
4488int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
4489 int (*fn)(struct net_device *lower_dev,
4490 void *data),
4491 void *data);
4492
4493void *netdev_adjacent_get_private(struct list_head *adj_list);
4494void *netdev_lower_get_first_private_rcu(struct net_device *dev);
4495struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
4496struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
4497int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev,
4498 struct netlink_ext_ack *extack);
4499int netdev_master_upper_dev_link(struct net_device *dev,
4500 struct net_device *upper_dev,
4501 void *upper_priv, void *upper_info,
4502 struct netlink_ext_ack *extack);
4503void netdev_upper_dev_unlink(struct net_device *dev,
4504 struct net_device *upper_dev);
4505int netdev_adjacent_change_prepare(struct net_device *old_dev,
4506 struct net_device *new_dev,
4507 struct net_device *dev,
4508 struct netlink_ext_ack *extack);
4509void netdev_adjacent_change_commit(struct net_device *old_dev,
4510 struct net_device *new_dev,
4511 struct net_device *dev);
4512void netdev_adjacent_change_abort(struct net_device *old_dev,
4513 struct net_device *new_dev,
4514 struct net_device *dev);
4515void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
4516void *netdev_lower_dev_get_private(struct net_device *dev,
4517 struct net_device *lower_dev);
4518void netdev_lower_state_changed(struct net_device *lower_dev,
4519 void *lower_state_info);
/* RSS keys are 40 or 52 bytes long */
4522#define NETDEV_RSS_KEY_LEN 52
4523extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
4524void netdev_rss_key_fill(void *buffer, size_t len);
4525
4526int skb_checksum_help(struct sk_buff *skb);
4527int skb_crc32c_csum_help(struct sk_buff *skb);
4528int skb_csum_hwoffload_help(struct sk_buff *skb,
4529 const netdev_features_t features);
4530
4531struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
4532 netdev_features_t features, bool tx_path);
4533struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
4534 netdev_features_t features);
4535
4536struct netdev_bonding_info {
4537 ifslave slave;
4538 ifbond master;
4539};
4540
4541struct netdev_notifier_bonding_info {
4542 struct netdev_notifier_info info;
4543 struct netdev_bonding_info bonding_info;
4544};
4545
4546void netdev_bonding_info_change(struct net_device *dev,
4547 struct netdev_bonding_info *bonding_info);
4548
4549#if IS_ENABLED(CONFIG_ETHTOOL_NETLINK)
4550void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data);
4551#else
4552static inline void ethtool_notify(struct net_device *dev, unsigned int cmd,
4553 const void *data)
4554{
4555}
4556#endif
4557
4558static inline
4559struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
4560{
4561 return __skb_gso_segment(skb, features, true);
4562}
4563__be16 skb_network_protocol(struct sk_buff *skb, int *depth);
4564
4565static inline bool can_checksum_protocol(netdev_features_t features,
4566 __be16 protocol)
4567{
4568 if (protocol == htons(ETH_P_FCOE))
4569 return !!(features & NETIF_F_FCOE_CRC);
4570
4571
4572
4573 if (features & NETIF_F_HW_CSUM) {
4574
4575 return true;
4576 }
4577
4578 switch (protocol) {
4579 case htons(ETH_P_IP):
4580 return !!(features & NETIF_F_IP_CSUM);
4581 case htons(ETH_P_IPV6):
4582 return !!(features & NETIF_F_IPV6_CSUM);
4583 default:
4584 return false;
4585 }
4586}
4587
4588#ifdef CONFIG_BUG
4589void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb);
4590#else
4591static inline void netdev_rx_csum_fault(struct net_device *dev,
4592 struct sk_buff *skb)
4593{
4594}
4595#endif
4596
4597void net_enable_timestamp(void);
4598void net_disable_timestamp(void);
4599
4600#ifdef CONFIG_PROC_FS
4601int __init dev_proc_init(void);
4602#else
4603#define dev_proc_init() 0
4604#endif
4605
4606static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
4607 struct sk_buff *skb, struct net_device *dev,
4608 bool more)
4609{
4610 __this_cpu_write(softnet_data.xmit.more, more);
4611 return ops->ndo_start_xmit(skb, dev);
4612}
4613
4614static inline bool netdev_xmit_more(void)
4615{
4616 return __this_cpu_read(softnet_data.xmit.more);
4617}
4618
4619static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
4620 struct netdev_queue *txq, bool more)
4621{
4622 const struct net_device_ops *ops = dev->netdev_ops;
4623 netdev_tx_t rc;
4624
4625 rc = __netdev_start_xmit(ops, skb, dev, more);
4626 if (rc == NETDEV_TX_OK)
4627 txq_trans_update(txq);
4628
4629 return rc;
4630}
4631
4632int netdev_class_create_file_ns(const struct class_attribute *class_attr,
4633 const void *ns);
4634void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
4635 const void *ns);
4636
4637static inline int netdev_class_create_file(const struct class_attribute *class_attr)
4638{
4639 return netdev_class_create_file_ns(class_attr, NULL);
4640}
4641
4642static inline void netdev_class_remove_file(const struct class_attribute *class_attr)
4643{
4644 netdev_class_remove_file_ns(class_attr, NULL);
4645}
4646
4647extern const struct kobj_ns_type_operations net_ns_type_operations;
4648
4649const char *netdev_drivername(const struct net_device *dev);
4650
4651void linkwatch_run_queue(void);
4652
4653static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
4654 netdev_features_t f2)
4655{
4656 if ((f1 ^ f2) & NETIF_F_HW_CSUM) {
4657 if (f1 & NETIF_F_HW_CSUM)
4658 f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4659 else
4660 f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4661 }
4662
4663 return f1 & f2;
4664}
4665
4666static inline netdev_features_t netdev_get_wanted_features(
4667 struct net_device *dev)
4668{
4669 return (dev->features & ~dev->hw_features) | dev->wanted_features;
4670}
4671netdev_features_t netdev_increment_features(netdev_features_t all,
4672 netdev_features_t one, netdev_features_t mask);
4673
4674
4675
4676
4677
4678static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
4679 netdev_features_t mask)
4680{
4681 return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
4682}
4683
4684int __netdev_update_features(struct net_device *dev);
4685void netdev_update_features(struct net_device *dev);
4686void netdev_change_features(struct net_device *dev);
4687
4688void netif_stacked_transfer_operstate(const struct net_device *rootdev,
4689 struct net_device *dev);
4690
4691netdev_features_t passthru_features_check(struct sk_buff *skb,
4692 struct net_device *dev,
4693 netdev_features_t features);
4694netdev_features_t netif_skb_features(struct sk_buff *skb);
4695
4696static inline bool net_gso_ok(netdev_features_t features, int gso_type)
4697{
4698 netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT;
4699
4700
4701 BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
4702 BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
4703 BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
4704 BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
4705 BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
4706 BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
4707 BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
4708 BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
4709 BUILD_BUG_ON(SKB_GSO_IPXIP4 != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT));
4710 BUILD_BUG_ON(SKB_GSO_IPXIP6 != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT));
4711 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
4712 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
4713 BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT));
4714 BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
4715 BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
4716 BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT));
4717 BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT));
4718 BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT));
4719 BUILD_BUG_ON(SKB_GSO_FRAGLIST != (NETIF_F_GSO_FRAGLIST >> NETIF_F_GSO_SHIFT));
4720
4721 return (features & feature) == feature;
4722}
4723
4724static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
4725{
4726 return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
4727 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
4728}
4729
4730static inline bool netif_needs_gso(struct sk_buff *skb,
4731 netdev_features_t features)
4732{
4733 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
4734 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
4735 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
4736}
4737
4738static inline void netif_set_gso_max_size(struct net_device *dev,
4739 unsigned int size)
4740{
4741 dev->gso_max_size = size;
4742}
4743
4744static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
4745 int pulled_hlen, u16 mac_offset,
4746 int mac_len)
4747{
4748 skb->protocol = protocol;
4749 skb->encapsulation = 1;
4750 skb_push(skb, pulled_hlen);
4751 skb_reset_transport_header(skb);
4752 skb->mac_header = mac_offset;
4753 skb->network_header = skb->mac_header + mac_len;
4754 skb->mac_len = mac_len;
4755}
4756
4757static inline bool netif_is_macsec(const struct net_device *dev)
4758{
4759 return dev->priv_flags & IFF_MACSEC;
4760}
4761
4762static inline bool netif_is_macvlan(const struct net_device *dev)
4763{
4764 return dev->priv_flags & IFF_MACVLAN;
4765}
4766
4767static inline bool netif_is_macvlan_port(const struct net_device *dev)
4768{
4769 return dev->priv_flags & IFF_MACVLAN_PORT;
4770}
4771
4772static inline bool netif_is_bond_master(const struct net_device *dev)
4773{
4774 return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
4775}
4776
4777static inline bool netif_is_bond_slave(const struct net_device *dev)
4778{
4779 return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
4780}
4781
4782static inline bool netif_supports_nofcs(struct net_device *dev)
4783{
4784 return dev->priv_flags & IFF_SUPP_NOFCS;
4785}
4786
4787static inline bool netif_has_l3_rx_handler(const struct net_device *dev)
4788{
4789 return dev->priv_flags & IFF_L3MDEV_RX_HANDLER;
4790}
4791
4792static inline bool netif_is_l3_master(const struct net_device *dev)
4793{
4794 return dev->priv_flags & IFF_L3MDEV_MASTER;
4795}
4796
4797static inline bool netif_is_l3_slave(const struct net_device *dev)
4798{
4799 return dev->priv_flags & IFF_L3MDEV_SLAVE;
4800}
4801
4802static inline bool netif_is_bridge_master(const struct net_device *dev)
4803{
4804 return dev->priv_flags & IFF_EBRIDGE;
4805}
4806
4807static inline bool netif_is_bridge_port(const struct net_device *dev)
4808{
4809 return dev->priv_flags & IFF_BRIDGE_PORT;
4810}
4811
4812static inline bool netif_is_ovs_master(const struct net_device *dev)
4813{
4814 return dev->priv_flags & IFF_OPENVSWITCH;
4815}
4816
4817static inline bool netif_is_ovs_port(const struct net_device *dev)
4818{
4819 return dev->priv_flags & IFF_OVS_DATAPATH;
4820}
4821
4822static inline bool netif_is_team_master(const struct net_device *dev)
4823{
4824 return dev->priv_flags & IFF_TEAM;
4825}
4826
4827static inline bool netif_is_team_port(const struct net_device *dev)
4828{
4829 return dev->priv_flags & IFF_TEAM_PORT;
4830}
4831
4832static inline bool netif_is_lag_master(const struct net_device *dev)
4833{
4834 return netif_is_bond_master(dev) || netif_is_team_master(dev);
4835}
4836
4837static inline bool netif_is_lag_port(const struct net_device *dev)
4838{
4839 return netif_is_bond_slave(dev) || netif_is_team_port(dev);
4840}
4841
4842static inline bool netif_is_rxfh_configured(const struct net_device *dev)
4843{
4844 return dev->priv_flags & IFF_RXFH_CONFIGURED;
4845}
4846
4847static inline bool netif_is_failover(const struct net_device *dev)
4848{
4849 return dev->priv_flags & IFF_FAILOVER;
4850}
4851
4852static inline bool netif_is_failover_slave(const struct net_device *dev)
4853{
4854 return dev->priv_flags & IFF_FAILOVER_SLAVE;
4855}
/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
4858static inline void netif_keep_dst(struct net_device *dev)
4859{
4860 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
4861}
4862
4863
4864static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
4865{
4866
4867 return dev->priv_flags & IFF_MACSEC;
4868}

extern struct pernet_operations __net_initdata loopback_net_ops;

/* Logging, debugging and troubleshooting/diagnostic helpers. */

/* netdev_printk helpers, similar to dev_printk */

static inline const char *netdev_name(const struct net_device *dev)
{
	if (!dev->name[0] || strchr(dev->name, '%'))
		return "(unnamed net_device)";
	return dev->name;
}

static inline bool netdev_unregistering(const struct net_device *dev)
{
	return dev->reg_state == NETREG_UNREGISTERING;
}

static inline const char *netdev_reg_state(const struct net_device *dev)
{
	switch (dev->reg_state) {
	case NETREG_UNINITIALIZED: return " (uninitialized)";
	case NETREG_REGISTERED: return "";
	case NETREG_UNREGISTERING: return " (unregistering)";
	case NETREG_UNREGISTERED: return " (unregistered)";
	case NETREG_RELEASED: return " (released)";
	case NETREG_DUMMY: return " (dummy)";
	}

	WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state);
	return " (unknown)";
}
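
/*
 * Illustrative sketch (editor's example): these two helpers provide the
 * "name (reg_state)" prefix seen in netdev messages; open-coded they
 * could be used like this:
 *
 *	pr_warn("%s%s: carrier lost\n",
 *		netdev_name(dev), netdev_reg_state(dev));
 */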

__printf(3, 4) __cold
void netdev_printk(const char *level, const struct net_device *dev,
		   const char *format, ...);
__printf(2, 3) __cold
void netdev_emerg(const struct net_device *dev, const char *format, ...);
__printf(2, 3) __cold
void netdev_alert(const struct net_device *dev, const char *format, ...);
__printf(2, 3) __cold
void netdev_crit(const struct net_device *dev, const char *format, ...);
__printf(2, 3) __cold
void netdev_err(const struct net_device *dev, const char *format, ...);
__printf(2, 3) __cold
void netdev_warn(const struct net_device *dev, const char *format, ...);
__printf(2, 3) __cold
void netdev_notice(const struct net_device *dev, const char *format, ...);
__printf(2, 3) __cold
void netdev_info(const struct net_device *dev, const char *format, ...);

#define netdev_level_once(level, dev, fmt, ...)			\
do {								\
	static bool __print_once __read_mostly;			\
								\
	if (!__print_once) {					\
		__print_once = true;				\
		netdev_printk(level, dev, fmt, ##__VA_ARGS__);	\
	}							\
} while (0)

#define netdev_emerg_once(dev, fmt, ...) \
	netdev_level_once(KERN_EMERG, dev, fmt, ##__VA_ARGS__)
#define netdev_alert_once(dev, fmt, ...) \
	netdev_level_once(KERN_ALERT, dev, fmt, ##__VA_ARGS__)
#define netdev_crit_once(dev, fmt, ...) \
	netdev_level_once(KERN_CRIT, dev, fmt, ##__VA_ARGS__)
#define netdev_err_once(dev, fmt, ...) \
	netdev_level_once(KERN_ERR, dev, fmt, ##__VA_ARGS__)
#define netdev_warn_once(dev, fmt, ...) \
	netdev_level_once(KERN_WARNING, dev, fmt, ##__VA_ARGS__)
#define netdev_notice_once(dev, fmt, ...) \
	netdev_level_once(KERN_NOTICE, dev, fmt, ##__VA_ARGS__)
#define netdev_info_once(dev, fmt, ...) \
	netdev_level_once(KERN_INFO, dev, fmt, ##__VA_ARGS__)
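
/*
 * Illustrative sketch (editor's example): the *_once variants suit
 * diagnostics that could otherwise fire on every packet; the
 * foo_max_frame() helper below is hypothetical.
 *
 *	if (unlikely(len > foo_max_frame(dev)))
 *		netdev_warn_once(dev, "oversized frame (%u bytes) dropped\n",
 *				 len);
 */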

#define MODULE_ALIAS_NETDEV(device) \
	MODULE_ALIAS("netdev-" device)

#if defined(CONFIG_DYNAMIC_DEBUG) || \
	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#define netdev_dbg(__dev, format, args...)			\
do {								\
	dynamic_netdev_dbg(__dev, format, ##args);		\
} while (0)
#elif defined(DEBUG)
#define netdev_dbg(__dev, format, args...)			\
	netdev_printk(KERN_DEBUG, __dev, format, ##args)
#else
#define netdev_dbg(__dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, __dev, format, ##args); \
})
#endif
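
/*
 * Illustrative sketch (editor's example): depending on the config above,
 * netdev_dbg() becomes a dynamic-debug site, an unconditional KERN_DEBUG
 * message, or compiles away entirely, so it is cheap enough for hot
 * paths:
 *
 *	netdev_dbg(dev, "queue %u restarted\n", queue_index);
 */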

#if defined(VERBOSE_DEBUG)
#define netdev_vdbg	netdev_dbg
#else

#define netdev_vdbg(dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, dev, format, ##args);	\
	0;							\
})
#endif

/*
 * netdev_WARN() acts like netdev_printk(), but with the key difference
 * of using a WARN/WARN_ON to get the message out, including the
 * file/line information and a backtrace.
 */
#define netdev_WARN(dev, format, args...)			\
	WARN(1, "netdevice: %s%s: " format, netdev_name(dev),	\
	     netdev_reg_state(dev), ##args)

#define netdev_WARN_ONCE(dev, format, args...)				\
	WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev),	\
		  netdev_reg_state(dev), ##args)
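
/*
 * Illustrative sketch (editor's example): netdev_WARN_ONCE() is meant
 * for "should never happen" driver states where a backtrace helps; the
 * ring index below is hypothetical.
 *
 *	netdev_WARN_ONCE(dev, "TX ring %u out of sync\n", ring);
 */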

/* netif printk helpers, similar to netdev_printk */

#define netif_printk(priv, type, level, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_printk(level, (dev), fmt, ##args);	\
} while (0)

#define netif_level(level, priv, type, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_##level(dev, fmt, ##args);		\
} while (0)

#define netif_emerg(priv, type, dev, fmt, args...)		\
	netif_level(emerg, priv, type, dev, fmt, ##args)
#define netif_alert(priv, type, dev, fmt, args...)		\
	netif_level(alert, priv, type, dev, fmt, ##args)
#define netif_crit(priv, type, dev, fmt, args...)		\
	netif_level(crit, priv, type, dev, fmt, ##args)
#define netif_err(priv, type, dev, fmt, args...)		\
	netif_level(err, priv, type, dev, fmt, ##args)
#define netif_warn(priv, type, dev, fmt, args...)		\
	netif_level(warn, priv, type, dev, fmt, ##args)
#define netif_notice(priv, type, dev, fmt, args...)		\
	netif_level(notice, priv, type, dev, fmt, ##args)
#define netif_info(priv, type, dev, fmt, args...)		\
	netif_level(info, priv, type, dev, fmt, ##args)
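
/*
 * Illustrative sketch (editor's example): the netif_*() variants gate on
 * the driver's msg_enable bitmap via netif_msg_##type().  Assuming a
 * hypothetical private struct foo_priv with a "u32 msg_enable" member
 * (typically configured through ethtool's msglvl):
 *
 *	struct foo_priv *priv = netdev_priv(dev);
 *
 *	netif_info(priv, link, dev, "link up, %u Mbps\n", speed);
 */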

#if defined(CONFIG_DYNAMIC_DEBUG) || \
	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#define netif_dbg(priv, type, netdev, format, args...)		\
do {								\
	if (netif_msg_##type(priv))				\
		dynamic_netdev_dbg(netdev, format, ##args);	\
} while (0)
#elif defined(DEBUG)
#define netif_dbg(priv, type, dev, format, args...)		\
	netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
#else
#define netif_dbg(priv, type, dev, format, args...)			\
({									\
	if (0)								\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;								\
})
#endif

/* if @cond then downgrade to debug, else print at @level */
#define netif_cond_dbg(priv, type, netdev, cond, level, fmt, args...)	\
	do {								\
		if (cond)						\
			netif_dbg(priv, type, netdev, fmt, ##args);	\
		else							\
			netif_ ## level(priv, type, netdev, fmt, ##args); \
	} while (0)

#if defined(VERBOSE_DEBUG)
#define netif_vdbg	netif_dbg
#else
#define netif_vdbg(priv, type, dev, format, args...)		\
({								\
	if (0)							\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;							\
})
#endif

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *		0800	IP
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	LOCALTALK
 *		86DD	IPv6
 */
#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)
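
/*
 * Illustrative sketch (editor's example): the packet-type lists are
 * bucketed by the low nibble of the (big-endian) protocol value, roughly
 * as the core receive path selects a bucket:
 *
 *	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
 */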

extern struct net_device *blackhole_netdev;

#endif	/* _LINUX_NETDEVICE_H */