1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21#ifndef _LINUX_NETDEVICE_H
22#define _LINUX_NETDEVICE_H
23
24#include <linux/timer.h>
25#include <linux/bug.h>
26#include <linux/delay.h>
27#include <linux/atomic.h>
28#include <linux/prefetch.h>
29#include <asm/cache.h>
30#include <asm/byteorder.h>
31
32#include <linux/percpu.h>
33#include <linux/rculist.h>
34#include <linux/workqueue.h>
35#include <linux/dynamic_queue_limits.h>
36
37#include <linux/ethtool.h>
38#include <net/net_namespace.h>
39#ifdef CONFIG_DCB
40#include <net/dcbnl.h>
41#endif
42#include <net/netprio_cgroup.h>
43#include <net/xdp.h>
44
45#include <linux/netdev_features.h>
46#include <linux/neighbour.h>
47#include <uapi/linux/netdevice.h>
48#include <uapi/linux/if_bonding.h>
49#include <uapi/linux/pkt_cls.h>
50#include <linux/hashtable.h>
51
52struct netpoll_info;
53struct device;
54struct phy_device;
55struct dsa_port;
56
57struct sfp_bus;
58
59struct wireless_dev;
60
61struct wpan_dev;
62struct mpls_dev;
63
64struct udp_tunnel_info;
65struct bpf_prog;
66struct xdp_buff;
67
68void netdev_set_default_ethtool_ops(struct net_device *dev,
69 const struct ethtool_ops *ops);
70
71
/* Packet-receive verdicts returned by netif_rx() and friends. */
#define NET_RX_SUCCESS	0	/* keep 'em coming, baby */
#define NET_RX_DROP	1	/* packet dropped */

/*
 * Transmit return codes: transmit return codes originate from three different
 * namespaces:
 *
 * - qdisc return codes (NET_XMIT_*)
 * - driver transmit return codes (NETDEV_TX_*)
 * - errno values
 *
 * Drivers are allowed to return any one of those in their ndo_start_xmit()
 * function. NETDEV_TX_MASK keeps the driver codes disjoint from the qdisc
 * codes so callers can tell them apart.
 */

/* qdisc ->enqueue() return codes. */
#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_DROP		0x01	/* skb was dropped */
#define NET_XMIT_CN		0x02	/* congestion notification */
#define NET_XMIT_MASK		0x0f	/* qdisc flags live above this mask */

/* NET_XMIT_CN is special: it does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority, prompting us to send less aggressively.
 */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
/* NOTE(review): net_xmit_errno() maps everything but CN to -ENOBUFS;
 * presumably only used on error paths where (e) != NET_XMIT_SUCCESS. */
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)

/* Driver transmit return codes. */
#define NETDEV_TX_MASK		0xf0

enum netdev_tx {
	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy */
};
typedef enum netdev_tx netdev_tx_t;
113
114
115
116
117
118static inline bool dev_xmit_complete(int rc)
119{
120
121
122
123
124
125
126 if (likely(rc < NET_XMIT_MASK))
127 return true;
128
129 return false;
130}
131
132
133
134
135
136
/*
 * Worst-case link-layer header size the stack may have to prepend.
 * Hyper-V netvsc needs the largest headroom; wireless (and especially
 * 802.11s mesh) comes next; plain wired Ethernet gets by with 32.
 */
#if defined(CONFIG_HYPERV_NET)
# define LL_MAX_HEADER 128
#elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
# if defined(CONFIG_MAC80211_MESH)
# define LL_MAX_HEADER 128
# else
# define LL_MAX_HEADER 96
# endif
#else
# define LL_MAX_HEADER 32
#endif

/* Leave room for an encapsulation header (up to 48 bytes) whenever any
 * IP tunnel driver can be built. */
#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
	!IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif
155
156
157
158
159
160
/*
 * Old interface-level statistics, counted in native unsigned long.
 * The 64-bit interface (struct rtnl_link_stats64, reported through
 * ndo_get_stats64) supersedes these fields, which remain for legacy
 * drivers using ndo_get_stats.
 */
struct net_device_stats {
	unsigned long	rx_packets;		/* total packets received */
	unsigned long	tx_packets;		/* total packets transmitted */
	unsigned long	rx_bytes;		/* total bytes received */
	unsigned long	tx_bytes;		/* total bytes transmitted */
	unsigned long	rx_errors;		/* bad packets received */
	unsigned long	tx_errors;		/* packet transmit problems */
	unsigned long	rx_dropped;		/* no space in rx buffers */
	unsigned long	tx_dropped;		/* no space available for tx */
	unsigned long	multicast;		/* multicast packets received */
	unsigned long	collisions;
	/* detailed rx_errors: */
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;		/* receiver ring buff overflow */
	unsigned long	rx_crc_errors;		/* recved pkt with crc error */
	unsigned long	rx_frame_errors;	/* recv'd frame alignment error */
	unsigned long	rx_fifo_errors;		/* recv'r fifo overrun */
	unsigned long	rx_missed_errors;	/* receiver missed packet */
	/* detailed tx_errors: */
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;
	/* for cslip etc: */
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};
186
187
188#include <linux/cache.h>
189#include <linux/skbuff.h>
190
191#ifdef CONFIG_RPS
192#include <linux/static_key.h>
193extern struct static_key_false rps_needed;
194extern struct static_key_false rfs_needed;
195#endif
196
197struct neighbour;
198struct neigh_parms;
199struct sk_buff;
200
/* One hardware address attached to a netdevice, kept on a list and
 * freed via RCU. */
struct netdev_hw_addr {
	struct list_head	list;
	unsigned char		addr[MAX_ADDR_LEN];
	unsigned char		type;	/* one of NETDEV_HW_ADDR_T_* below */
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_SLAVE		3
#define NETDEV_HW_ADDR_T_UNICAST	4
#define NETDEV_HW_ADDR_T_MULTICAST	5
	bool			global_use;
	int			sync_cnt;
	int			refcount;
	int			synced;
	struct rcu_head		rcu_head;
};

/* List head plus cached element count for a set of hardware addresses. */
struct netdev_hw_addr_list {
	struct list_head	list;
	int			count;
};

#define netdev_hw_addr_list_count(l) ((l)->count)
#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
#define netdev_hw_addr_list_for_each(ha, l) \
	list_for_each_entry(ha, &(l)->list, list)

/* Iterators/accessors for a device's unicast address list (dev->uc). */
#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->uc)

/* Iterators/accessors for a device's multicast address list (dev->mc). */
#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->mc)
236
/* Cached copy of a resolved link-layer header; readers use the seqlock
 * to get a consistent snapshot while cache_update() rewrites it. */
struct hh_cache {
	unsigned int	hh_len;		/* length of cached header */
	seqlock_t	hh_lock;

/* cached hardware header; allow for machine alignment needs.        */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte-aligned headroom for hard_header_len plus
 * needed_headroom. The AND/ADD pair rounds the sum to a multiple of
 * HH_DATA_MOD (overshooting when already aligned is intentional so a
 * full extra slot is always available).
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)

/* Per-link-layer-protocol operations for building and parsing hardware
 * headers (e.g. eth_header_ops for Ethernet). */
struct header_ops {
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned int len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
	bool	(*validate)(const char *ll_header, unsigned int len);
	__be16	(*parse_protocol)(const struct sk_buff *skb);
};
275
276
277
278
279
280
/* Bit numbers for the atomic dev->state bitmask. */
enum netdev_state_t {
	__LINK_STATE_START,		/* device is up */
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,		/* carrier lost */
	__LINK_STATE_LINKWATCH_PENDING,	/* linkwatch event queued */
	__LINK_STATE_DORMANT,
};

/*
 * This structure holds boot-time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

int __init netdev_boot_setup(char *str);

/* One GRO hash bucket: list of held skbs plus its length. */
struct gro_list {
	struct list_head	list;
	int			count;
};

/*
 * Number of GRO hash buckets; must not exceed the number of bits in
 * napi_struct::gro_bitmask (one bit flags each non-empty bucket).
 */
#define GRO_HASH_BUCKETS	8
312
313
314
315
/* Structure for NAPI scheduling, similar to tasklet but with weighting. */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit.  This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-CPU poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;		/* NAPIF_STATE_* bits */
	int			weight;		/* poll budget per round */
	unsigned long		gro_bitmask;	/* non-empty gro_hash buckets */
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	int			poll_owner;
#endif
	struct net_device	*dev;
	struct gro_list		gro_hash[GRO_HASH_BUCKETS];
	struct sk_buff		*skb;
	struct hrtimer		timer;
	struct list_head	dev_list;
	struct hlist_node	napi_hash_node;
	unsigned int		napi_id;
};

/* Bit numbers for napi_struct::state. */
enum {
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_MISSED,	/* reschedule a napi */
	NAPI_STATE_DISABLE,	/* Disable pending */
	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
	NAPI_STATE_HASHED,	/* In NAPI hash (busy polling possible) */
	NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */
	NAPI_STATE_IN_BUSY_POLL,/* sk_busy_loop() owns this NAPI */
};

/* Mask forms of the state bits above, for READ_ONCE/cmpxchg users. */
enum {
	NAPIF_STATE_SCHED	 = BIT(NAPI_STATE_SCHED),
	NAPIF_STATE_MISSED	 = BIT(NAPI_STATE_MISSED),
	NAPIF_STATE_DISABLE	 = BIT(NAPI_STATE_DISABLE),
	NAPIF_STATE_NPSVC	 = BIT(NAPI_STATE_NPSVC),
	NAPIF_STATE_HASHED	 = BIT(NAPI_STATE_HASHED),
	NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL),
	NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL),
};

/* Verdicts returned by the GRO receive machinery for an input skb. */
enum gro_result {
	GRO_MERGED,
	GRO_MERGED_FREE,
	GRO_HELD,
	GRO_NORMAL,
	GRO_DROP,
	GRO_CONSUMED,
};
typedef enum gro_result gro_result_t;
370
/**
 * enum rx_handler_result - Possible return values for rx_handlers.
 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
 * further.
 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
 * case skb->dev was changed by rx_handler.
 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
 *
 * rx_handlers are functions called from inside __netif_receive_skb(), to do
 * special processing of the skb, prior to delivery to protocol handlers.
 *
 * Currently, a net_device can only have a single rx_handler registered.
 * Trying to register a second one is refused.
 *
 * If a rx_handler modifies the skb pointer it is passed (by pointer), it
 * must update *pskb; if the skb was consumed or must not be processed
 * further it returns the appropriate verdict instead.
 */
enum rx_handler_result {
	RX_HANDLER_CONSUMED,
	RX_HANDLER_ANOTHER,
	RX_HANDLER_EXACT,
	RX_HANDLER_PASS,
};
typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
420
421void __napi_schedule(struct napi_struct *n);
422void __napi_schedule_irqoff(struct napi_struct *n);
423
/**
 *	napi_disable_pending - is a disable of this NAPI context pending?
 *	@n: NAPI context
 *
 * Returns true when napi_disable() has been requested but has not yet
 * completed.
 */
static inline bool napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}
428
429bool napi_schedule_prep(struct napi_struct *n);
430
431
432
433
434
435
436
437
/**
 *	napi_schedule - schedule NAPI poll
 *	@n: NAPI context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}
443
444
445
446
447
448
449
/**
 *	napi_schedule_irqoff - schedule NAPI poll
 *	@n: NAPI context
 *
 * Variant of napi_schedule(), assuming hard irqs are masked.
 */
static inline void napi_schedule_irqoff(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule_irqoff(n);
}
455
456
457static inline bool napi_reschedule(struct napi_struct *napi)
458{
459 if (napi_schedule_prep(napi)) {
460 __napi_schedule(napi);
461 return true;
462 }
463 return false;
464}
465
466bool napi_complete_done(struct napi_struct *n, int work_done);
467
468
469
470
471
472
473
474
/**
 *	napi_complete - NAPI processing complete
 *	@n: NAPI context
 *
 * Mark NAPI processing as complete. Consider using napi_complete_done()
 * instead to pass the amount of work done.
 * Return false if device should avoid rearming interrupts.
 */
static inline bool napi_complete(struct napi_struct *n)
{
	return napi_complete_done(n, 0);
}
479
480
481
482
483
484
485
486
487
488
489
490
491
492bool napi_hash_del(struct napi_struct *napi);
493
494
495
496
497
498
499
500
501void napi_disable(struct napi_struct *n);
502
503
504
505
506
507
508
509
/**
 *	napi_enable - enable NAPI scheduling
 *	@n: NAPI context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable(), which leaves SCHED set.
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	/* make sure all prior stores are visible before clearing SCHED */
	smp_mb__before_atomic();
	clear_bit(NAPI_STATE_SCHED, &n->state);
	clear_bit(NAPI_STATE_NPSVC, &n->state);
}
517
518
519
520
521
522
523
524
525
/**
 *	napi_synchronize - wait until NAPI is not running
 *	@n: NAPI context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	if (IS_ENABLED(CONFIG_SMP))
		while (test_bit(NAPI_STATE_SCHED, &n->state))
			msleep(1);
	else
		barrier();
}
534
535
536
537
538
539
540
541
542
/**
 *	napi_if_scheduled_mark_missed - if napi is running, set the
 *	NAPIF_STATE_MISSED
 *	@n: NAPI context
 *
 * If napi is running, set the NAPIF_STATE_MISSED, and return true if
 * NAPI is scheduled.
 **/
static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n)
{
	unsigned long val, new;

	do {
		val = READ_ONCE(n->state);
		/* a pending disable counts as "still scheduled" for callers */
		if (val & NAPIF_STATE_DISABLE)
			return true;

		if (!(val & NAPIF_STATE_SCHED))
			return false;

		new = val | NAPIF_STATE_MISSED;
	} while (cmpxchg(&n->state, val, new) != val); /* retry on race */

	return true;
}
560
/* Bit numbers for netdev_queue::state. */
enum netdev_queue_state_t {
	__QUEUE_STATE_DRV_XOFF,		/* queue stopped by the driver */
	__QUEUE_STATE_STACK_XOFF,	/* queue stopped by the stack */
	__QUEUE_STATE_FROZEN,
};

#define QUEUE_STATE_DRV_XOFF	(1 << __QUEUE_STATE_DRV_XOFF)
#define QUEUE_STATE_STACK_XOFF	(1 << __QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_FROZEN	(1 << __QUEUE_STATE_FROZEN)

/* Composite masks: "stopped by anyone" and the frozen variants used by
 * the xmit path to decide whether a queue may be used. */
#define QUEUE_STATE_ANY_XOFF	(QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
					QUEUE_STATE_FROZEN)
#define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \
					QUEUE_STATE_FROZEN)
576
577
578
579
580
581
582
583
584
585
586
/* One transmit queue of a net_device, cacheline-split into a read-mostly
 * part and a write-mostly (xmit-hot) part. */
struct netdev_queue {
/*
 * read-mostly part
 */
	struct net_device	*dev;
	struct Qdisc __rcu	*qdisc;
	struct Qdisc		*qdisc_sleeping;
#ifdef CONFIG_SYSFS
	struct kobject		kobj;
#endif
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	int			numa_node;
#endif
	unsigned long		tx_maxrate;
	/*
	 * Number of TX timeouts for this queue
	 */
	unsigned long		trans_timeout;

	/* Subordinate device that the queue has been assigned to */
	struct net_device	*sb_dev;
#ifdef CONFIG_XDP_SOCKETS
	struct xdp_umem		*umem;
#endif
/*
 * write-mostly part
 */
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	int			xmit_lock_owner;
	/*
	 * Time (in jiffies) of last Tx
	 */
	unsigned long		trans_start;

	unsigned long		state;	/* QUEUE_STATE_* bits */

#ifdef CONFIG_BQL
	struct dql		dql;	/* byte queue limits */
#endif
} ____cacheline_aligned_in_smp;
628
629extern int sysctl_fb_tunnels_only_for_init_net;
630extern int sysctl_devconf_inherit_init_net;
631
632static inline bool net_has_fallback_tunnels(const struct net *net)
633{
634 return net == &init_net ||
635 !IS_ENABLED(CONFIG_SYSCTL) ||
636 !sysctl_fb_tunnels_only_for_init_net;
637}
638
/* Return the NUMA node this TX queue was assigned to, or NUMA_NO_NODE
 * when XPS/NUMA support is not compiled in (the field does not exist). */
static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	return q->numa_node;
#else
	return NUMA_NO_NODE;
#endif
}
647
/* Record the NUMA node for this TX queue; no-op unless XPS and NUMA are
 * both enabled. */
static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	q->numa_node = node;
#endif
}
654
655#ifdef CONFIG_RPS
656
657
658
659
/*
 * This structure holds an RPS map which can be of variable length.  The
 * map is an array of CPUs.
 */
struct rps_map {
	unsigned int len;
	struct rcu_head rcu;
	u16 cpus[0];
};
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))

/*
 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
 * tail pointer for that CPU's input queue at the time of last enqueue, and
 * a hardware filter index.
 */
struct rps_dev_flow {
	u16 cpu;
	u16 filter;
	unsigned int last_qtail;
};
#define RPS_NO_FILTER 0xffff

/*
 * The rps_dev_flow_table structure contains a table of flow mappings.
 */
struct rps_dev_flow_table {
	unsigned int mask;
	struct rcu_head rcu;
	struct rps_dev_flow flows[0];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
    ((_num) * sizeof(struct rps_dev_flow)))

/*
 * The rps_sock_flow_table contains mappings of flows to the last CPU
 * on which they were processed by the application.
 * Each entry is a 32bit value: the upper part holds the high-order bits
 * of the flow hash, the lower part (selected by rps_cpu_mask) holds the
 * CPU number.
 */
struct rps_sock_flow_table {
	u32	mask;

	u32	ents[0] ____cacheline_aligned_in_smp;
};
#define	RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))

#define RPS_NO_CPU 0xffff

extern u32 rps_cpu_mask;
extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
711
/* Record in @table that the flow identified by @hash is being processed
 * on the current CPU, so RFS can steer future packets of this flow here.
 * hash == 0 (no hash) is ignored. */
static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
					u32 hash)
{
	if (table && hash) {
		unsigned int index = hash & table->mask;
		u32 val = hash & ~rps_cpu_mask;

		/* We only give a hint, preemption can change CPU under us */
		val |= raw_smp_processor_id();

		/* avoid dirtying the cacheline if the value is unchanged */
		if (table->ents[index] != val)
			table->ents[index] = val;
	}
}
726
727#ifdef CONFIG_RFS_ACCEL
728bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
729 u16 filter_id);
730#endif
731#endif
732
733
/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
#ifdef CONFIG_RPS
	struct rps_map __rcu		*rps_map;
	struct rps_dev_flow_table __rcu	*rps_flow_table;
#endif
	struct kobject			kobj;
	struct net_device		*dev;
	struct xdp_rxq_info		xdp_rxq;
#ifdef CONFIG_XDP_SOCKETS
	struct xdp_umem			*umem;
#endif
} ____cacheline_aligned_in_smp;

/*
 * RX queue sysfs attribute: show/store operate on the queue object.
 */
struct rx_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_rx_queue *queue, char *buf);
	ssize_t (*store)(struct netdev_rx_queue *queue,
			 const char *buf, size_t len);
};
756
757#ifdef CONFIG_XPS
758
759
760
761
/*
 * This structure holds an XPS map which can be of variable length.  The
 * map is an array of queues.
 */
struct xps_map {
	unsigned int len;
	unsigned int alloc_len;
	struct rcu_head rcu;
	u16 queues[0];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
       - sizeof(struct xps_map)) / sizeof(u16))

/*
 * This structure holds all XPS maps for device.  Maps are indexed by CPU
 * (or by RX queue, for the RXQ variant).
 */
struct xps_dev_maps {
	struct rcu_head rcu;
	struct xps_map __rcu *attr_map[0]; /* either CPUs map or RXQs map */
};

#define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) +	\
	(nr_cpu_ids * (_tcs) * sizeof(struct xps_map *)))

#define XPS_RXQ_DEV_MAPS_SIZE(_tcs, _rxqs) (sizeof(struct xps_dev_maps) +\
	(_rxqs * (_tcs) * sizeof(struct xps_map *)))

#endif /* CONFIG_XPS */

#define TC_MAX_QUEUE	16
#define TC_BITMASK	15
/* HW offloaded queuing disciplines txq count and offset maps */
struct netdev_tc_txq {
	u16 count;
	u16 offset;
};
795
796#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
797
798
799
800
/*
 * This structure is to hold information about the device
 * configured to run FCoE protocol stack.
 */
struct netdev_fcoe_hbainfo {
	char	manufacturer[64];
	char	serial_number[64];
	char	hardware_version[64];
	char	driver_version[64];
	char	optionrom_version[64];
	char	firmware_version[64];
	char	model[256];
	char	model_description[256];
};
811#endif
812
#define MAX_PHYS_ITEM_ID_LEN 32

/* This structure holds a unique identifier to identify some
 * physical item (port for example) used by a netdevice.
 */
struct netdev_phys_item_id {
	unsigned char id[MAX_PHYS_ITEM_ID_LEN];
	unsigned char id_len;
};

/* Two physical-item IDs match when their lengths are equal and the
 * first id_len bytes of the id buffers are identical. */
static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
					    struct netdev_phys_item_id *b)
{
	if (a->id_len != b->id_len)
		return false;

	return memcmp(a->id, b->id, a->id_len) == 0;
}
829
/* Fallback queue selector handed to ndo_select_queue implementations. */
typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
				       struct sk_buff *skb,
				       struct net_device *sb_dev);

/* Offload type dispatched through ndo_setup_tc. */
enum tc_setup_type {
	TC_SETUP_QDISC_MQPRIO,
	TC_SETUP_CLSU32,
	TC_SETUP_CLSFLOWER,
	TC_SETUP_CLSMATCHALL,
	TC_SETUP_CLSBPF,
	TC_SETUP_BLOCK,
	TC_SETUP_QDISC_CBS,
	TC_SETUP_QDISC_RED,
	TC_SETUP_QDISC_PRIO,
	TC_SETUP_QDISC_MQ,
	TC_SETUP_QDISC_ETF,
	TC_SETUP_ROOT_QDISC,
	TC_SETUP_QDISC_GRED,
};

/* These structures hold the attributes of bpf state that are being passed
 * to the netdevice through the bpf op.
 */
enum bpf_netdev_command {
	/* Set or clear a bpf program used in the earliest stages of packet
	 * rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee
	 * is responsible for calling bpf_prog_put on any old progs that are
	 * stored. In case of error, the callee need not release the new prog
	 * reference, but on success it takes ownership and must bpf_prog_put
	 * when it is no longer used.
	 */
	XDP_SETUP_PROG,
	XDP_SETUP_PROG_HW,
	/* Check if a bpf program is set on the device.  The callee should
	 * set @prog_id to the program ID.
	 */
	XDP_QUERY_PROG,
	XDP_QUERY_PROG_HW,
	/* BPF program for offload callbacks, invoked at program load time. */
	BPF_OFFLOAD_MAP_ALLOC,
	BPF_OFFLOAD_MAP_FREE,
	XDP_SETUP_XSK_UMEM,
};
870
871struct bpf_prog_offload_ops;
872struct netlink_ext_ack;
873struct xdp_umem;
874
/* Argument block for ndo_bpf; which union member is valid depends on
 * @command. */
struct netdev_bpf {
	enum bpf_netdev_command command;
	union {
		/* XDP_SETUP_PROG, XDP_SETUP_PROG_HW */
		struct {
			u32 flags;
			struct bpf_prog *prog;
			struct netlink_ext_ack *extack;
		};
		/* XDP_QUERY_PROG, XDP_QUERY_PROG_HW */
		struct {
			u32 prog_id;
			/* flags with which program was installed */
			u32 prog_flags;
		};
		/* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */
		struct {
			struct bpf_offloaded_map *offmap;
		};
		/* XDP_SETUP_XSK_UMEM */
		struct {
			struct xdp_umem *umem;
			u16 queue_id;
		} xsk;
	};
};

#ifdef CONFIG_XFRM_OFFLOAD
/* Driver callbacks for IPsec (xfrm) hardware offload. */
struct xfrmdev_ops {
	int	(*xdo_dev_state_add) (struct xfrm_state *x);
	void	(*xdo_dev_state_delete) (struct xfrm_state *x);
	void	(*xdo_dev_state_free) (struct xfrm_state *x);
	bool	(*xdo_dev_offload_ok) (struct sk_buff *skb,
				       struct xfrm_state *x);
	void	(*xdo_dev_state_advance_esn) (struct xfrm_state *x);
};
#endif

/* RCU-freed container for a device's ifalias string. */
struct dev_ifalias {
	struct rcu_head rcuhead;
	char ifalias[];
};
917
918struct devlink;
919struct tlsdev_ops;
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
/*
 * struct net_device_ops - the operations a network driver implements.
 * All callbacks are optional unless otherwise required by the device
 * type; the core checks each pointer before calling it.  Grouped below
 * roughly by subsystem (lifecycle, xmit, addresses, VF management,
 * offloads, FDB/bridge, tunnels, XDP).
 */
struct net_device_ops {
	/* device lifecycle: register/unregister, up/down */
	int			(*ndo_init)(struct net_device *dev);
	void			(*ndo_uninit)(struct net_device *dev);
	int			(*ndo_open)(struct net_device *dev);
	int			(*ndo_stop)(struct net_device *dev);
	/* transmit path */
	netdev_tx_t		(*ndo_start_xmit)(struct sk_buff *skb,
						  struct net_device *dev);
	netdev_features_t	(*ndo_features_check)(struct sk_buff *skb,
						      struct net_device *dev,
						      netdev_features_t features);
	u16			(*ndo_select_queue)(struct net_device *dev,
						    struct sk_buff *skb,
						    struct net_device *sb_dev);
	/* rx mode / addressing / basic configuration */
	void			(*ndo_change_rx_flags)(struct net_device *dev,
						       int flags);
	void			(*ndo_set_rx_mode)(struct net_device *dev);
	int			(*ndo_set_mac_address)(struct net_device *dev,
						       void *addr);
	int			(*ndo_validate_addr)(struct net_device *dev);
	int			(*ndo_do_ioctl)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
	int			(*ndo_set_config)(struct net_device *dev,
						  struct ifmap *map);
	int			(*ndo_change_mtu)(struct net_device *dev,
						  int new_mtu);
	int			(*ndo_neigh_setup)(struct net_device *dev,
						   struct neigh_parms *);
	void			(*ndo_tx_timeout) (struct net_device *dev);

	/* statistics */
	void			(*ndo_get_stats64)(struct net_device *dev,
						   struct rtnl_link_stats64 *storage);
	bool			(*ndo_has_offload_stats)(const struct net_device *dev, int attr_id);
	int			(*ndo_get_offload_stats)(int attr_id,
							 const struct net_device *dev,
							 void *attr_data);
	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);

	/* VLAN filtering */
	int			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
						       __be16 proto, u16 vid);
	int			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
							__be16 proto, u16 vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*ndo_poll_controller)(struct net_device *dev);
	int			(*ndo_netpoll_setup)(struct net_device *dev,
						     struct netpoll_info *info);
	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
	/* SR-IOV virtual function management */
	int			(*ndo_set_vf_mac)(struct net_device *dev,
						  int queue, u8 *mac);
	int			(*ndo_set_vf_vlan)(struct net_device *dev,
						   int queue, u16 vlan,
						   u8 qos, __be16 proto);
	int			(*ndo_set_vf_rate)(struct net_device *dev,
						   int vf, int min_tx_rate,
						   int max_tx_rate);
	int			(*ndo_set_vf_spoofchk)(struct net_device *dev,
						       int vf, bool setting);
	int			(*ndo_set_vf_trust)(struct net_device *dev,
						    int vf, bool setting);
	int			(*ndo_get_vf_config)(struct net_device *dev,
						     int vf,
						     struct ifla_vf_info *ivf);
	int			(*ndo_set_vf_link_state)(struct net_device *dev,
							 int vf, int link_state);
	int			(*ndo_get_vf_stats)(struct net_device *dev,
						    int vf,
						    struct ifla_vf_stats
						    *vf_stats);
	int			(*ndo_set_vf_port)(struct net_device *dev,
						   int vf,
						   struct nlattr *port[]);
	int			(*ndo_get_vf_port)(struct net_device *dev,
						   int vf, struct sk_buff *skb);
	int			(*ndo_set_vf_guid)(struct net_device *dev,
						   int vf, u64 guid,
						   int guid_type);
	int			(*ndo_set_vf_rss_query_en)(
						   struct net_device *dev,
						   int vf, bool setting);
	/* traffic-control offload entry point (see enum tc_setup_type) */
	int			(*ndo_setup_tc)(struct net_device *dev,
						enum tc_setup_type type,
						void *type_data);
#if IS_ENABLED(CONFIG_FCOE)
	int			(*ndo_fcoe_enable)(struct net_device *dev);
	int			(*ndo_fcoe_disable)(struct net_device *dev);
	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
						      u16 xid,
						      struct scatterlist *sgl,
						      unsigned int sgc);
	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
						     u16 xid);
	int			(*ndo_fcoe_ddp_target)(struct net_device *dev,
						       u16 xid,
						       struct scatterlist *sgl,
						       unsigned int sgc);
	int			(*ndo_fcoe_get_hbainfo)(struct net_device *dev,
							struct netdev_fcoe_hbainfo *hbainfo);
#endif

#if IS_ENABLED(CONFIG_LIBFCOE)
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
						    u64 *wwn, int type);
#endif

#ifdef CONFIG_RFS_ACCEL
	int			(*ndo_rx_flow_steer)(struct net_device *dev,
						     const struct sk_buff *skb,
						     u16 rxq_index,
						     u32 flow_id);
#endif
	/* master/slave (bonding, team) management */
	int			(*ndo_add_slave)(struct net_device *dev,
						 struct net_device *slave_dev,
						 struct netlink_ext_ack *extack);
	int			(*ndo_del_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	netdev_features_t	(*ndo_fix_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_set_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_neigh_construct)(struct net_device *dev,
						       struct neighbour *n);
	void			(*ndo_neigh_destroy)(struct net_device *dev,
						     struct neighbour *n);

	/* forwarding database (FDB) and bridge netlink operations */
	int			(*ndo_fdb_add)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid,
					       u16 flags,
					       struct netlink_ext_ack *extack);
	int			(*ndo_fdb_del)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid);
	int			(*ndo_fdb_dump)(struct sk_buff *skb,
						struct netlink_callback *cb,
						struct net_device *dev,
						struct net_device *filter_dev,
						int *idx);
	int			(*ndo_fdb_get)(struct sk_buff *skb,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid, u32 portid, u32 seq,
					       struct netlink_ext_ack *extack);
	int			(*ndo_bridge_setlink)(struct net_device *dev,
						      struct nlmsghdr *nlh,
						      u16 flags,
						      struct netlink_ext_ack *extack);
	int			(*ndo_bridge_getlink)(struct sk_buff *skb,
						      u32 pid, u32 seq,
						      struct net_device *dev,
						      u32 filter_mask,
						      int nlflags);
	int			(*ndo_bridge_dellink)(struct net_device *dev,
						      struct nlmsghdr *nlh,
						      u16 flags);
	int			(*ndo_change_carrier)(struct net_device *dev,
						      bool new_carrier);
	int			(*ndo_get_phys_port_id)(struct net_device *dev,
							struct netdev_phys_item_id *ppid);
	int			(*ndo_get_port_parent_id)(struct net_device *dev,
							  struct netdev_phys_item_id *ppid);
	int			(*ndo_get_phys_port_name)(struct net_device *dev,
							  char *name, size_t len);
	/* UDP tunnel port offload notifications (vxlan/geneve) */
	void			(*ndo_udp_tunnel_add)(struct net_device *dev,
						      struct udp_tunnel_info *ti);
	void			(*ndo_udp_tunnel_del)(struct net_device *dev,
						      struct udp_tunnel_info *ti);
	void*			(*ndo_dfwd_add_station)(struct net_device *pdev,
							struct net_device *dev);
	void			(*ndo_dfwd_del_station)(struct net_device *pdev,
							void *priv);

	int			(*ndo_get_lock_subclass)(struct net_device *dev);
	int			(*ndo_set_tx_maxrate)(struct net_device *dev,
						      int queue_index,
						      u32 maxrate);
	int			(*ndo_get_iflink)(const struct net_device *dev);
	int			(*ndo_change_proto_down)(struct net_device *dev,
							 bool proto_down);
	int			(*ndo_fill_metadata_dst)(struct net_device *dev,
						       struct sk_buff *skb);
	void			(*ndo_set_rx_headroom)(struct net_device *dev,
						       int needed_headroom);
	/* XDP / AF_XDP */
	int			(*ndo_bpf)(struct net_device *dev,
					   struct netdev_bpf *bpf);
	int			(*ndo_xdp_xmit)(struct net_device *dev, int n,
						struct xdp_frame **xdp,
						u32 flags);
	int			(*ndo_xsk_async_xmit)(struct net_device *dev,
						      u32 queue_id);
	struct devlink_port *	(*ndo_get_devlink_port)(struct net_device *dev);
};
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
/**
 * enum netdev_priv_flags - &struct net_device priv_flags
 *
 * These flags are only set internally by drivers and used in the kernel;
 * they are invisible to userspace, so their order/values may change
 * between kernel releases.
 */
enum netdev_priv_flags {
	IFF_802_1Q_VLAN			= 1<<0,		/* 802.1Q VLAN device */
	IFF_EBRIDGE			= 1<<1,		/* Ethernet bridging device */
	IFF_BONDING			= 1<<2,		/* bonding master or slave */
	IFF_ISATAP			= 1<<3,		/* ISATAP interface (RFC4214) */
	IFF_WAN_HDLC			= 1<<4,		/* WAN HDLC device */
	IFF_XMIT_DST_RELEASE		= 1<<5,		/* xmit may release skb->dst */
	IFF_DONT_BRIDGE			= 1<<6,		/* disallow bridging this dev */
	IFF_DISABLE_NETPOLL		= 1<<7,		/* disable netpoll at run-time */
	IFF_MACVLAN_PORT		= 1<<8,		/* device used as macvlan port */
	IFF_BRIDGE_PORT			= 1<<9,		/* device used as bridge port */
	IFF_OVS_DATAPATH		= 1<<10,	/* Open vSwitch datapath port */
	IFF_TX_SKB_SHARING		= 1<<11,	/* supports sharing skbs on tx */
	IFF_UNICAST_FLT			= 1<<12,	/* supports unicast filtering */
	IFF_TEAM_PORT			= 1<<13,	/* device used as team port */
	IFF_SUPP_NOFCS			= 1<<14,	/* supports sending custom FCS */
	IFF_LIVE_ADDR_CHANGE		= 1<<15,	/* hw address change while up */
	IFF_MACVLAN			= 1<<16,	/* macvlan device */
	IFF_XMIT_DST_RELEASE_PERM	= 1<<17,	/* XMIT_DST_RELEASE ignoring stack */
	IFF_L3MDEV_MASTER		= 1<<18,	/* device is an L3 master */
	IFF_NO_QUEUE			= 1<<19,	/* can run without qdisc */
	IFF_OPENVSWITCH			= 1<<20,	/* Open vSwitch master */
	IFF_L3MDEV_SLAVE		= 1<<21,	/* enslaved to an L3 master */
	IFF_TEAM			= 1<<22,	/* team device */
	IFF_RXFH_CONFIGURED		= 1<<23,	/* RX flow hash table configured */
	IFF_PHONY_HEADROOM		= 1<<24,	/* headroom managed externally */
	IFF_MACSEC			= 1<<25,	/* MACsec device */
	IFF_NO_RX_HANDLER		= 1<<26,	/* no rx_handler hook support */
	IFF_FAILOVER			= 1<<27,	/* failover master device */
	IFF_FAILOVER_SLAVE		= 1<<28,	/* lower dev of failover master */
	IFF_L3MDEV_RX_HANDLER		= 1<<29,	/* only invoke L3 master rx handler */
	IFF_LIVE_RENAME_OK		= 1<<30,	/* rename allowed while up */
};
1511
1512#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
1513#define IFF_EBRIDGE IFF_EBRIDGE
1514#define IFF_BONDING IFF_BONDING
1515#define IFF_ISATAP IFF_ISATAP
1516#define IFF_WAN_HDLC IFF_WAN_HDLC
1517#define IFF_XMIT_DST_RELEASE IFF_XMIT_DST_RELEASE
1518#define IFF_DONT_BRIDGE IFF_DONT_BRIDGE
1519#define IFF_DISABLE_NETPOLL IFF_DISABLE_NETPOLL
1520#define IFF_MACVLAN_PORT IFF_MACVLAN_PORT
1521#define IFF_BRIDGE_PORT IFF_BRIDGE_PORT
1522#define IFF_OVS_DATAPATH IFF_OVS_DATAPATH
1523#define IFF_TX_SKB_SHARING IFF_TX_SKB_SHARING
1524#define IFF_UNICAST_FLT IFF_UNICAST_FLT
1525#define IFF_TEAM_PORT IFF_TEAM_PORT
1526#define IFF_SUPP_NOFCS IFF_SUPP_NOFCS
1527#define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE
1528#define IFF_MACVLAN IFF_MACVLAN
1529#define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM
1530#define IFF_L3MDEV_MASTER IFF_L3MDEV_MASTER
1531#define IFF_NO_QUEUE IFF_NO_QUEUE
1532#define IFF_OPENVSWITCH IFF_OPENVSWITCH
1533#define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE
1534#define IFF_TEAM IFF_TEAM
1535#define IFF_RXFH_CONFIGURED IFF_RXFH_CONFIGURED
1536#define IFF_MACSEC IFF_MACSEC
1537#define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER
1538#define IFF_FAILOVER IFF_FAILOVER
1539#define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE
1540#define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER
1541#define IFF_LIVE_RENAME_OK IFF_LIVE_RENAME_OK
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
/*
 * struct net_device - the core network device structure.
 *
 * One instance exists per network interface. Field grouping follows the
 * hot/cold-path split: RX-path fields precede the ____cacheline_aligned_in_smp
 * TX block. Do not reorder fields; layout is relied upon for cache behavior.
 */
struct net_device {
	/* Interface name ("eth0", ...) and its name-hash linkage. */
	char name[IFNAMSIZ];
	struct hlist_node name_hlist;
	struct dev_ifalias __rcu *ifalias;	/* SNMP-style alias, RCU-managed */

	/* Legacy ISA-era I/O window and interrupt line. */
	unsigned long mem_end;
	unsigned long mem_start;
	unsigned long base_addr;
	int irq;

	/* Device state bits (__LINK_STATE_*), manipulated with bitops. */
	unsigned long state;

	/* List linkage: global dev list, NAPI instances, unregister/close
	 * batches, and per-device packet-type handlers. */
	struct list_head dev_list;
	struct list_head napi_list;
	struct list_head unreg_list;
	struct list_head close_list;
	struct list_head ptype_all;
	struct list_head ptype_specific;

	/* Directly linked upper/lower devices (bonding, vlan, ...). */
	struct {
		struct list_head upper;
		struct list_head lower;
	} adj_list;

	/* Feature sets: active, changeable, requested, and the variants
	 * applied to VLAN/encapsulated/MPLS/partial-GSO traffic. */
	netdev_features_t features;
	netdev_features_t hw_features;
	netdev_features_t wanted_features;
	netdev_features_t vlan_features;
	netdev_features_t hw_enc_features;
	netdev_features_t mpls_features;
	netdev_features_t gso_partial_features;

	int ifindex;	/* unique interface index within the netns */
	int group;	/* device group this interface belongs to */

	struct net_device_stats stats;	/* legacy aggregate stats */

	/* Counters shared by all queues, hence atomic. */
	atomic_long_t rx_dropped;
	atomic_long_t tx_dropped;
	atomic_long_t rx_nohandler;

	/* Carrier up/down transition counters. */
	atomic_t carrier_up_count;
	atomic_t carrier_down_count;

#ifdef CONFIG_WIRELESS_EXT
	const struct iw_handler_def *wireless_handlers;
	struct iw_public_data *wireless_data;
#endif
	/* Operations tables; all const, set once by the driver. */
	const struct net_device_ops *netdev_ops;
	const struct ethtool_ops *ethtool_ops;
#ifdef CONFIG_NET_L3_MASTER_DEV
	const struct l3mdev_ops *l3mdev_ops;
#endif
#if IS_ENABLED(CONFIG_IPV6)
	const struct ndisc_ops *ndisc_ops;
#endif

#ifdef CONFIG_XFRM_OFFLOAD
	const struct xfrmdev_ops *xfrmdev_ops;
#endif

#if IS_ENABLED(CONFIG_TLS_DEVICE)
	const struct tlsdev_ops *tlsdev_ops;
#endif

	const struct header_ops *header_ops;	/* L2 header build/parse */

	unsigned int flags;	/* IFF_* flags visible to userspace */
	unsigned int priv_flags;	/* IFF_* flags internal to the kernel */

	unsigned short gflags;
	unsigned short padded;	/* padding added by alloc_netdev() */

	unsigned char operstate;	/* RFC 2863 operational state */
	unsigned char link_mode;

	unsigned char if_port;	/* selectable media type (AUI, TP, ...) */
	unsigned char dma;	/* legacy DMA channel */

	/* MTU bounds enforced by dev_set_mtu(). */
	unsigned int mtu;
	unsigned int min_mtu;
	unsigned int max_mtu;
	unsigned short type;	/* ARPHRD_* hardware type */
	unsigned short hard_header_len;	/* maximum L2 header length */
	unsigned char min_header_len;	/* minimum valid L2 header length */

	unsigned short needed_headroom;
	unsigned short needed_tailroom;

	/* Addresses. */
	unsigned char perm_addr[MAX_ADDR_LEN];	/* permanent hw address */
	unsigned char addr_assign_type;
	unsigned char addr_len;	/* hardware address length */
	unsigned short neigh_priv_len;
	unsigned short dev_id;	/* disambiguates devices sharing an address */
	unsigned short dev_port;
	spinlock_t addr_list_lock;	/* protects uc/mc lists below */
	unsigned char name_assign_type;
	bool uc_promisc;
	struct netdev_hw_addr_list uc;	/* secondary unicast addresses */
	struct netdev_hw_addr_list mc;	/* multicast addresses */
	struct netdev_hw_addr_list dev_addrs;	/* all hw addresses */

#ifdef CONFIG_SYSFS
	struct kset *queues_kset;
#endif
	/* Reference counts for promiscuous/allmulti mode requests. */
	unsigned int promiscuity;
	unsigned int allmulti;

	/* Protocol-specific per-device state pointers. */
#if IS_ENABLED(CONFIG_VLAN_8021Q)
	struct vlan_info __rcu *vlan_info;
#endif
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_port *dsa_ptr;
#endif
#if IS_ENABLED(CONFIG_TIPC)
	struct tipc_bearer __rcu *tipc_ptr;
#endif
#if IS_ENABLED(CONFIG_IRDA) || IS_ENABLED(CONFIG_ATALK)
	void *atalk_ptr;
#endif
	struct in_device __rcu *ip_ptr;	/* IPv4 state */
#if IS_ENABLED(CONFIG_DECNET)
	struct dn_dev __rcu *dn_ptr;
#endif
	struct inet6_dev __rcu *ip6_ptr;	/* IPv6 state */
#if IS_ENABLED(CONFIG_AX25)
	void *ax25_ptr;
#endif
	struct wireless_dev *ieee80211_ptr;
	struct wpan_dev *ieee802154_ptr;
#if IS_ENABLED(CONFIG_MPLS_ROUTING)
	struct mpls_dev __rcu *mpls_ptr;
#endif

	/*
	 * Cache-line-sensitive RX-path members.
	 */
	unsigned char *dev_addr;	/* current hw address (in dev_addrs) */

	struct netdev_rx_queue *_rx;	/* RX queue array */
	unsigned int num_rx_queues;	/* allocated at register time */
	unsigned int real_num_rx_queues;	/* currently active */

	struct bpf_prog __rcu *xdp_prog;	/* attached XDP program, if any */
	unsigned long gro_flush_timeout;
	rx_handler_func_t __rcu *rx_handler;	/* e.g. bridge/bond hook */
	void __rcu *rx_handler_data;

#ifdef CONFIG_NET_CLS_ACT
	struct mini_Qdisc __rcu *miniq_ingress;
#endif
	struct netdev_queue __rcu *ingress_queue;
#ifdef CONFIG_NETFILTER_INGRESS
	struct nf_hook_entries __rcu *nf_hooks_ingress;
#endif

	unsigned char broadcast[MAX_ADDR_LEN];
#ifdef CONFIG_RFS_ACCEL
	struct cpu_rmap *rx_cpu_rmap;	/* CPU <-> RX queue reverse map */
#endif
	struct hlist_node index_hlist;	/* ifindex-hash linkage */

	/*
	 * Cache-line-sensitive TX-path members; aligned to a fresh line so
	 * TX and RX hot data do not false-share.
	 */
	struct netdev_queue *_tx ____cacheline_aligned_in_smp;
	unsigned int num_tx_queues;	/* allocated at register time */
	unsigned int real_num_tx_queues;	/* currently active */
	struct Qdisc *qdisc;	/* root qdisc visible to userspace */
#ifdef CONFIG_NET_SCHED
	DECLARE_HASHTABLE (qdisc_hash, 4);
#endif
	unsigned int tx_queue_len;	/* default per-queue limit */
	spinlock_t tx_global_lock;
	int watchdog_timeo;	/* TX watchdog interval, in jiffies */

#ifdef CONFIG_XPS
	struct xps_dev_maps __rcu *xps_cpus_map;
	struct xps_dev_maps __rcu *xps_rxqs_map;
#endif
#ifdef CONFIG_NET_CLS_ACT
	struct mini_Qdisc __rcu *miniq_egress;
#endif

	struct timer_list watchdog_timer;

	int __percpu *pcpu_refcnt;	/* per-cpu refcount; see dev_hold() */
	struct list_head todo_list;	/* for netdev_run_todo() */

	struct list_head link_watch_list;

	/* Registration state machine. */
	enum { NETREG_UNINITIALIZED=0,
	       NETREG_REGISTERED,	/* completed register_netdevice() */
	       NETREG_UNREGISTERING,	/* called unregister_netdevice() */
	       NETREG_UNREGISTERED,	/* unregistered, freeing memory */
	       NETREG_RELEASED,		/* called free_netdev() */
	       NETREG_DUMMY,		/* dummy device for NAPI poll */
	} reg_state:8;

	bool dismantle;	/* device is going away, skip normal teardown paths */

	enum {
		RTNL_LINK_INITIALIZED,
		RTNL_LINK_INITIALIZING,
	} rtnl_link_state:16;

	bool needs_free_netdev;	/* free via free_netdev() after release */
	void (*priv_destructor)(struct net_device *dev);

#ifdef CONFIG_NETPOLL
	struct netpoll_info __rcu *npinfo;
#endif

	possible_net_t nd_net;	/* owning network namespace */

	/* Driver-private stats area; exactly one union member is used,
	 * depending on the device type. */
	union {
		void *ml_priv;
		struct pcpu_lstats __percpu *lstats;
		struct pcpu_sw_netstats __percpu *tstats;
		struct pcpu_dstats __percpu *dstats;
	};

#if IS_ENABLED(CONFIG_GARP)
	struct garp_port __rcu *garp_port;
#endif
#if IS_ENABLED(CONFIG_MRP)
	struct mrp_port __rcu *mrp_port;
#endif

	struct device dev;	/* embedded class device (sysfs parent) */
	const struct attribute_group *sysfs_groups[4];
	const struct attribute_group *sysfs_rx_queue_group;

	const struct rtnl_link_ops *rtnl_link_ops;

	/* GSO limits advertised by the driver. */
#define GSO_MAX_SIZE 65536
	unsigned int gso_max_size;
#define GSO_MAX_SEGS 65535
	u16 gso_max_segs;

#ifdef CONFIG_DCB
	const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
	/* Traffic-class mapping; negative num_tc marks subordinate channels
	 * (see netdev_get_sb_channel()). */
	s16 num_tc;
	struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
	u8 prio_tc_map[TC_BITMASK + 1];

#if IS_ENABLED(CONFIG_FCOE)
	unsigned int fcoe_ddp_xid;
#endif
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
	struct netprio_map __rcu *priomap;
#endif
	struct phy_device *phydev;	/* attached PHY, if any */
	struct sfp_bus *sfp_bus;
	/* lockdep class keys, set via netdev_lockdep_set_classes(). */
	struct lock_class_key *qdisc_tx_busylock;
	struct lock_class_key *qdisc_running_key;
	bool proto_down;	/* protocol-initiated carrier-down */
	unsigned wol_enabled:1;	/* Wake-on-LAN is enabled */
};
/* Map the embedded struct device back to its net_device. */
#define to_net_dev(d) container_of(d, struct net_device, dev)
2041
2042static inline bool netif_elide_gro(const struct net_device *dev)
2043{
2044 if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog)
2045 return true;
2046 return false;
2047}
2048
/* Alignment of the driver-private area returned by netdev_priv(). */
#define NETDEV_ALIGN 32

/* Map a socket priority to its configured traffic class. */
static inline
int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
{
	return dev->prio_tc_map[prio & TC_BITMASK];
}
2056
2057static inline
2058int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
2059{
2060 if (tc >= dev->num_tc)
2061 return -EINVAL;
2062
2063 dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
2064 return 0;
2065}
2066
2067int netdev_txq_to_tc(struct net_device *dev, unsigned int txq);
2068void netdev_reset_tc(struct net_device *dev);
2069int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset);
2070int netdev_set_num_tc(struct net_device *dev, u8 num_tc);
2071
/* Number of traffic classes currently configured on @dev. */
static inline
int netdev_get_num_tc(struct net_device *dev)
{
	return dev->num_tc;
}

void netdev_unbind_sb_channel(struct net_device *dev,
			      struct net_device *sb_dev);
int netdev_bind_sb_channel_queue(struct net_device *dev,
				 struct net_device *sb_dev,
				 u8 tc, u16 count, u16 offset);
int netdev_set_sb_channel(struct net_device *dev, u16 channel);
/*
 * Subordinate-channel id of @dev. A subordinate device stores the channel
 * as a negative num_tc, so a regular device (num_tc >= 0) yields 0.
 */
static inline int netdev_get_sb_channel(struct net_device *dev)
{
	return max_t(int, -dev->num_tc, 0);
}

/* Return TX queue @index; no bounds checking — caller guarantees validity. */
static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
					 unsigned int index)
{
	return &dev->_tx[index];
}
2095
/* TX queue the skb was mapped to via skb_set_queue_mapping(). */
static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
						    const struct sk_buff *skb)
{
	return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
}

/*
 * Invoke @f on every allocated TX queue of @dev, passing @arg through.
 * Iterates over num_tx_queues, not just the currently active queues.
 */
static inline void netdev_for_each_tx_queue(struct net_device *dev,
					    void (*f)(struct net_device *,
						      struct netdev_queue *,
						      void *),
					    void *arg)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		f(dev, &dev->_tx[i], arg);
}
2113
/*
 * netdev_lockdep_set_classes - give a device's locks their own lockdep keys.
 *
 * Used by stacked devices (vlan, bond, ...) so that lockdep does not report
 * false recursive-locking deadlocks when an upper device's TX path takes the
 * same lock classes as its lower device. The static keys give each call site
 * (i.e. each device type) a distinct class.
 */
#define netdev_lockdep_set_classes(dev)				\
{								\
	static struct lock_class_key qdisc_tx_busylock_key;	\
	static struct lock_class_key qdisc_running_key;		\
	static struct lock_class_key qdisc_xmit_lock_key;	\
	static struct lock_class_key dev_addr_list_lock_key;	\
	unsigned int i;						\
								\
	(dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key;	\
	(dev)->qdisc_running_key = &qdisc_running_key;		\
	lockdep_set_class(&(dev)->addr_list_lock,		\
			  &dev_addr_list_lock_key);		\
	for (i = 0; i < (dev)->num_tx_queues; i++)		\
		lockdep_set_class(&(dev)->_tx[i]._xmit_lock,	\
				  &qdisc_xmit_lock_key);	\
}
2130
2131u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
2132 struct net_device *sb_dev);
2133struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
2134 struct sk_buff *skb,
2135 struct net_device *sb_dev);
2136
2137
2138
2139
/*
 * Headroom to reserve when forwarding to @dev. Devices with
 * IFF_PHONY_HEADROOM manage their own headroom, so report 0 for them.
 */
static inline unsigned netdev_get_fwd_headroom(struct net_device *dev)
{
	return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom;
}

/* Ask the driver to adjust its RX headroom; no-op if unsupported. */
static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr)
{
	if (dev->netdev_ops->ndo_set_rx_headroom)
		dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr);
}

/* Reset RX headroom to the driver default (-1 is the "default" sentinel). */
static inline void netdev_reset_rx_headroom(struct net_device *dev)
{
	netdev_set_rx_headroom(dev, -1);
}
2156
2157
2158
2159
/* Network namespace this device lives in. */
static inline
struct net *dev_net(const struct net_device *dev)
{
	return read_pnet(&dev->nd_net);
}

/* Move the device's namespace pointer; caller handles all migration work. */
static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
	write_pnet(&dev->nd_net, net);
}

/*
 * netdev_priv - driver-private data of a net_device.
 *
 * The private area is allocated contiguously after the net_device itself
 * (by alloc_netdev()), at the first NETDEV_ALIGN-aligned offset past the
 * structure.
 */
static inline void *netdev_priv(const struct net_device *dev)
{
	return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
}
2182
2183
2184
2185
/* Set the sysfs parent of the net device (e.g. the owning PCI device). */
#define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))

/* Set the sysfs device type (e.g. "wlan", "bridge") for udev policy. */
#define SET_NETDEV_DEVTYPE(net, devtype)	((net)->dev.type = (devtype))

/* Default NAPI poll budget; the canonical weight passed to netif_napi_add(). */
#define NAPI_POLL_WEIGHT 64
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2210 int (*poll)(struct napi_struct *, int), int weight);
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222
/*
 * netif_tx_napi_add - register a NAPI context used for TX completion only.
 *
 * Identical to netif_napi_add() except the context is flagged
 * NAPI_STATE_NO_BUSY_POLL so busy polling never selects it (TX-cleanup
 * NAPIs produce no RX traffic worth polling for).
 */
static inline void netif_tx_napi_add(struct net_device *dev,
				     struct napi_struct *napi,
				     int (*poll)(struct napi_struct *, int),
				     int weight)
{
	set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
	netif_napi_add(dev, napi, poll, weight);
}
2231
2232
2233
2234
2235
2236
2237
2238void netif_napi_del(struct napi_struct *napi);
2239
/*
 * struct napi_gro_cb - per-skb GRO state, stored in skb->cb.
 * Must fit within sizeof(skb->cb); do not grow carelessly.
 */
struct napi_gro_cb {
	/* Virtual address of skb_shinfo(skb)->frags[0].page + offset;
	 * lets GRO read headers without pulling them into the linear area. */
	void *frag0;

	/* Length of frag0 data that may be read directly. */
	unsigned int frag0_len;

	/* How far the GRO layer has advanced into the packet headers. */
	int data_offset;

	/* Non-zero forces this skb out of GRO merging at flush time. */
	u16 flush;

	/* Save the IP fragmentation id here for comparison between packets. */
	u16 flush_id;

	/* Number of segments aggregated so far. */
	u16 count;

	/* Start offset for remote-checksum offload (see remcsum helpers). */
	u16 gro_remcsum_start;

	/* jiffies timestamp of the first segment, for aging out flows. */
	unsigned long age;

	/* Saved protocol field used during aggregation. */
	u16 proto;

	/* This packet matches an existing held GRO flow. */
	u8 same_flow:1;

	/* An encapsulation layer was already traversed (only one allowed). */
	u8 encap_mark:1;

	/* The csum field below holds a valid checksum. */
	u8 csum_valid:1;

	/* Number of checksums the hardware verified (CHECKSUM_UNNECESSARY). */
	u8 csum_cnt:3;

	/* How to free merged skbs: normally, or with a stolen head. */
	u8 free:2;
#define NAPI_GRO_FREE		  1
#define NAPI_GRO_FREE_STOLEN_HEAD 2

	/* Flow is IPv6. */
	u8 is_ipv6:1;

	/* Packet came through a foo-over-udp tunnel. */
	u8 is_fou:1;

	/* IP id may be treated as a fixed (atomic) value when merging. */
	u8 is_atomic:1;

	/* Guards against unbounded recursion through encap layers;
	 * see gro_recursion_inc_test() below. */
	u8 recursion_counter:4;

	/* 1 bit hole */

	/* Running checksum, updated as headers are pulled. */
	__wsum csum;

	/* Tail of the chain of merged skbs. */
	struct sk_buff *last;
};

/* Access the GRO control block inside an skb's cb area. */
#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)

/* Maximum nesting of gro_receive callbacks before we give up and flush. */
#define GRO_RECURSION_LIMIT 15
static inline int gro_recursion_inc_test(struct sk_buff *skb)
{
	return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
}
2313
2314typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *);
2315static inline struct sk_buff *call_gro_receive(gro_receive_t cb,
2316 struct list_head *head,
2317 struct sk_buff *skb)
2318{
2319 if (unlikely(gro_recursion_inc_test(skb))) {
2320 NAPI_GRO_CB(skb)->flush |= 1;
2321 return NULL;
2322 }
2323
2324 return cb(head, skb);
2325}
2326
2327typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *,
2328 struct sk_buff *);
2329static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
2330 struct sock *sk,
2331 struct list_head *head,
2332 struct sk_buff *skb)
2333{
2334 if (unlikely(gro_recursion_inc_test(skb))) {
2335 NAPI_GRO_CB(skb)->flush |= 1;
2336 return NULL;
2337 }
2338
2339 return cb(sk, head, skb);
2340}
2341
/*
 * struct packet_type - handler for a particular L3 protocol number.
 * Registered via dev_add_pack(); matched on @type (and @dev, if set).
 */
struct packet_type {
	__be16			type;	/* ETH_P_* in network byte order */
	bool			ignore_outgoing;
	struct net_device	*dev;	/* NULL means match all devices */
	int			(*func) (struct sk_buff *,
					 struct net_device *,
					 struct packet_type *,
					 struct net_device *);
	/* Batched variant: consumes a whole list of skbs at once. */
	void			(*list_func) (struct list_head *,
					      struct packet_type *,
					      struct net_device *);
	/* Used by af_packet to recognize its own handlers. */
	bool			(*id_match)(struct packet_type *ptype,
					    struct sock *sk);
	void			*af_packet_priv;
	struct list_head	list;
};

/* GSO/GRO callbacks attached to a protocol offload entry. */
struct offload_callbacks {
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						netdev_features_t features);
	struct sk_buff		*(*gro_receive)(struct list_head *head,
						struct sk_buff *skb);
	int			(*gro_complete)(struct sk_buff *skb, int nhoff);
};

/* Per-protocol offload registration (dev_add_offload()). */
struct packet_offload {
	__be16			 type;	/* ETH_P_* in network byte order */
	u16			 priority;	/* lower value runs first */
	struct offload_callbacks callbacks;
	struct list_head	 list;
};
2373
2374
/* Per-cpu RX/TX counters for software devices, read under u64_stats_sync. */
struct pcpu_sw_netstats {
	u64     rx_packets;
	u64     rx_bytes;
	u64     tx_packets;
	u64     tx_bytes;
	struct u64_stats_sync   syncp;
} __aligned(4 * sizeof(u64));

/* Per-cpu combined packet/byte counters (loopback-style devices). */
struct pcpu_lstats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
} __aligned(2 * sizeof(u64));

/*
 * Allocate a per-cpu stats structure of @type and initialize each cpu's
 * u64_stats_sync. Evaluates to the percpu pointer, or NULL on failure.
 */
#define __netdev_alloc_pcpu_stats(type, gfp)				\
({									\
	typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
	if (pcpu_stats)	{						\
		int __cpu;						\
		for_each_possible_cpu(__cpu) {				\
			typeof(type) *stat;				\
			stat = per_cpu_ptr(pcpu_stats, __cpu);		\
			u64_stats_init(&stat->syncp);			\
		}							\
	}								\
	pcpu_stats;							\
})

/* GFP_KERNEL convenience wrapper. */
#define netdev_alloc_pcpu_stats(type)					\
	__netdev_alloc_pcpu_stats(type, GFP_KERNEL)
2405
/* TX policy a link-aggregation (bond/team) master advertises to lowers. */
enum netdev_lag_tx_type {
	NETDEV_LAG_TX_TYPE_UNKNOWN,
	NETDEV_LAG_TX_TYPE_RANDOM,
	NETDEV_LAG_TX_TYPE_BROADCAST,
	NETDEV_LAG_TX_TYPE_ROUNDROBIN,
	NETDEV_LAG_TX_TYPE_ACTIVEBACKUP,
	NETDEV_LAG_TX_TYPE_HASH,
};

/* Hash policy used when tx_type is NETDEV_LAG_TX_TYPE_HASH. */
enum netdev_lag_hash {
	NETDEV_LAG_HASH_NONE,
	NETDEV_LAG_HASH_L2,	/* MAC addresses */
	NETDEV_LAG_HASH_L34,	/* IP addresses + transport ports */
	NETDEV_LAG_HASH_L23,	/* MAC + IP addresses */
	NETDEV_LAG_HASH_E23,	/* encapsulated L2+L3 */
	NETDEV_LAG_HASH_E34,	/* encapsulated L3+L4 */
	NETDEV_LAG_HASH_UNKNOWN,
};

/* Passed as upper_info in NETDEV_CHANGEUPPER events for LAG masters. */
struct netdev_lag_upper_info {
	enum netdev_lag_tx_type tx_type;
	enum netdev_lag_hash hash_type;
};

/* Passed as lower_state_info in NETDEV_CHANGELOWERSTATE for LAG slaves. */
struct netdev_lag_lower_state_info {
	u8 link_up : 1,
	   tx_enabled : 1;
};
2434
2435#include <linux/notifier.h>
2436
2437
2438
2439
2440
/*
 * enum netdev_cmd - events passed to netdevice notifier chains.
 * Values are stable identifiers; only append, never renumber.
 */
enum netdev_cmd {
	NETDEV_UP	= 1,	/* device brought up (after open) */
	NETDEV_DOWN,		/* device brought down */
	NETDEV_REBOOT,		/* historical: detected a hardware crash
				 * and restarted the device
				 */
	NETDEV_CHANGE,		/* device state (e.g. carrier) changed */
	NETDEV_REGISTER,
	NETDEV_UNREGISTER,
	NETDEV_CHANGEMTU,	/* MTU changed; see netdev_notifier_info_ext */
	NETDEV_CHANGEADDR,	/* hardware address changed */
	NETDEV_PRE_CHANGEADDR,	/* veto point before the address change */
	NETDEV_GOING_DOWN,	/* about to be brought down */
	NETDEV_CHANGENAME,
	NETDEV_FEAT_CHANGE,
	NETDEV_BONDING_FAILOVER,
	NETDEV_PRE_UP,		/* veto point before bringing the device up */
	NETDEV_PRE_TYPE_CHANGE,
	NETDEV_POST_TYPE_CHANGE,
	NETDEV_POST_INIT,
	NETDEV_RELEASE,
	NETDEV_NOTIFY_PEERS,	/* e.g. send gratuitous ARP after migration */
	NETDEV_JOIN,
	NETDEV_CHANGEUPPER,	/* upper device linked/unlinked */
	NETDEV_RESEND_IGMP,
	NETDEV_PRECHANGEMTU,	/* veto point before MTU change */
	NETDEV_CHANGEINFODATA,
	NETDEV_BONDING_INFO,
	NETDEV_PRECHANGEUPPER,	/* veto point before upper-device change */
	NETDEV_CHANGELOWERSTATE,
	NETDEV_UDP_TUNNEL_PUSH_INFO,
	NETDEV_UDP_TUNNEL_DROP_INFO,
	NETDEV_CHANGE_TX_QUEUE_LEN,
	NETDEV_CVLAN_FILTER_PUSH_INFO,
	NETDEV_CVLAN_FILTER_DROP_INFO,
	NETDEV_SVLAN_FILTER_PUSH_INFO,
	NETDEV_SVLAN_FILTER_DROP_INFO,
};
2480const char *netdev_cmd_to_name(enum netdev_cmd cmd);
2481
2482int register_netdevice_notifier(struct notifier_block *nb);
2483int unregister_netdevice_notifier(struct notifier_block *nb);
2484
/* Base payload for every netdevice notifier call. */
struct netdev_notifier_info {
	struct net_device	*dev;
	struct netlink_ext_ack	*extack;	/* may be NULL */
};

/* NETDEV_CHANGEMTU payload: carries the previous MTU in ext.mtu. */
struct netdev_notifier_info_ext {
	struct netdev_notifier_info info; /* must be first */
	union {
		u32 mtu;
	} ext;
};

/* NETDEV_CHANGE payload. */
struct netdev_notifier_change_info {
	struct netdev_notifier_info info; /* must be first */
	unsigned int flags_changed;
};

/* NETDEV_(PRE)CHANGEUPPER payload. */
struct netdev_notifier_changeupper_info {
	struct netdev_notifier_info info; /* must be first */
	struct net_device *upper_dev;	/* the upper device being (un)linked */
	bool master;			/* is it a master-type link? */
	bool linking;			/* true = link, false = unlink */
	void *upper_info;		/* e.g. struct netdev_lag_upper_info */
};

/* NETDEV_CHANGELOWERSTATE payload. */
struct netdev_notifier_changelowerstate_info {
	struct netdev_notifier_info info; /* must be first */
	void *lower_state_info;	/* e.g. struct netdev_lag_lower_state_info */
};

/* NETDEV_PRE_CHANGEADDR payload: the address about to be set. */
struct netdev_notifier_pre_changeaddr_info {
	struct netdev_notifier_info info; /* must be first */
	const unsigned char *dev_addr;
};

/* Initialize the common notifier payload fields. */
static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
					     struct net_device *dev)
{
	info->dev = dev;
	info->extack = NULL;
}

static inline struct net_device *
netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
{
	return info->dev;
}

static inline struct netlink_ext_ack *
netdev_notifier_info_to_extack(const struct netdev_notifier_info *info)
{
	return info->extack;
}
2538
2539int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
2540
2541
2542extern rwlock_t dev_base_lock;
2543
/* Iterate over all devices in a namespace; caller holds RTNL or
 * dev_base_lock (or RCU for the _rcu variants). */
#define for_each_netdev(net, d)		\
		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_reverse(net, d)	\
		list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_rcu(net, d)		\
		list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
/* _safe variant tolerates removal of @d during iteration. */
#define for_each_netdev_safe(net, d, n)	\
		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d)		\
		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue_rcu(net, d)		\
	list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
/* Visit every slave of @bond in the initial namespace (RCU). */
#define for_each_netdev_in_bond_rcu(bond, slave)	\
		for_each_netdev_rcu(&init_net, slave)	\
			if (netdev_master_upper_dev_get_rcu(slave) == (bond))
#define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)
2560
2561static inline struct net_device *next_net_device(struct net_device *dev)
2562{
2563 struct list_head *lh;
2564 struct net *net;
2565
2566 net = dev_net(dev);
2567 lh = dev->dev_list.next;
2568 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2569}
2570
/* RCU variant of next_net_device(); caller must be in an RCU read section. */
static inline struct net_device *next_net_device_rcu(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = rcu_dereference(list_next_rcu(&dev->dev_list));
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

/* First device in @net, or NULL if the namespace has none. */
static inline struct net_device *first_net_device(struct net *net)
{
	return list_empty(&net->dev_base_head) ? NULL :
		net_device_entry(net->dev_base_head.next);
}

/* RCU variant of first_net_device(). */
static inline struct net_device *first_net_device_rcu(struct net *net)
{
	struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));

	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}
2593
2594int netdev_boot_setup_check(struct net_device *dev);
2595unsigned long netdev_boot_base(const char *prefix, int unit);
2596struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
2597 const char *hwaddr);
2598struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
2599struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
2600void dev_add_pack(struct packet_type *pt);
2601void dev_remove_pack(struct packet_type *pt);
2602void __dev_remove_pack(struct packet_type *pt);
2603void dev_add_offload(struct packet_offload *po);
2604void dev_remove_offload(struct packet_offload *po);
2605
2606int dev_get_iflink(const struct net_device *dev);
2607int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
2608struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
2609 unsigned short mask);
2610struct net_device *dev_get_by_name(struct net *net, const char *name);
2611struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
2612struct net_device *__dev_get_by_name(struct net *net, const char *name);
2613int dev_alloc_name(struct net_device *dev, const char *name);
2614int dev_open(struct net_device *dev, struct netlink_ext_ack *extack);
2615void dev_close(struct net_device *dev);
2616void dev_close_many(struct list_head *head, bool unlink);
2617void dev_disable_lro(struct net_device *dev);
2618int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
2619u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
2620 struct net_device *sb_dev);
2621u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
2622 struct net_device *sb_dev);
2623int dev_queue_xmit(struct sk_buff *skb);
2624int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
2625int dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
2626int register_netdevice(struct net_device *dev);
2627void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
2628void unregister_netdevice_many(struct list_head *head);
/* Unregister a single device immediately (no batching list). */
static inline void unregister_netdevice(struct net_device *dev)
{
	unregister_netdevice_queue(dev, NULL);
}
2633
2634int netdev_refcnt_read(const struct net_device *dev);
2635void free_netdev(struct net_device *dev);
2636void netdev_freemem(struct net_device *dev);
2637void synchronize_net(void);
2638int init_dummy_netdev(struct net_device *dev);
2639
2640struct net_device *dev_get_by_index(struct net *net, int ifindex);
2641struct net_device *__dev_get_by_index(struct net *net, int ifindex);
2642struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
2643struct net_device *dev_get_by_napi_id(unsigned int napi_id);
2644int netdev_get_name(struct net *net, char *name, int ifindex);
2645int dev_restart(struct net_device *dev);
2646int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
2647
/* Current GRO parsing offset within the packet. */
static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->data_offset;
}

/* Bytes remaining past the current GRO offset. */
static inline unsigned int skb_gro_len(const struct sk_buff *skb)
{
	return skb->len - NAPI_GRO_CB(skb)->data_offset;
}

/* Advance the GRO parsing offset (after a header has been processed). */
static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
{
	NAPI_GRO_CB(skb)->data_offset += len;
}

/* Header access via the frag0 fast path; only valid when the caller has
 * checked skb_gro_header_hard() first. */
static inline void *skb_gro_header_fast(struct sk_buff *skb,
					unsigned int offset)
{
	return NAPI_GRO_CB(skb)->frag0 + offset;
}

/* True when @hlen bytes are NOT available via frag0 and the slow path
 * (pskb_may_pull) must be used. */
static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
{
	return NAPI_GRO_CB(skb)->frag0_len < hlen;
}

/* Disable the frag0 fast path, e.g. after the linear area was modified. */
static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
{
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;
}

/* Slow-path header access: pull @hlen bytes into the linear area (which
 * invalidates frag0) and return a pointer at @offset, or NULL on failure. */
static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
					unsigned int offset)
{
	if (!pskb_may_pull(skb, hlen))
		return NULL;

	skb_gro_frag0_invalidate(skb);
	return skb->data + offset;
}

/* Network header pointer, honoring the frag0 fast path when active. */
static inline void *skb_gro_network_header(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
	       skb_network_offset(skb);
}

/* Subtract a pulled header's contribution from the running GRO checksum. */
static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
					const void *start, unsigned int len)
{
	if (NAPI_GRO_CB(skb)->csum_valid)
		NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
						  csum_partial(start, len, 0));
}
2703
2704
2705
2706
2707
2708
2709__sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
2710
/* Are we parsing exactly at the remote-checksum start offset? */
static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
}

/*
 * Decide whether a software checksum validation is needed for the header
 * at the current GRO offset: skip if the device already verified enough
 * checksums (csum_cnt), if remote checksum offload covers this point, or
 * if a zero checksum is acceptable and present.
 */
static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
						      bool zero_okay,
						      __sum16 check)
{
	return ((skb->ip_summed != CHECKSUM_PARTIAL ||
		 skb_checksum_start_offset(skb) <
		 skb_gro_offset(skb)) &&
		!skb_at_gro_remcsum_start(skb) &&
		NAPI_GRO_CB(skb)->csum_cnt == 0 &&
		(!zero_okay || check));
}

/* Fold the pseudo-header sum into the saved checksum; fall back to a full
 * software checksum when the fast path does not prove validity. */
static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
							   __wsum psum)
{
	if (NAPI_GRO_CB(skb)->csum_valid &&
	    !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
		return 0;

	NAPI_GRO_CB(skb)->csum = psum;

	return __skb_gro_checksum_complete(skb);
}

/* Consume one hardware-verified checksum, or record a software-verified
 * level via __skb_incr_checksum_unnecessary(). */
static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
{
	if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
		/* Device already verified this checksum. */
		NAPI_GRO_CB(skb)->csum_cnt--;
	} else {
		/* No device-verified checksums left; bump the
		 * CHECKSUM_UNNECESSARY level instead.
		 */
		__skb_incr_checksum_unnecessary(skb);
	}
}
2753
/*
 * Validate the checksum at the current GRO offset. Evaluates to 0 when the
 * checksum is (or becomes) known-good; the unnecessary-level bookkeeping is
 * updated on success. @compute_pseudo supplies the pseudo-header sum.
 */
#define __skb_gro_checksum_validate(skb, proto, zero_okay, check,	\
				    compute_pseudo)			\
({									\
	__sum16 __ret = 0;						\
	if (__skb_gro_checksum_validate_needed(skb, zero_okay, check))	\
		__ret = __skb_gro_checksum_validate_complete(skb,	\
				compute_pseudo(skb, proto));		\
	if (!__ret)							\
		skb_gro_incr_csum_unnecessary(skb);			\
	__ret;								\
})

/* Common case: zero checksum is not acceptable. */
#define skb_gro_checksum_validate(skb, proto, compute_pseudo)		\
	__skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)

/* Variant for protocols (e.g. UDP) where a zero checksum means "none". */
#define skb_gro_checksum_validate_zero_check(skb, proto, check,		\
					     compute_pseudo)		\
	__skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)

/* Validation with no pseudo-header (null_compute_pseudo). */
#define skb_gro_checksum_simple_validate(skb)				\
	__skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)

/* Conversion applies only when no checksum information is available yet. */
static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
		!NAPI_GRO_CB(skb)->csum_valid);
}

/* Seed the GRO checksum from a protocol's pseudo-header sum.
 * NOTE(review): the @check parameter is unused here — appears vestigial. */
static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
					      __sum16 check, __wsum pseudo)
{
	NAPI_GRO_CB(skb)->csum = ~pseudo;
	NAPI_GRO_CB(skb)->csum_valid = 1;
}

/* Convert CHECKSUM_NONE into a usable GRO checksum when possible. */
#define skb_gro_checksum_try_convert(skb, proto, check, compute_pseudo)	\
do {									\
	if (__skb_gro_checksum_convert_check(skb))			\
		__skb_gro_checksum_convert(skb, check,			\
					   compute_pseudo(skb, proto));	\
} while (0)
2795
/* State carried across remote-checksum-offload processing of one packet. */
struct gro_remcsum {
	int offset;	/* where the adjusted checksum field lives */
	__wsum delta;	/* adjustment applied, undone on GRO failure */
};

static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
{
	grc->offset = 0;
	grc->delta = 0;
}

/*
 * Apply remote checksum offload at tunnel-header offset @off.
 * In "partial" mode only records the start offset; otherwise adjusts the
 * inner checksum field in place (via remcsum_adjust) and records the delta
 * in @grc so it can be undone later. Returns a (possibly re-fetched) header
 * pointer, or NULL if the header cannot be pulled. Requires a valid GRO
 * checksum (BUG otherwise).
 */
static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
					    unsigned int off, size_t hdrlen,
					    int start, int offset,
					    struct gro_remcsum *grc,
					    bool nopartial)
{
	__wsum delta;
	size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);

	BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);

	if (!nopartial) {
		NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
		return ptr;
	}

	/* Re-validate header availability for the full adjusted length. */
	ptr = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, off + plen)) {
		ptr = skb_gro_header_slow(skb, off + plen, off);
		if (!ptr)
			return NULL;
	}

	delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
			       start, offset);

	/* Keep the running GRO checksum consistent with the edit. */
	NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);

	grc->offset = off + hdrlen + offset;
	grc->delta = delta;

	return ptr;
}

/* Undo a previously applied remote-checksum adjustment (GRO gave up). */
static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
					   struct gro_remcsum *grc)
{
	void *ptr;
	size_t plen = grc->offset + sizeof(u16);

	if (!grc->delta)
		return;

	ptr = skb_gro_header_fast(skb, grc->offset);
	if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
		ptr = skb_gro_header_slow(skb, plen, grc->offset);
		if (!ptr)
			return;
	}

	remcsum_unadjust((__sum16 *)ptr, grc->delta);
}
2860
#ifdef CONFIG_XFRM_OFFLOAD
/* With XFRM offload, a gro_receive callback may return -EINPROGRESS to
 * signal asynchronous processing; in that case leave the flush state alone. */
static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
{
	if (PTR_ERR(pp) != -EINPROGRESS)
		NAPI_GRO_CB(skb)->flush |= flush;
}
/* As above, additionally unwinding remote-checksum state when not async. */
static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
					       struct sk_buff *pp,
					       int flush,
					       struct gro_remcsum *grc)
{
	if (PTR_ERR(pp) != -EINPROGRESS) {
		NAPI_GRO_CB(skb)->flush |= flush;
		skb_gro_remcsum_cleanup(skb, grc);
		skb->remcsum_offload = 0;
	}
}
#else
/* No async path without XFRM offload: always record the flush state. */
static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
{
	NAPI_GRO_CB(skb)->flush |= flush;
}
static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
					       struct sk_buff *pp,
					       int flush,
					       struct gro_remcsum *grc)
{
	NAPI_GRO_CB(skb)->flush |= flush;
	skb_gro_remcsum_cleanup(skb, grc);
	skb->remcsum_offload = 0;
}
#endif
2893
2894static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
2895 unsigned short type,
2896 const void *daddr, const void *saddr,
2897 unsigned int len)
2898{
2899 if (!dev->header_ops || !dev->header_ops->create)
2900 return 0;
2901
2902 return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
2903}
2904
2905static inline int dev_parse_header(const struct sk_buff *skb,
2906 unsigned char *haddr)
2907{
2908 const struct net_device *dev = skb->dev;
2909
2910 if (!dev->header_ops || !dev->header_ops->parse)
2911 return 0;
2912 return dev->header_ops->parse(skb, haddr);
2913}
2914
2915static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb)
2916{
2917 const struct net_device *dev = skb->dev;
2918
2919 if (!dev->header_ops || !dev->header_ops->parse_protocol)
2920 return 0;
2921 return dev->header_ops->parse_protocol(skb);
2922}
2923
2924
/*
 * dev_validate_header - check a user-supplied link-layer header.
 *
 * Used by AF_PACKET sockets. Full-length headers are accepted outright;
 * anything shorter than min_header_len is rejected. In between,
 * CAP_SYS_RAWIO callers get the remainder zero-padded (the buffer is
 * guaranteed to hold hard_header_len bytes), otherwise the device's own
 * validate hook decides.
 */
static inline bool dev_validate_header(const struct net_device *dev,
				       char *ll_header, int len)
{
	if (likely(len >= dev->hard_header_len))
		return true;
	if (len < dev->min_header_len)
		return false;

	if (capable(CAP_SYS_RAWIO)) {
		memset(ll_header + len, 0, dev->hard_header_len - len);
		return true;
	}

	if (dev->header_ops && dev->header_ops->validate)
		return dev->header_ops->validate(ll_header, len);

	return false;
}
2943
/* Per-family handler for the SIOCGIFCONF ioctl. */
typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr,
			   int len, int size);
int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
/* Unregister by installing a NULL handler. */
static inline int unregister_gifconf(unsigned int family)
{
	return register_gifconf(family, NULL);
}

#ifdef CONFIG_NET_FLOW_LIMIT
#define FLOW_LIMIT_HISTORY	(1 << 7)
/* Per-softirq-cpu flow accounting used to drop packets from hog flows. */
struct sd_flow_limit {
	u64			count;
	unsigned int		num_buckets;
	unsigned int		history_head;
	u16			history[FLOW_LIMIT_HISTORY];
	u8			buckets[];	/* flexible array of counters */
};

extern int netdev_flow_limit_table_len;
#endif /* CONFIG_NET_FLOW_LIMIT */
2964
2965
2966
2967
/*
 * struct softnet_data - per-cpu incoming/outgoing packet processing state.
 * The RPS members are placed on their own cachelines to limit cross-cpu
 * bouncing (input_queue_head is written by the owning cpu, csd by remote
 * cpus issuing IPIs).
 */
struct softnet_data {
	struct list_head	poll_list;	/* NAPI instances to poll */
	struct sk_buff_head	process_queue;

	/* RX softirq statistics. */
	unsigned int		processed;
	unsigned int		time_squeeze;	/* budget/time exhausted count */
	unsigned int		received_rps;
#ifdef CONFIG_RPS
	struct softnet_data	*rps_ipi_list;	/* cpus to kick via IPI */
#endif
#ifdef CONFIG_NET_FLOW_LIMIT
	struct sd_flow_limit __rcu *flow_limit;
#endif
	struct Qdisc		*output_queue;	/* qdiscs pending __qdisc_run */
	struct Qdisc		**output_queue_tailp;
	struct sk_buff		*completion_queue;	/* skbs to free */
#ifdef CONFIG_XFRM_OFFLOAD
	struct sk_buff_head	xfrm_backlog;
#endif
	/* Local TX state; written only by the owning cpu. */
	struct {
		u16 recursion;	/* depth guard, see dev_xmit_recursion() */
		u8  more;	/* more packets coming (xmit_more batching) */
	} xmit;
#ifdef CONFIG_RPS
	/* input_queue_head should be written by the cpu owning this struct,
	 * and only read by other cpus — hence its own cacheline.
	 */
	unsigned int		input_queue_head ____cacheline_aligned_in_smp;

	/* Elements below can be accessed between cpus for RPS/RFS. */
	call_single_data_t	csd ____cacheline_aligned_in_smp;
	struct softnet_data	*rps_ipi_next;
	unsigned int		cpu;
	unsigned int		input_queue_tail;
#endif
	unsigned int		dropped;	/* backlog-full drops */
	struct sk_buff_head	input_pkt_queue;	/* backlog queue */
	struct napi_struct	backlog;	/* NAPI draining the backlog */

};
3010
/* RPS flow-table bookkeeping: advance the backlog head counter. */
static inline void input_queue_head_incr(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	sd->input_queue_head++;
#endif
}

/* Advance the backlog tail counter, saving the new value for the caller. */
static inline void input_queue_tail_incr_save(struct softnet_data *sd,
					      unsigned int *qtail)
{
#ifdef CONFIG_RPS
	*qtail = ++sd->input_queue_tail;
#endif
}

DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);

/* Current TX recursion depth on this cpu (preemption-safe read). */
static inline int dev_recursion_level(void)
{
	return this_cpu_read(softnet_data.xmit.recursion);
}

/* Refuse dev_queue_xmit() re-entry deeper than this, to bound stack use. */
#define XMIT_RECURSION_LIMIT	10
static inline bool dev_xmit_recursion(void)
{
	return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
			XMIT_RECURSION_LIMIT);
}

/* Callers bracket nested transmits with inc/dec (preemption disabled). */
static inline void dev_xmit_recursion_inc(void)
{
	__this_cpu_inc(softnet_data.xmit.recursion);
}

static inline void dev_xmit_recursion_dec(void)
{
	__this_cpu_dec(softnet_data.xmit.recursion);
}
3049
3050void __netif_schedule(struct Qdisc *q);
3051void netif_schedule_queue(struct netdev_queue *txq);
3052
3053static inline void netif_tx_schedule_all(struct net_device *dev)
3054{
3055 unsigned int i;
3056
3057 for (i = 0; i < dev->num_tx_queues; i++)
3058 netif_schedule_queue(netdev_get_tx_queue(dev, i));
3059}
3060
/* Allow transmit on one TX queue by clearing its driver-XOFF bit. */
static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
	clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 * netif_start_queue - allow transmit
 * @dev: network device
 *
 * Allow upper layers to call the device's transmit routine.  Only queue 0
 * is affected; multiqueue devices use netif_tx_start_all_queues().
 */
static inline void netif_start_queue(struct net_device *dev)
{
	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
}

/* Allow transmit on every TX queue of @dev. */
static inline void netif_tx_start_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_start_queue(txq);
	}
}
3086
void netif_tx_wake_queue(struct netdev_queue *dev_queue);

/**
 * netif_wake_queue - restart transmit
 * @dev: network device
 *
 * Allow upper layers to transmit again and reschedule queue 0.
 * Used when a driver has resources to transmit after being stopped.
 */
static inline void netif_wake_queue(struct net_device *dev)
{
	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
}

/* Wake every TX queue of @dev. */
static inline void netif_tx_wake_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_wake_queue(txq);
	}
}
3110
/* Stop transmit on one TX queue by setting its driver-XOFF bit. */
static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
	set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 * netif_stop_queue - stop transmitted packets
 * @dev: network device
 *
 * Stop upper layers from calling the device transmit routine on queue 0.
 * Typically used when a driver runs out of TX resources.
 */
static inline void netif_stop_queue(struct net_device *dev)
{
	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}

void netif_tx_stop_all_queues(struct net_device *dev);

/* True when the driver has stopped this TX queue (driver-XOFF bit set). */
static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
	return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}
3134
3135
3136
3137
3138
3139
3140
/**
 * netif_queue_stopped - test if transmit queue is flowblocked
 * @dev: network device
 *
 * Test if transmit queue 0 on the device is currently unable to send.
 */
static inline bool netif_queue_stopped(const struct net_device *dev)
{
	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}

/* True if the queue is stopped by either the driver or the stack (BQL). */
static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_ANY_XOFF;
}

/* True if the queue is stopped by any party, or frozen by netif_tx_lock(). */
static inline bool
netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
}

/* True if the queue is driver-stopped or frozen (stack XOFF ignored). */
static inline bool
netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
}
3162
3163
3164
3165
3166
3167
3168
3169
/**
 * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
 * @dev_queue: pointer to transmit queue
 *
 * BQL-enabled drivers may use this in their enqueue path to give a hint
 * that the write-hot part of the dql structure will be touched shortly.
 */
static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_BQL
	prefetchw(&dev_queue->dql.num_queued);
#endif
}

/**
 * netdev_txq_bql_complete_prefetchw - prefetch bql data for write
 * @dev_queue: pointer to transmit queue
 *
 * Counterpart for the TX-completion path: prefetches the completion-side
 * portion of the dql structure for writing.
 */
static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_BQL
	prefetchw(&dev_queue->dql.limit);
#endif
}
3190
/*
 * Account @bytes queued to @dev_queue for BQL, stopping the queue (stack
 * XOFF) when the byte-queue limit is exhausted.  No-op without CONFIG_BQL.
 */
static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
					unsigned int bytes)
{
#ifdef CONFIG_BQL
	dql_queued(&dev_queue->dql, bytes);

	if (likely(dql_avail(&dev_queue->dql) >= 0))
		return;

	set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);

	/*
	 * Make the STACK_XOFF store visible before re-reading dql_avail
	 * below; pairs with the smp_mb() in netdev_tx_completed_queue()
	 * so a concurrent completion cannot be missed.
	 */
	smp_mb();

	/* check again in case a completion just made room available */
	if (unlikely(dql_avail(&dev_queue->dql) >= 0))
		clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
#endif
}
3214
3215
3216
3217
3218
3219
3220
/*
 * Variant of netdev_tx_sent_queue() for drivers using xmit_more batching.
 * While @xmit_more is set, only the byte accounting is done and the return
 * value tells the caller whether the doorbell must be rung anyway (queue
 * already stopped).  For the last frame of a batch it behaves like
 * netdev_tx_sent_queue() and always requests a doorbell (returns true).
 */
static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
					  unsigned int bytes,
					  bool xmit_more)
{
	if (xmit_more) {
#ifdef CONFIG_BQL
		dql_queued(&dev_queue->dql, bytes);
#endif
		return netif_tx_queue_stopped(dev_queue);
	}
	netdev_tx_sent_queue(dev_queue, bytes);
	return true;
}
3234
3235
3236
3237
3238
3239
3240
3241
3242
3243
/**
 * netdev_sent_queue - report the number of bytes queued to hardware
 * @dev: network device
 * @bytes: number of bytes queued to the hardware device queue
 *
 * Single-queue convenience wrapper: BQL accounting on TX queue 0.
 */
static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
{
	netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
}

/* Single-queue wrapper for __netdev_tx_sent_queue() (xmit_more aware). */
static inline bool __netdev_sent_queue(struct net_device *dev,
				       unsigned int bytes,
				       bool xmit_more)
{
	return __netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes,
				      xmit_more);
}
3256
/*
 * Account @bytes (@pkts packets) completed by the hardware on @dev_queue
 * for BQL, and restart the queue if the stack had stopped it and room is
 * now available.  No-op without CONFIG_BQL or when @bytes is zero.
 */
static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
					     unsigned int pkts, unsigned int bytes)
{
#ifdef CONFIG_BQL
	if (unlikely(!bytes))
		return;

	dql_completed(&dev_queue->dql, bytes);

	/*
	 * Order the dql_completed() update before reading STACK_XOFF
	 * below; pairs with the smp_mb() in netdev_tx_sent_queue() so
	 * neither side can miss the other's update.
	 */
	smp_mb();

	if (dql_avail(&dev_queue->dql) < 0)
		return;

	/* only one caller wins the test_and_clear and reschedules */
	if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
		netif_schedule_queue(dev_queue);
#endif
}
3280
3281
3282
3283
3284
3285
3286
3287
3288
3289
3290
/**
 * netdev_completed_queue - report bytes and packets completed by device
 * @dev: network device
 * @pkts: actual number of packets sent over the medium
 * @bytes: actual number of bytes sent over the medium
 *
 * Single-queue convenience wrapper: BQL completion on TX queue 0.
 */
static inline void netdev_completed_queue(struct net_device *dev,
					  unsigned int pkts, unsigned int bytes)
{
	netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
}

/* Reset one queue's BQL state: clear the stack XOFF bit and the dql counters. */
static inline void netdev_tx_reset_queue(struct netdev_queue *q)
{
#ifdef CONFIG_BQL
	clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
	dql_reset(&q->dql);
#endif
}
3304
3305
3306
3307
3308
3309
3310
3311
/**
 * netdev_reset_queue - reset the packets and bytes count of a network device
 * @dev: network device
 *
 * Reset the BQL byte/packet counters of TX queue 0 and clear its
 * software flow-control (stack XOFF) bit.
 *
 * Note: the parameter was previously named "dev_queue" although it is a
 * struct net_device, not a struct netdev_queue; renamed for clarity.
 */
static inline void netdev_reset_queue(struct net_device *dev)
{
	netdev_tx_reset_queue(netdev_get_tx_queue(dev, 0));
}
3316
3317
3318
3319
3320
3321
3322
3323
3324
3325static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
3326{
3327 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
3328 net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
3329 dev->name, queue_index,
3330 dev->real_num_tx_queues);
3331 return 0;
3332 }
3333
3334 return queue_index;
3335}
3336
3337
3338
3339
3340
3341
3342
/**
 * netif_running - test if up
 * @dev: network device
 *
 * Test if the device has been brought up (__LINK_STATE_START set).
 */
static inline bool netif_running(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}
3347
3348
3349
3350
3351
3352
3353
3354
3355
3356
3357
3358
3359
3360
3361
/**
 * netif_start_subqueue - allow sending packets on subqueue
 * @dev: network device
 * @queue_index: sub queue index
 *
 * Start an individual transmit queue of a device with multiple queues.
 */
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	netif_tx_start_queue(txq);
}

/**
 * netif_stop_subqueue - stop sending packets on subqueue
 * @dev: network device
 * @queue_index: sub queue index
 *
 * Stop an individual transmit queue of a device with multiple queues.
 */
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
	netif_tx_stop_queue(txq);
}

/**
 * __netif_subqueue_stopped - test status of subqueue
 * @dev: network device
 * @queue_index: sub queue index
 *
 * Check individual transmit queue of a device with multiple queues.
 */
static inline bool __netif_subqueue_stopped(const struct net_device *dev,
					    u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	return netif_tx_queue_stopped(txq);
}

/* Same check, using the queue mapping already recorded in @skb. */
static inline bool netif_subqueue_stopped(const struct net_device *dev,
					  struct sk_buff *skb)
{
	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}

/**
 * netif_wake_subqueue - allow sending packets on subqueue
 * @dev: network device
 * @queue_index: sub queue index
 *
 * Resume an individual transmit queue of a device with multiple queues.
 */
static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	netif_tx_wake_queue(txq);
}
3416
#ifdef CONFIG_XPS
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
			u16 index);
int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
			  u16 index, bool is_rxqs_map);

/**
 * netif_attr_test_mask - Test a CPU or Rx queue set in a mask
 * @j: CPU/Rx queue index
 * @mask: bitmask of all cpus/rx queues
 * @nr_bits: number of bits in the bitmask
 *
 * Test if a CPU or Rx queue index is set in a mask of all CPU/Rx queues.
 */
static inline bool netif_attr_test_mask(unsigned long j,
					const unsigned long *mask,
					unsigned int nr_bits)
{
	cpu_max_bits_warn(j, nr_bits);
	return test_bit(j, mask);
}

/**
 * netif_attr_test_online - Test for online CPU/Rx queue
 * @j: CPU/Rx queue index
 * @online_mask: bitmask for CPUs/Rx queues that are online
 * @nr_bits: number of bits in the bitmask
 *
 * Returns true if a CPU/Rx queue is online.  A NULL @online_mask is
 * treated as "every index below @nr_bits is online".
 */
static inline bool netif_attr_test_online(unsigned long j,
					  const unsigned long *online_mask,
					  unsigned int nr_bits)
{
	cpu_max_bits_warn(j, nr_bits);

	if (online_mask)
		return test_bit(j, online_mask);

	return (j < nr_bits);
}

/**
 * netif_attrmask_next - get the next CPU/Rx queue in a cpu/Rx queues mask
 * @n: CPU/Rx queue index
 * @srcp: the cpumask/Rx queue mask pointer
 * @nr_bits: number of bits in the bitmask
 *
 * Returns >= nr_bits if no further CPUs/Rx queues set.  A NULL @srcp is
 * treated as a full mask (simply advances to @n + 1).
 */
static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp,
					       unsigned int nr_bits)
{
	/* n == -1 is a legal initial value (start of iteration) */
	if (n != -1)
		cpu_max_bits_warn(n, nr_bits);

	if (srcp)
		return find_next_bit(srcp, nr_bits, n + 1);

	return n + 1;
}

/**
 * netif_attrmask_next_and - get the next CPU/Rx queue in both masks
 * @n: CPU/Rx queue index
 * @src1p: the first CPUs/Rx queues mask pointer
 * @src2p: the second CPUs/Rx queues mask pointer
 * @nr_bits: number of bits in the bitmask
 *
 * Returns >= nr_bits if no further CPUs/Rx queues set in both.  A NULL
 * mask argument is treated as a full mask.
 */
static inline int netif_attrmask_next_and(int n, const unsigned long *src1p,
					  const unsigned long *src2p,
					  unsigned int nr_bits)
{
	/* n == -1 is a legal initial value (start of iteration) */
	if (n != -1)
		cpu_max_bits_warn(n, nr_bits);

	if (src1p && src2p)
		return find_next_and_bit(src1p, src2p, nr_bits, n + 1);
	else if (src1p)
		return find_next_bit(src1p, nr_bits, n + 1);
	else if (src2p)
		return find_next_bit(src2p, nr_bits, n + 1);

	return n + 1;
}
#else
/* XPS disabled: the setters are no-ops that report success. */
static inline int netif_set_xps_queue(struct net_device *dev,
				      const struct cpumask *mask,
				      u16 index)
{
	return 0;
}

static inline int __netif_set_xps_queue(struct net_device *dev,
					const unsigned long *mask,
					u16 index, bool is_rxqs_map)
{
	return 0;
}
#endif
3521
3522
3523
3524
3525
3526
3527
/**
 * netif_is_multiqueue - test if device has multiple transmit queues
 * @dev: network device
 *
 * Check if device has multiple transmit queues.
 */
static inline bool netif_is_multiqueue(const struct net_device *dev)
{
	return dev->num_tx_queues > 1;
}
3532
3533int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
3534
3535#ifdef CONFIG_SYSFS
3536int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
3537#else
/* Without sysfs there is nothing to publish; just record the new count. */
static inline int netif_set_real_num_rx_queues(struct net_device *dev,
					       unsigned int rxqs)
{
	dev->real_num_rx_queues = rxqs;
	return 0;
}
3544#endif
3545
/* Return the @rxq'th RX queue of @dev (no bounds check). */
static inline struct netdev_rx_queue *
__netif_get_rx_queue(struct net_device *dev, unsigned int rxq)
{
	return dev->_rx + rxq;
}

#ifdef CONFIG_SYSFS
/* Recover the index of @queue within its device's _rx array. */
static inline unsigned int get_netdev_rx_queue_index(
		struct netdev_rx_queue *queue)
{
	struct net_device *dev = queue->dev;
	int index = queue - dev->_rx;

	BUG_ON(index >= dev->num_rx_queues);
	return index;
}
#endif
3563
3564#define DEFAULT_MAX_NUM_RSS_QUEUES (8)
3565int netif_get_num_default_rss_queues(void);
3566
/* Why an skb is being freed; selects consume vs drop accounting/tracing. */
enum skb_free_reason {
	SKB_REASON_CONSUMED,	/* packet was successfully processed/transmitted */
	SKB_REASON_DROPPED,	/* packet was dropped */
};
3571
3572void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
3573void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
3574
3575
3576
3577
3578
3579
3580
3581
3582
3583
3584
3585
3586
3587
3588
3589
3590
3591
3592
3593
/* Free a dropped skb from hardirq context (deferred to softirq). */
static inline void dev_kfree_skb_irq(struct sk_buff *skb)
{
	__dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
}

/* Free a successfully-handled skb from hardirq context. */
static inline void dev_consume_skb_irq(struct sk_buff *skb)
{
	__dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
}

/* Free a dropped skb from any context (process, softirq or hardirq). */
static inline void dev_kfree_skb_any(struct sk_buff *skb)
{
	__dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
}

/* Free a successfully-handled skb from any context. */
static inline void dev_consume_skb_any(struct sk_buff *skb)
{
	__dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
}
3613
3614void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog);
3615int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb);
3616int netif_rx(struct sk_buff *skb);
3617int netif_rx_ni(struct sk_buff *skb);
3618int netif_receive_skb(struct sk_buff *skb);
3619int netif_receive_skb_core(struct sk_buff *skb);
3620void netif_receive_skb_list(struct list_head *head);
3621gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
3622void napi_gro_flush(struct napi_struct *napi, bool flush_old);
3623struct sk_buff *napi_get_frags(struct napi_struct *napi);
3624gro_result_t napi_gro_frags(struct napi_struct *napi);
3625struct packet_offload *gro_find_receive_by_type(__be16 type);
3626struct packet_offload *gro_find_complete_by_type(__be16 type);
3627
/* Discard the skb being assembled by napi_get_frags()/napi_gro_frags(). */
static inline void napi_free_frags(struct napi_struct *napi)
{
	kfree_skb(napi->skb);
	napi->skb = NULL;
}
3633
3634bool netdev_is_rx_handler_busy(struct net_device *dev);
3635int netdev_rx_handler_register(struct net_device *dev,
3636 rx_handler_func_t *rx_handler,
3637 void *rx_handler_data);
3638void netdev_rx_handler_unregister(struct net_device *dev);
3639
3640bool dev_valid_name(const char *name);
3641int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
3642 bool *need_copyout);
3643int dev_ifconf(struct net *net, struct ifconf *, int);
3644int dev_ethtool(struct net *net, struct ifreq *);
3645unsigned int dev_get_flags(const struct net_device *);
3646int __dev_change_flags(struct net_device *dev, unsigned int flags,
3647 struct netlink_ext_ack *extack);
3648int dev_change_flags(struct net_device *dev, unsigned int flags,
3649 struct netlink_ext_ack *extack);
3650void __dev_notify_flags(struct net_device *, unsigned int old_flags,
3651 unsigned int gchanges);
3652int dev_change_name(struct net_device *, const char *);
3653int dev_set_alias(struct net_device *, const char *, size_t);
3654int dev_get_alias(const struct net_device *, char *, size_t);
3655int dev_change_net_namespace(struct net_device *, struct net *, const char *);
3656int __dev_set_mtu(struct net_device *, int);
3657int dev_set_mtu_ext(struct net_device *dev, int mtu,
3658 struct netlink_ext_ack *extack);
3659int dev_set_mtu(struct net_device *, int);
3660int dev_change_tx_queue_len(struct net_device *, unsigned long);
3661void dev_set_group(struct net_device *, int);
3662int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
3663 struct netlink_ext_ack *extack);
3664int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
3665 struct netlink_ext_ack *extack);
3666int dev_change_carrier(struct net_device *, bool new_carrier);
3667int dev_get_phys_port_id(struct net_device *dev,
3668 struct netdev_phys_item_id *ppid);
3669int dev_get_phys_port_name(struct net_device *dev,
3670 char *name, size_t len);
3671int dev_get_port_parent_id(struct net_device *dev,
3672 struct netdev_phys_item_id *ppid, bool recurse);
3673bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b);
3674int dev_change_proto_down(struct net_device *dev, bool proto_down);
3675int dev_change_proto_down_generic(struct net_device *dev, bool proto_down);
3676struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
3677struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
3678 struct netdev_queue *txq, int *ret);
3679
3680typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
3681int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
3682 int fd, u32 flags);
3683u32 __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op,
3684 enum bpf_netdev_command cmd);
3685int xdp_umem_query(struct net_device *dev, u16 queue_id);
3686
3687int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
3688int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
3689bool is_skb_forwardable(const struct net_device *dev,
3690 const struct sk_buff *skb);
3691
/*
 * Common validation/scrub step for forwarding @skb to @dev.  Drops the
 * packet (counting it in rx_dropped) when frag orphaning fails or the
 * packet is not forwardable to @dev; otherwise scrubs namespace-crossing
 * state and resets the priority.  Returns 0 or NET_RX_DROP.
 */
static __always_inline int ____dev_forward_skb(struct net_device *dev,
					       struct sk_buff *skb)
{
	if (skb_orphan_frags(skb, GFP_ATOMIC) ||
	    unlikely(!is_skb_forwardable(dev, skb))) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	skb_scrub_packet(skb, true);
	skb->priority = 0;
	return 0;
}
3706
3707bool dev_nit_active(struct net_device *dev);
3708void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
3709
3710extern int netdev_budget;
3711extern unsigned int netdev_budget_usecs;
3712
3713
3714void netdev_run_todo(void);
3715
3716
3717
3718
3719
3720
3721
/**
 * dev_put - release reference to device
 * @dev: network device
 *
 * Release reference to device to allow it to be freed.
 * Refcount is kept per-CPU (pcpu_refcnt), so this is a cheap decrement.
 */
static inline void dev_put(struct net_device *dev)
{
	this_cpu_dec(*dev->pcpu_refcnt);
}

/**
 * dev_hold - get reference to device
 * @dev: network device
 *
 * Hold reference to device to keep it from being freed.
 */
static inline void dev_hold(struct net_device *dev)
{
	this_cpu_inc(*dev->pcpu_refcnt);
}
3737
3738
3739
3740
3741
3742
3743
3744
3745
3746
3747void linkwatch_init_dev(struct net_device *dev);
3748void linkwatch_fire_event(struct net_device *dev);
3749void linkwatch_forget_dev(struct net_device *dev);
3750
3751
3752
3753
3754
3755
3756
/**
 * netif_carrier_ok - test if carrier present
 * @dev: network device
 *
 * Check if carrier is present on device (NOCARRIER bit clear).
 */
static inline bool netif_carrier_ok(const struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}
3761
3762unsigned long dev_trans_start(struct net_device *dev);
3763
3764void __netdev_watchdog_up(struct net_device *dev);
3765
3766void netif_carrier_on(struct net_device *dev);
3767
3768void netif_carrier_off(struct net_device *dev);
3769
3770
3771
3772
3773
3774
3775
3776
3777
3778
3779
3780
3781
/**
 * netif_dormant_on - mark device as dormant
 * @dev: network device
 *
 * Mark device as dormant and notify the link watcher if the state
 * actually changed (test_and_set avoids duplicate events).
 */
static inline void netif_dormant_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 * netif_dormant_off - set device as not dormant
 * @dev: network device
 *
 * Clear the dormant state and notify the link watcher on change.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 * netif_dormant - test if device is dormant
 * @dev: network device
 *
 * Check if device is dormant.
 */
static inline bool netif_dormant(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_DORMANT, &dev->state);
}

/**
 * netif_oper_up - test if device is operational
 * @dev: network device
 *
 * Check if carrier is operational: UP, or UNKNOWN (treated as up).
 */
static inline bool netif_oper_up(const struct net_device *dev)
{
	return (dev->operstate == IF_OPER_UP ||
		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}

/**
 * netif_device_present - is device available or removed
 * @dev: network device
 *
 * Check if device has not been removed from the system.
 */
static inline bool netif_device_present(struct net_device *dev)
{
	return test_bit(__LINK_STATE_PRESENT, &dev->state);
}
3834
3835void netif_device_detach(struct net_device *dev);
3836
3837void netif_device_attach(struct net_device *dev);
3838
3839
3840
3841
3842
/* Per-driver message-class bits; stored in a driver's msg_enable field
 * and tested via the netif_msg_*() macros below.
 */
enum {
	NETIF_MSG_DRV		= 0x0001,
	NETIF_MSG_PROBE		= 0x0002,
	NETIF_MSG_LINK		= 0x0004,
	NETIF_MSG_TIMER		= 0x0008,
	NETIF_MSG_IFDOWN	= 0x0010,
	NETIF_MSG_IFUP		= 0x0020,
	NETIF_MSG_RX_ERR	= 0x0040,
	NETIF_MSG_TX_ERR	= 0x0080,
	NETIF_MSG_TX_QUEUED	= 0x0100,
	NETIF_MSG_INTR		= 0x0200,
	NETIF_MSG_TX_DONE	= 0x0400,
	NETIF_MSG_RX_STATUS	= 0x0800,
	NETIF_MSG_PKTDATA	= 0x1000,
	NETIF_MSG_HW		= 0x2000,
	NETIF_MSG_WOL		= 0x4000,
};
3860
/* Convenience tests: is the given message class enabled in p->msg_enable? */
#define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)
3876
3877static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
3878{
3879
3880 if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
3881 return default_msg_enable_bits;
3882 if (debug_value == 0)
3883 return 0;
3884
3885 return (1U << debug_value) - 1;
3886}
3887
/* Take one TX queue's xmit lock and record @cpu as the owner. */
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
	spin_lock(&txq->_xmit_lock);
	txq->xmit_lock_owner = cpu;
}

/* Lockless (NETIF_F_LLTX) variant: sparse annotation only, always succeeds. */
static inline bool __netif_tx_acquire(struct netdev_queue *txq)
{
	__acquire(&txq->_xmit_lock);
	return true;
}

/* Release counterpart of __netif_tx_acquire() (annotation only). */
static inline void __netif_tx_release(struct netdev_queue *txq)
{
	__release(&txq->_xmit_lock);
}

/* Take the xmit lock with BHs disabled; records the current CPU as owner. */
static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
	spin_lock_bh(&txq->_xmit_lock);
	txq->xmit_lock_owner = smp_processor_id();
}

/* Try to take the xmit lock; records the owner only on success. */
static inline bool __netif_tx_trylock(struct netdev_queue *txq)
{
	bool ok = spin_trylock(&txq->_xmit_lock);
	if (likely(ok))
		txq->xmit_lock_owner = smp_processor_id();
	return ok;
}

/* Drop the xmit lock; the owner is cleared before unlocking. */
static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock(&txq->_xmit_lock);
}

/* BH-enabled counterpart of __netif_tx_unlock(). */
static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock_bh(&txq->_xmit_lock);
}

/* Record transmit activity; only valid while holding the xmit lock
 * (owner != -1), which is why the check precedes the store.
 */
static inline void txq_trans_update(struct netdev_queue *txq)
{
	if (txq->xmit_lock_owner != -1)
		txq->trans_start = jiffies;
}
3935}
3936
3937
/* Legacy "dev->trans_start" update: stamp queue 0's trans_start with the
 * current jiffies; the compare avoids dirtying the cacheline when unchanged.
 */
static inline void netif_trans_update(struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	if (txq->trans_start != jiffies)
		txq->trans_start = jiffies;
}
3945
3946
3947
3948
3949
3950
3951
/**
 * netif_tx_lock - grab network device transmit lock
 * @dev: network device
 *
 * Get network device transmit lock.  Takes tx_global_lock and then
 * freezes every TX queue, cycling each queue's xmit lock so no CPU is
 * left inside its transmit path afterwards.
 */
static inline void netif_tx_lock(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	spin_lock(&dev->tx_global_lock);
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/*
		 * We are the only thread of execution doing a
		 * freeze, but we have to grab the _xmit_lock in
		 * order to synchronize with threads which are in
		 * the ->hard_start_xmit() handler and have already
		 * checked the frozen bit.
		 */
		__netif_tx_lock(txq, cpu);
		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
		__netif_tx_unlock(txq);
	}
}

/* As netif_tx_lock() but with bottom halves disabled first. */
static inline void netif_tx_lock_bh(struct net_device *dev)
{
	local_bh_disable();
	netif_tx_lock(dev);
}

/* Undo netif_tx_lock(): unfreeze every queue, reschedule each in case
 * work piled up while frozen, then release the global TX lock.
 */
static inline void netif_tx_unlock(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* the frozen bit alone kept transmitters out, so no
		 * per-queue lock is needed to clear it here
		 */
		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
		netif_schedule_queue(txq);
	}
	spin_unlock(&dev->tx_global_lock);
}

/* Counterpart of netif_tx_lock_bh(). */
static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	netif_tx_unlock(dev);
	local_bh_enable();
}
4002
/* Take the per-queue xmit lock unless the device is lockless
 * (NETIF_F_LLTX), in which case only the sparse annotation is acquired.
 */
#define HARD_TX_LOCK(dev, txq, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_lock(txq, cpu);		\
	} else {					\
		__netif_tx_acquire(txq);		\
	}						\
}

/* Trylock variant; always "succeeds" for NETIF_F_LLTX devices. */
#define HARD_TX_TRYLOCK(dev, txq)			\
	(((dev->features & NETIF_F_LLTX) == 0) ?	\
		__netif_tx_trylock(txq) :		\
		__netif_tx_acquire(txq))

/* Release counterpart of HARD_TX_LOCK(). */
#define HARD_TX_UNLOCK(dev, txq) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_unlock(txq);			\
	} else {					\
		__netif_tx_release(txq);		\
	}						\
}
4023
4024static inline void netif_tx_disable(struct net_device *dev)
4025{
4026 unsigned int i;
4027 int cpu;
4028
4029 local_bh_disable();
4030 cpu = smp_processor_id();
4031 for (i = 0; i < dev->num_tx_queues; i++) {
4032 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
4033
4034 __netif_tx_lock(txq, cpu);
4035 netif_tx_stop_queue(txq);
4036 __netif_tx_unlock(txq);
4037 }
4038 local_bh_enable();
4039}
4040
/* Protects dev->uc/dev->mc and related address lists. */
static inline void netif_addr_lock(struct net_device *dev)
{
	spin_lock(&dev->addr_list_lock);
}

/* Nested variant for stacked devices: the lockdep subclass comes from
 * ndo_get_lock_subclass() when the driver provides one, so lockdep can
 * tell upper/lower device locks apart.
 */
static inline void netif_addr_lock_nested(struct net_device *dev)
{
	int subclass = SINGLE_DEPTH_NESTING;

	if (dev->netdev_ops->ndo_get_lock_subclass)
		subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);

	spin_lock_nested(&dev->addr_list_lock, subclass);
}

/* BH-disabled variant of netif_addr_lock(). */
static inline void netif_addr_lock_bh(struct net_device *dev)
{
	spin_lock_bh(&dev->addr_list_lock);
}

static inline void netif_addr_unlock(struct net_device *dev)
{
	spin_unlock(&dev->addr_list_lock);
}

static inline void netif_addr_unlock_bh(struct net_device *dev)
{
	spin_unlock_bh(&dev->addr_list_lock);
}
4070
4071
4072
4073
4074
/* Iterate over a device's hardware address list; RCU-safe traversal,
 * so callers must be in an RCU read-side section (or hold the list lock).
 */
#define for_each_dev_addr(dev, ha) \
		list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
4077
4078
4079
4080void ether_setup(struct net_device *dev);
4081
4082
4083struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
4084 unsigned char name_assign_type,
4085 void (*setup)(struct net_device *),
4086 unsigned int txqs, unsigned int rxqs);
4087int dev_get_valid_name(struct net *net, struct net_device *dev,
4088 const char *name);
4089
/* Allocate a single-queue (1 TX, 1 RX) network device. */
#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
	alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)

/* Allocate a device with @count TX queues and @count RX queues. */
#define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \
	alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \
			 count)
4096
4097int register_netdev(struct net_device *dev);
4098void unregister_netdev(struct net_device *dev);
4099
4100
4101int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
4102 struct netdev_hw_addr_list *from_list, int addr_len);
4103void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
4104 struct netdev_hw_addr_list *from_list, int addr_len);
4105int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
4106 struct net_device *dev,
4107 int (*sync)(struct net_device *, const unsigned char *),
4108 int (*unsync)(struct net_device *,
4109 const unsigned char *));
4110int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list,
4111 struct net_device *dev,
4112 int (*sync)(struct net_device *,
4113 const unsigned char *, int),
4114 int (*unsync)(struct net_device *,
4115 const unsigned char *, int));
4116void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list,
4117 struct net_device *dev,
4118 int (*unsync)(struct net_device *,
4119 const unsigned char *, int));
4120void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
4121 struct net_device *dev,
4122 int (*unsync)(struct net_device *,
4123 const unsigned char *));
4124void __hw_addr_init(struct netdev_hw_addr_list *list);
4125
4126
4127int dev_addr_add(struct net_device *dev, const unsigned char *addr,
4128 unsigned char addr_type);
4129int dev_addr_del(struct net_device *dev, const unsigned char *addr,
4130 unsigned char addr_type);
4131void dev_addr_flush(struct net_device *dev);
4132int dev_addr_init(struct net_device *dev);
4133
4134
4135int dev_uc_add(struct net_device *dev, const unsigned char *addr);
4136int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
4137int dev_uc_del(struct net_device *dev, const unsigned char *addr);
4138int dev_uc_sync(struct net_device *to, struct net_device *from);
4139int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
4140void dev_uc_unsync(struct net_device *to, struct net_device *from);
4141void dev_uc_flush(struct net_device *dev);
4142void dev_uc_init(struct net_device *dev);
4143
4144
4145
4146
4147
4148
4149
4150
4151
4152
/**
 *  __dev_uc_sync - Synchronize device's unicast list
 *  @dev:  device to sync
 *  @sync: function to call if address should be added
 *  @unsync: function to call if address should be removed
 *
 *  Add newly added addresses to the interface, and release
 *  addresses that have been deleted.
 */
static inline int __dev_uc_sync(struct net_device *dev,
				int (*sync)(struct net_device *,
					    const unsigned char *),
				int (*unsync)(struct net_device *,
					      const unsigned char *))
{
	return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
}

/**
 *  __dev_uc_unsync - Remove synchronized addresses from device
 *  @dev:  device to sync
 *  @unsync: function to call if address should be removed
 *
 *  Remove all addresses that were added to the device by dev_uc_sync().
 */
static inline void __dev_uc_unsync(struct net_device *dev,
				   int (*unsync)(struct net_device *,
						 const unsigned char *))
{
	__hw_addr_unsync_dev(&dev->uc, dev, unsync);
}
4175
4176
4177int dev_mc_add(struct net_device *dev, const unsigned char *addr);
4178int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
4179int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
4180int dev_mc_del(struct net_device *dev, const unsigned char *addr);
4181int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
4182int dev_mc_sync(struct net_device *to, struct net_device *from);
4183int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
4184void dev_mc_unsync(struct net_device *to, struct net_device *from);
4185void dev_mc_flush(struct net_device *dev);
4186void dev_mc_init(struct net_device *dev);
4187
4188
4189
4190
4191
4192
4193
4194
4195
4196
/**
 *  __dev_mc_sync - Synchronize device's multicast list
 *  @dev:  device to sync
 *  @sync: function to call if address should be added
 *  @unsync: function to call if address should be removed
 *
 *  Add newly added addresses to the interface, and release
 *  addresses that have been deleted.
 */
static inline int __dev_mc_sync(struct net_device *dev,
				int (*sync)(struct net_device *,
					    const unsigned char *),
				int (*unsync)(struct net_device *,
					      const unsigned char *))
{
	return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
}

/**
 *  __dev_mc_unsync - Remove synchronized addresses from device
 *  @dev:  device to sync
 *  @unsync: function to call if address should be removed
 *
 *  Remove all addresses that were added to the device by dev_mc_sync().
 */
static inline void __dev_mc_unsync(struct net_device *dev,
				   int (*unsync)(struct net_device *,
						 const unsigned char *))
{
	__hw_addr_unsync_dev(&dev->mc, dev, unsync);
}
4219
4220
4221void dev_set_rx_mode(struct net_device *dev);
4222void __dev_set_rx_mode(struct net_device *dev);
4223int dev_set_promiscuity(struct net_device *dev, int inc);
4224int dev_set_allmulti(struct net_device *dev, int inc);
4225void netdev_state_change(struct net_device *dev);
4226void netdev_notify_peers(struct net_device *dev);
4227void netdev_features_change(struct net_device *dev);
4228
4229void dev_load(struct net *net, const char *name);
4230struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
4231 struct rtnl_link_stats64 *storage);
4232void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
4233 const struct net_device_stats *netdev_stats);
4234
4235extern int netdev_max_backlog;
4236extern int netdev_tstamp_prequeue;
4237extern int weight_p;
4238extern int dev_weight_rx_bias;
4239extern int dev_weight_tx_bias;
4240extern int dev_rx_weight;
4241extern int dev_tx_weight;
4242
4243bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
4244struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
4245 struct list_head **iter);
4246struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
4247 struct list_head **iter);
4248
4249
4250#define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
4251 for (iter = &(dev)->adj_list.upper, \
4252 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
4253 updev; \
4254 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
4255
4256int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
4257 int (*fn)(struct net_device *upper_dev,
4258 void *data),
4259 void *data);
4260
4261bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
4262 struct net_device *upper_dev);
4263
4264bool netdev_has_any_upper_dev(struct net_device *dev);
4265
4266void *netdev_lower_get_next_private(struct net_device *dev,
4267 struct list_head **iter);
4268void *netdev_lower_get_next_private_rcu(struct net_device *dev,
4269 struct list_head **iter);
4270
4271#define netdev_for_each_lower_private(dev, priv, iter) \
4272 for (iter = (dev)->adj_list.lower.next, \
4273 priv = netdev_lower_get_next_private(dev, &(iter)); \
4274 priv; \
4275 priv = netdev_lower_get_next_private(dev, &(iter)))
4276
4277#define netdev_for_each_lower_private_rcu(dev, priv, iter) \
4278 for (iter = &(dev)->adj_list.lower, \
4279 priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
4280 priv; \
4281 priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
4282
4283void *netdev_lower_get_next(struct net_device *dev,
4284 struct list_head **iter);
4285
4286#define netdev_for_each_lower_dev(dev, ldev, iter) \
4287 for (iter = (dev)->adj_list.lower.next, \
4288 ldev = netdev_lower_get_next(dev, &(iter)); \
4289 ldev; \
4290 ldev = netdev_lower_get_next(dev, &(iter)))
4291
/* Cursor-based walk of the full (direct + indirect) lower-device list. */
struct net_device *netdev_all_lower_get_next(struct net_device *dev,
					     struct list_head **iter);
struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
						 struct list_head **iter);

/* Visit every lower device; a non-zero return from @fn stops the walk
 * and is propagated.
 */
int netdev_walk_all_lower_dev(struct net_device *dev,
			      int (*fn)(struct net_device *lower_dev,
					void *data),
			      void *data);
int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
				  int (*fn)(struct net_device *lower_dev,
					    void *data),
				  void *data);

void *netdev_adjacent_get_private(struct list_head *adj_list);
void *netdev_lower_get_first_private_rcu(struct net_device *dev);

/* Return @dev's master upper device (e.g. bond/bridge), or NULL. */
struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);

/* Establish / tear down upper<->lower relationships between devices. */
int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev,
			  struct netlink_ext_ack *extack);
int netdev_master_upper_dev_link(struct net_device *dev,
				 struct net_device *upper_dev,
				 void *upper_priv, void *upper_info,
				 struct netlink_ext_ack *extack);
void netdev_upper_dev_unlink(struct net_device *dev,
			     struct net_device *upper_dev);
/* Update adjacency sysfs links after @dev was renamed from @oldname. */
void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
void *netdev_lower_dev_get_private(struct net_device *dev,
				   struct net_device *lower_dev);
/* Notify @lower_dev's uppers that its state (@lower_state_info) changed. */
void netdev_lower_state_changed(struct net_device *lower_dev,
				void *lower_state_info);
4323
4324
/* Length in bytes of the system-wide RSS hash key. */
#define NETDEV_RSS_KEY_LEN 52
extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
/* Copy up to @len bytes of the global RSS key into @buffer. */
void netdev_rss_key_fill(void *buffer, size_t len);

int dev_get_nest_level(struct net_device *dev);
/* Software checksum fallbacks used when HW offload cannot be applied. */
int skb_checksum_help(struct sk_buff *skb);
int skb_crc32c_csum_help(struct sk_buff *skb);
int skb_csum_hwoffload_help(struct sk_buff *skb,
			    const netdev_features_t features);

/* GSO segmentation entry points; @tx_path selects transmit-side handling
 * (see the skb_gso_segment() wrapper below which passes true).
 */
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
				  netdev_features_t features, bool tx_path);
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features);

/* Bonding state snapshot (slave + master views) carried in notifiers. */
struct netdev_bonding_info {
	ifslave slave;
	ifbond master;
};

/* Notifier payload wrapping a bonding-info snapshot. */
struct netdev_notifier_bonding_info {
	struct netdev_notifier_info info;
	struct netdev_bonding_info bonding_info;
};

/* Emit a NETDEV_BONDING_INFO-style notification for @dev. */
void netdev_bonding_info_change(struct net_device *dev,
				struct netdev_bonding_info *bonding_info);
4352
4353static inline
4354struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
4355{
4356 return __skb_gso_segment(skb, features, true);
4357}
/* Network-layer protocol of @skb; @depth is an out-parameter updated by
 * the lookup. NOTE(review): exact depth semantics live in net/core — confirm.
 */
__be16 skb_network_protocol(struct sk_buff *skb, int *depth);
4359
4360static inline bool can_checksum_protocol(netdev_features_t features,
4361 __be16 protocol)
4362{
4363 if (protocol == htons(ETH_P_FCOE))
4364 return !!(features & NETIF_F_FCOE_CRC);
4365
4366
4367
4368 if (features & NETIF_F_HW_CSUM) {
4369
4370 return true;
4371 }
4372
4373 switch (protocol) {
4374 case htons(ETH_P_IP):
4375 return !!(features & NETIF_F_IP_CSUM);
4376 case htons(ETH_P_IPV6):
4377 return !!(features & NETIF_F_IPV6_CSUM);
4378 default:
4379 return false;
4380 }
4381}
4382
/* Report that @dev delivered a packet with a bad hardware checksum.
 * Real implementation only when CONFIG_BUG is enabled; no-op stub otherwise.
 */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev,
					struct sk_buff *skb)
{
}
#endif
4391
/* Enable/disable network packet timestamping support. */
void net_enable_timestamp(void);
void net_disable_timestamp(void);

/* /proc/net device table initialisation; compiles to 0 without procfs. */
#ifdef CONFIG_PROC_FS
int __init dev_proc_init(void);
#else
#define dev_proc_init() 0
#endif
4400
/* Invoke the driver's ndo_start_xmit() for @skb on @dev, first recording
 * @more (whether further packets are pending) in per-cpu softnet state,
 * where drivers can read it back via netdev_xmit_more().
 */
static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
					      struct sk_buff *skb, struct net_device *dev,
					      bool more)
{
	__this_cpu_write(softnet_data.xmit.more, more);
	return ops->ndo_start_xmit(skb, dev);
}
4408
/* True if more packets follow the one currently being transmitted on this
 * CPU (the flag written by __netdev_start_xmit()).
 */
static inline bool netdev_xmit_more(void)
{
	return __this_cpu_read(softnet_data.xmit.more);
}
4413
4414static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
4415 struct netdev_queue *txq, bool more)
4416{
4417 const struct net_device_ops *ops = dev->netdev_ops;
4418 netdev_tx_t rc;
4419
4420 rc = __netdev_start_xmit(ops, skb, dev, more);
4421 if (rc == NETDEV_TX_OK)
4422 txq_trans_update(txq);
4423
4424 return rc;
4425}
4426
/* Create/remove a class attribute file, tagged with kobject namespace @ns. */
int netdev_class_create_file_ns(const struct class_attribute *class_attr,
				const void *ns);
void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
				 const void *ns);
4431
4432static inline int netdev_class_create_file(const struct class_attribute *class_attr)
4433{
4434 return netdev_class_create_file_ns(class_attr, NULL);
4435}
4436
4437static inline void netdev_class_remove_file(const struct class_attribute *class_attr)
4438{
4439 netdev_class_remove_file_ns(class_attr, NULL);
4440}
4441
/* kobject namespace operations for network-namespace-aware sysfs. */
extern const struct kobj_ns_type_operations net_ns_type_operations;

/* Name of the driver bound to @dev (for diagnostics/logging). */
const char *netdev_drivername(const struct net_device *dev);

/* Process the pending link-state (linkwatch) event queue. */
void linkwatch_run_queue(void);
4447
4448static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
4449 netdev_features_t f2)
4450{
4451 if ((f1 ^ f2) & NETIF_F_HW_CSUM) {
4452 if (f1 & NETIF_F_HW_CSUM)
4453 f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4454 else
4455 f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4456 }
4457
4458 return f1 & f2;
4459}
4460
4461static inline netdev_features_t netdev_get_wanted_features(
4462 struct net_device *dev)
4463{
4464 return (dev->features & ~dev->hw_features) | dev->wanted_features;
4465}
/* Merge @one's features into @all under @mask.
 * NOTE(review): exact merge rules live in net/core/dev.c — confirm there.
 */
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask);
4468
4469
4470
4471
4472
4473static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
4474 netdev_features_t mask)
4475{
4476 return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
4477}
4478
/* Recompute and apply dev->features; the __ variant returns whether they
 * changed, netdev_change_features() additionally notifies listeners.
 */
int __netdev_update_features(struct net_device *dev);
void netdev_update_features(struct net_device *dev);
void netdev_change_features(struct net_device *dev);

/* Propagate operational state from @rootdev to the stacked device @dev. */
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
				      struct net_device *dev);

/* Per-skb feature validation; passthru_features_check() returns @features
 * unmodified (usable as an ndo_features_check for pass-through devices).
 */
netdev_features_t passthru_features_check(struct sk_buff *skb,
					  struct net_device *dev,
					  netdev_features_t features);
netdev_features_t netif_skb_features(struct sk_buff *skb);
4490
/* net_gso_ok - do @features cover every GSO type bit in @gso_type?
 * Relies on SKB_GSO_* being the NETIF_F_GSO_* bits shifted down by
 * NETIF_F_GSO_SHIFT; the BUILD_BUG_ON table below pins that mapping at
 * compile time, one assertion per GSO type. Do not reorder or drop entries.
 */
static inline bool net_gso_ok(netdev_features_t features, int gso_type)
{
	netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT;

	/* Compile-time check of the SKB_GSO_* <-> NETIF_F_* correspondence. */
	BUILD_BUG_ON(SKB_GSO_TCPV4   != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_DODGY   != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCPV6   != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_FCOE    != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_GRE     != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_IPXIP4  != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_IPXIP6  != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_SCTP    != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT));

	return (features & feature) == feature;
}
4517
4518static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
4519{
4520 return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
4521 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
4522}
4523
4524static inline bool netif_needs_gso(struct sk_buff *skb,
4525 netdev_features_t features)
4526{
4527 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
4528 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
4529 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
4530}
4531
/* Set the maximum GSO packet size the device accepts. */
static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	dev->gso_max_size = size;
}
4537
/* Undo header adjustments after a failed GSO attempt: restore @protocol,
 * push back the @pulled_hlen bytes, and rebuild the mac/network header
 * offsets from @mac_offset/@mac_len. Statement order is significant:
 * the network header is derived from the just-restored mac header.
 */
static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
					int pulled_hlen, u16 mac_offset,
					int mac_len)
{
	skb->protocol = protocol;
	skb->encapsulation = 1;
	skb_push(skb, pulled_hlen);
	skb_reset_transport_header(skb);
	skb->mac_header = mac_offset;
	skb->network_header = skb->mac_header + mac_len;
	skb->mac_len = mac_len;
}
4550
4551static inline bool netif_is_macsec(const struct net_device *dev)
4552{
4553 return dev->priv_flags & IFF_MACSEC;
4554}
4555
4556static inline bool netif_is_macvlan(const struct net_device *dev)
4557{
4558 return dev->priv_flags & IFF_MACVLAN;
4559}
4560
4561static inline bool netif_is_macvlan_port(const struct net_device *dev)
4562{
4563 return dev->priv_flags & IFF_MACVLAN_PORT;
4564}
4565
4566static inline bool netif_is_bond_master(const struct net_device *dev)
4567{
4568 return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
4569}
4570
4571static inline bool netif_is_bond_slave(const struct net_device *dev)
4572{
4573 return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
4574}
4575
4576static inline bool netif_supports_nofcs(struct net_device *dev)
4577{
4578 return dev->priv_flags & IFF_SUPP_NOFCS;
4579}
4580
4581static inline bool netif_has_l3_rx_handler(const struct net_device *dev)
4582{
4583 return dev->priv_flags & IFF_L3MDEV_RX_HANDLER;
4584}
4585
4586static inline bool netif_is_l3_master(const struct net_device *dev)
4587{
4588 return dev->priv_flags & IFF_L3MDEV_MASTER;
4589}
4590
4591static inline bool netif_is_l3_slave(const struct net_device *dev)
4592{
4593 return dev->priv_flags & IFF_L3MDEV_SLAVE;
4594}
4595
4596static inline bool netif_is_bridge_master(const struct net_device *dev)
4597{
4598 return dev->priv_flags & IFF_EBRIDGE;
4599}
4600
4601static inline bool netif_is_bridge_port(const struct net_device *dev)
4602{
4603 return dev->priv_flags & IFF_BRIDGE_PORT;
4604}
4605
4606static inline bool netif_is_ovs_master(const struct net_device *dev)
4607{
4608 return dev->priv_flags & IFF_OPENVSWITCH;
4609}
4610
4611static inline bool netif_is_ovs_port(const struct net_device *dev)
4612{
4613 return dev->priv_flags & IFF_OVS_DATAPATH;
4614}
4615
4616static inline bool netif_is_team_master(const struct net_device *dev)
4617{
4618 return dev->priv_flags & IFF_TEAM;
4619}
4620
4621static inline bool netif_is_team_port(const struct net_device *dev)
4622{
4623 return dev->priv_flags & IFF_TEAM_PORT;
4624}
4625
4626static inline bool netif_is_lag_master(const struct net_device *dev)
4627{
4628 return netif_is_bond_master(dev) || netif_is_team_master(dev);
4629}
4630
4631static inline bool netif_is_lag_port(const struct net_device *dev)
4632{
4633 return netif_is_bond_slave(dev) || netif_is_team_port(dev);
4634}
4635
4636static inline bool netif_is_rxfh_configured(const struct net_device *dev)
4637{
4638 return dev->priv_flags & IFF_RXFH_CONFIGURED;
4639}
4640
4641static inline bool netif_is_failover(const struct net_device *dev)
4642{
4643 return dev->priv_flags & IFF_FAILOVER;
4644}
4645
4646static inline bool netif_is_failover_slave(const struct net_device *dev)
4647{
4648 return dev->priv_flags & IFF_FAILOVER_SLAVE;
4649}
4650
4651
/* Mark @dev as needing skb->dst retained on transmit: clear both the
 * current and the permanent "release dst on xmit" flags.
 */
static inline void netif_keep_dst(struct net_device *dev)
{
	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
}
4656
4657
/* True if stacking a VLAN on @dev reduces the usable MTU. Currently this
 * is exactly the MACsec case (MACsec framing consumes part of the MTU) —
 * note the deliberate reuse of the IFF_MACSEC test.
 */
static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
{
	/* TODO: reserve and use an additional IFF bit, if we get more users */
	return dev->priv_flags & IFF_MACSEC;
}

/* Per-netns ops for the loopback device (init-time registration). */
extern struct pernet_operations __net_initdata loopback_net_ops;
4665
4666
4667
4668
4669
4670static inline const char *netdev_name(const struct net_device *dev)
4671{
4672 if (!dev->name[0] || strchr(dev->name, '%'))
4673 return "(unnamed net_device)";
4674 return dev->name;
4675}
4676
4677static inline bool netdev_unregistering(const struct net_device *dev)
4678{
4679 return dev->reg_state == NETREG_UNREGISTERING;
4680}
4681
/* Human-readable suffix describing @dev's registration state, for log
 * messages. The switch intentionally has no default case so the compiler
 * warns if a new NETREG_* value is added; unknown values fall through to
 * the one-shot warning below.
 */
static inline const char *netdev_reg_state(const struct net_device *dev)
{
	switch (dev->reg_state) {
	case NETREG_UNINITIALIZED: return " (uninitialized)";
	case NETREG_REGISTERED: return "";
	case NETREG_UNREGISTERING: return " (unregistering)";
	case NETREG_UNREGISTERED: return " (unregistered)";
	case NETREG_RELEASED: return " (released)";
	case NETREG_DUMMY: return " (dummy)";
	}

	WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state);
	return " (unknown)";
}
4696
/* Device-prefixed kernel log helpers; format strings are printf-checked. */
__printf(3, 4) __cold
void netdev_printk(const char *level, const struct net_device *dev,
		   const char *format, ...);
__printf(2, 3) __cold
void netdev_emerg(const struct net_device *dev, const char *format, ...);
__printf(2, 3) __cold
void netdev_alert(const struct net_device *dev, const char *format, ...);
__printf(2, 3) __cold
void netdev_crit(const struct net_device *dev, const char *format, ...);
__printf(2, 3) __cold
void netdev_err(const struct net_device *dev, const char *format, ...);
__printf(2, 3) __cold
void netdev_warn(const struct net_device *dev, const char *format, ...);
__printf(2, 3) __cold
void netdev_notice(const struct net_device *dev, const char *format, ...);
__printf(2, 3) __cold
void netdev_info(const struct net_device *dev, const char *format, ...);

/* Log at @level at most once per call site: each macro expansion gets its
 * own static flag, so repeated invocations of the same site are silent.
 */
#define netdev_level_once(level, dev, fmt, ...)			\
do {								\
	static bool __print_once __read_mostly;			\
								\
	if (!__print_once) {					\
		__print_once = true;				\
		netdev_printk(level, dev, fmt, ##__VA_ARGS__);	\
	}							\
} while (0)

/* Per-severity wrappers around netdev_level_once(). */
#define netdev_emerg_once(dev, fmt, ...) \
	netdev_level_once(KERN_EMERG, dev, fmt, ##__VA_ARGS__)
#define netdev_alert_once(dev, fmt, ...) \
	netdev_level_once(KERN_ALERT, dev, fmt, ##__VA_ARGS__)
#define netdev_crit_once(dev, fmt, ...) \
	netdev_level_once(KERN_CRIT, dev, fmt, ##__VA_ARGS__)
#define netdev_err_once(dev, fmt, ...) \
	netdev_level_once(KERN_ERR, dev, fmt, ##__VA_ARGS__)
#define netdev_warn_once(dev, fmt, ...) \
	netdev_level_once(KERN_WARNING, dev, fmt, ##__VA_ARGS__)
#define netdev_notice_once(dev, fmt, ...) \
	netdev_level_once(KERN_NOTICE, dev, fmt, ##__VA_ARGS__)
#define netdev_info_once(dev, fmt, ...) \
	netdev_level_once(KERN_INFO, dev, fmt, ##__VA_ARGS__)

/* Module alias for auto-loading a driver by its netdev name. */
#define MODULE_ALIAS_NETDEV(device) \
	MODULE_ALIAS("netdev-" device)
4742
/* netdev_dbg - debug message, routed per build configuration:
 * dynamic debug when available, plain KERN_DEBUG with DEBUG, otherwise
 * compiled out while still type/format-checking the arguments (if (0)).
 */
#if defined(CONFIG_DYNAMIC_DEBUG)
#define netdev_dbg(__dev, format, args...)			\
do {								\
	dynamic_netdev_dbg(__dev, format, ##args);		\
} while (0)
#elif defined(DEBUG)
#define netdev_dbg(__dev, format, args...)			\
	netdev_printk(KERN_DEBUG, __dev, format, ##args)
#else
#define netdev_dbg(__dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, __dev, format, ##args); \
})
#endif

/* netdev_vdbg - verbose debug; only active with VERBOSE_DEBUG, otherwise
 * compiled out (arguments still checked, expression evaluates to 0).
 */
#if defined(VERBOSE_DEBUG)
#define netdev_vdbg	netdev_dbg
#else

#define netdev_vdbg(dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, dev, format, ##args);	\
	0;							\
})
#endif
4770
4771
4772
4773
4774
4775
/* WARN()/WARN_ONCE() with a "netdevice: <name><reg-state>:" prefix. */
#define netdev_WARN(dev, format, args...)			\
	WARN(1, "netdevice: %s%s: " format, netdev_name(dev),	\
	     netdev_reg_state(dev), ##args)

#define netdev_WARN_ONCE(dev, format, args...)				\
	WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev),	\
		  netdev_reg_state(dev), ##args)
4783
4784
4785
/* netif_printk - log only when the driver's msg_enable mask (@priv) has
 * the netif_msg_<type> bit set.
 */
#define netif_printk(priv, type, level, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_printk(level, (dev), fmt, ##args);	\
} while (0)

/* Same gating, dispatching to the named netdev_<level>() helper. */
#define netif_level(level, priv, type, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_##level(dev, fmt, ##args);		\
} while (0)

/* Per-severity message-type-gated log helpers. */
#define netif_emerg(priv, type, dev, fmt, args...)		\
	netif_level(emerg, priv, type, dev, fmt, ##args)
#define netif_alert(priv, type, dev, fmt, args...)		\
	netif_level(alert, priv, type, dev, fmt, ##args)
#define netif_crit(priv, type, dev, fmt, args...)		\
	netif_level(crit, priv, type, dev, fmt, ##args)
#define netif_err(priv, type, dev, fmt, args...)		\
	netif_level(err, priv, type, dev, fmt, ##args)
#define netif_warn(priv, type, dev, fmt, args...)		\
	netif_level(warn, priv, type, dev, fmt, ##args)
#define netif_notice(priv, type, dev, fmt, args...)		\
	netif_level(notice, priv, type, dev, fmt, ##args)
#define netif_info(priv, type, dev, fmt, args...)		\
	netif_level(info, priv, type, dev, fmt, ##args)
4812
/* netif_dbg - msg-type-gated debug message; same build-config routing as
 * netdev_dbg() (dynamic debug / DEBUG / compiled-out-but-checked).
 */
#if defined(CONFIG_DYNAMIC_DEBUG)
#define netif_dbg(priv, type, netdev, format, args...)		\
do {								\
	if (netif_msg_##type(priv))				\
		dynamic_netdev_dbg(netdev, format, ##args);	\
} while (0)
#elif defined(DEBUG)
#define netif_dbg(priv, type, dev, format, args...)		\
	netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
#else
#define netif_dbg(priv, type, dev, format, args...)			\
({									\
	if (0)								\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;								\
})
#endif

/* netif_cond_dbg - if @cond, log as debug; otherwise at @level severity. */
#define netif_cond_dbg(priv, type, netdev, cond, level, fmt, args...)     \
	do {                                                              \
		if (cond)                                                 \
			netif_dbg(priv, type, netdev, fmt, ##args);       \
		else                                                      \
			netif_ ## level(priv, type, netdev, fmt, ##args); \
	} while (0)

/* Verbose variant, enabled only with VERBOSE_DEBUG. */
#if defined(VERBOSE_DEBUG)
#define netif_vdbg	netif_dbg
#else
#define netif_vdbg(priv, type, dev, format, args...)		\
({								\
	if (0)							\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;							\
})
#endif
4850
4851
4852
4853
4854
4855
4856
4857
4858
4859
4860
4861
4862
4863
4864
4865
4866
4867
4868
4869
/* Bucket count (power of two) and mask for the packet-type hash tables. */
#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

/* Always-available device that discards all traffic sent to it.
 * NOTE(review): presumably used as a stand-in when a real device goes
 * away — confirm against net/core usage.
 */
extern struct net_device *blackhole_netdev;
4874
4875#endif
4876