1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#ifndef _LINUX_NETDEVICE_H
26#define _LINUX_NETDEVICE_H
27
28#include <linux/timer.h>
29#include <linux/bug.h>
30#include <linux/delay.h>
31#include <linux/atomic.h>
32#include <linux/prefetch.h>
33#include <asm/cache.h>
34#include <asm/byteorder.h>
35
36#include <linux/percpu.h>
37#include <linux/rculist.h>
38#include <linux/workqueue.h>
39#include <linux/dynamic_queue_limits.h>
40
41#include <linux/ethtool.h>
42#include <net/net_namespace.h>
43#ifdef CONFIG_DCB
44#include <net/dcbnl.h>
45#endif
46#include <net/netprio_cgroup.h>
47#include <net/xdp.h>
48
49#include <linux/netdev_features.h>
50#include <linux/neighbour.h>
51#include <uapi/linux/netdevice.h>
52#include <uapi/linux/if_bonding.h>
53#include <uapi/linux/pkt_cls.h>
54#include <linux/hashtable.h>
55
56struct netpoll_info;
57struct device;
58struct phy_device;
59struct dsa_port;
60
61struct sfp_bus;
62
63struct wireless_dev;
64
65struct wpan_dev;
66struct mpls_dev;
67
68struct udp_tunnel_info;
69struct bpf_prog;
70struct xdp_buff;
71
72void netdev_set_default_ethtool_ops(struct net_device *dev,
73 const struct ethtool_ops *ops);
74
75
76#define NET_RX_SUCCESS 0
77#define NET_RX_DROP 1
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97#define NET_XMIT_SUCCESS 0x00
98#define NET_XMIT_DROP 0x01
99#define NET_XMIT_CN 0x02
100#define NET_XMIT_MASK 0x0f
101
102
103
104
105#define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e))
106#define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0)
107
108
109#define NETDEV_TX_MASK 0xf0
110
/*
 * Driver transmit return codes.  Values are chosen so they do not collide
 * with the NET_XMIT_* qdisc codes, which all fall below NET_XMIT_MASK
 * (0x0f); NETDEV_TX_* values live in the NETDEV_TX_MASK (0xf0) range.
 */
enum netdev_tx {
	__NETDEV_TX_MIN	 = INT_MIN,	/* anchor at INT_MIN so the enum's range is signed int */
	NETDEV_TX_OK	 = 0x00,	/* driver consumed the skb */
	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path busy, caller still owns the skb */
};
typedef enum netdev_tx netdev_tx_t;
117
118
119
120
121
122static inline bool dev_xmit_complete(int rc)
123{
124
125
126
127
128
129
130 if (likely(rc < NET_XMIT_MASK))
131 return true;
132
133 return false;
134}
135
136
137
138
139
140
141#if defined(CONFIG_HYPERV_NET)
142# define LL_MAX_HEADER 128
143#elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
144# if defined(CONFIG_MAC80211_MESH)
145# define LL_MAX_HEADER 128
146# else
147# define LL_MAX_HEADER 96
148# endif
149#else
150# define LL_MAX_HEADER 32
151#endif
152
153#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
154 !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
155#define MAX_HEADER LL_MAX_HEADER
156#else
157#define MAX_HEADER (LL_MAX_HEADER + 48)
158#endif
159
160
161
162
163
164
/*
 * Per-device statistics counters, embedded in struct net_device ("stats"
 * member) and returned by the ndo_get_stats callback.  All counters are
 * plain unsigned long, so they can wrap on 32-bit platforms.
 * NOTE(review): drivers also expose 64-bit stats via ndo_get_stats64
 * (struct rtnl_link_stats64); that path appears to be the preferred one —
 * confirm against callers.
 */
struct net_device_stats {
	unsigned long	rx_packets;
	unsigned long	tx_packets;
	unsigned long	rx_bytes;
	unsigned long	tx_bytes;
	unsigned long	rx_errors;
	unsigned long	tx_errors;
	unsigned long	rx_dropped;
	unsigned long	tx_dropped;
	unsigned long	multicast;
	unsigned long	collisions;
	/* detailed receive error breakdown */
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;
	unsigned long	rx_crc_errors;
	unsigned long	rx_frame_errors;
	unsigned long	rx_fifo_errors;
	unsigned long	rx_missed_errors;
	/* detailed transmit error breakdown */
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};
190
191
192#include <linux/cache.h>
193#include <linux/skbuff.h>
194
195#ifdef CONFIG_RPS
196#include <linux/static_key.h>
197extern struct static_key rps_needed;
198extern struct static_key rfs_needed;
199#endif
200
201struct neighbour;
202struct neigh_parms;
203struct sk_buff;
204
/*
 * One entry on a device hardware-address list (the uc/mc/dev_addrs lists
 * in struct net_device).  Entries are freed via RCU (rcu_head).
 */
struct netdev_hw_addr {
	struct list_head	list;
	unsigned char		addr[MAX_ADDR_LEN];
	unsigned char		type;		/* one of NETDEV_HW_ADDR_T_* below */
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_SLAVE		3
#define NETDEV_HW_ADDR_T_UNICAST	4
#define NETDEV_HW_ADDR_T_MULTICAST	5
	bool			global_use;
	int			sync_cnt;
	int			refcount;
	int			synced;
	struct rcu_head		rcu_head;
};

/*
 * List head plus cached element count for netdev_hw_addr entries;
 * the count is what netdev_hw_addr_list_count()/netdev_uc_count()/
 * netdev_mc_count() report.
 */
struct netdev_hw_addr_list {
	struct list_head	list;
	int			count;
};
225
226#define netdev_hw_addr_list_count(l) ((l)->count)
227#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
228#define netdev_hw_addr_list_for_each(ha, l) \
229 list_for_each_entry(ha, &(l)->list, list)
230
231#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
232#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
233#define netdev_for_each_uc_addr(ha, dev) \
234 netdev_hw_addr_list_for_each(ha, &(dev)->uc)
235
236#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
237#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
238#define netdev_for_each_mc_addr(ha, dev) \
239 netdev_hw_addr_list_for_each(ha, &(dev)->mc)
240
/*
 * Cached, pre-built hardware header.  Readers/writers serialize through
 * the hh_lock seqlock; hh_data is sized and aligned to HH_DATA_MOD so a
 * header of up to LL_MAX_HEADER bytes fits.
 */
struct hh_cache {
	unsigned int	hh_len;		/* length of cached header, in bytes */
	seqlock_t	hh_lock;

/* cached hardware header; allow for machine alignment needs */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};
253
254
255
256
257
258
259
260
261
262#define LL_RESERVED_SPACE(dev) \
263 ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
264#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
265 ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
266
/*
 * Link-layer header operations, installed per device (see the header_ops
 * pointer in struct net_device).  All callbacks are optional unless a
 * caller requires them.
 */
struct header_ops {
	/* build the hardware header for an outgoing skb */
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned int len);
	/* extract the source hardware address from a received skb */
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	/* populate an hh_cache entry for @neigh */
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
	/* refresh a cached header after the address changed */
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
	/* sanity-check a raw link-layer header of @len bytes */
	bool	(*validate)(const char *ll_header, unsigned int len);
	__be16	(*parse_protocol)(const struct sk_buff *skb);
};
279
280
281
282
283
284
/*
 * Bit numbers for the device state word (presumably net_device->state,
 * which is an unsigned long — confirm against bit users).
 */
enum netdev_state_t {
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
};

/*
 * One boot-time "netdev=" style configuration entry: device name plus
 * its I/O map.  Parsed by netdev_boot_setup() below.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

int __init netdev_boot_setup(char *str);

/* One GRO hash bucket: list of held skbs plus its length. */
struct gro_list {
	struct list_head	list;
	int			count;
};

/* number of gro_list buckets per napi_struct (gro_hash below) */
#define GRO_HASH_BUCKETS	8
316
317
318
319
/*
 * Per-queue NAPI polling context.  The whole structure is written by the
 * owning driver/softirq; state is a bitmask of NAPIF_STATE_* flags.
 */
struct napi_struct {
	/* entry on the per-cpu poll list while scheduled */
	struct list_head	poll_list;

	unsigned long		state;		/* NAPIF_STATE_* bits */
	int			weight;		/* budget handed to ->poll() per round */
	unsigned long		gro_bitmask;	/* which gro_hash buckets are non-empty — TODO confirm */
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	int			poll_owner;
#endif
	struct net_device	*dev;
	struct gro_list		gro_hash[GRO_HASH_BUCKETS];
	struct sk_buff		*skb;
	struct hrtimer		timer;
	struct list_head	dev_list;	/* entry on dev's napi_list */
	struct hlist_node	napi_hash_node;
	unsigned int		napi_id;
};
344
/* Bit numbers for napi_struct::state. */
enum {
	NAPI_STATE_SCHED,	/* poll is scheduled or running */
	NAPI_STATE_MISSED,	/* a schedule attempt raced with an active poll */
	NAPI_STATE_DISABLE,	/* disable pending */
	NAPI_STATE_NPSVC,	/* being serviced by netpoll */
	NAPI_STATE_HASHED,	/* on the napi_id hash */
	NAPI_STATE_NO_BUSY_POLL,
	NAPI_STATE_IN_BUSY_POLL,
};

/* Mask form of the bit numbers above, for direct state-word tests. */
enum {
	NAPIF_STATE_SCHED	 = BIT(NAPI_STATE_SCHED),
	NAPIF_STATE_MISSED	 = BIT(NAPI_STATE_MISSED),
	NAPIF_STATE_DISABLE	 = BIT(NAPI_STATE_DISABLE),
	NAPIF_STATE_NPSVC	 = BIT(NAPI_STATE_NPSVC),
	NAPIF_STATE_HASHED	 = BIT(NAPI_STATE_HASHED),
	NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL),
	NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL),
};
364
/* Disposition of an skb submitted to the GRO engine. */
enum gro_result {
	GRO_MERGED,
	GRO_MERGED_FREE,
	GRO_HELD,
	GRO_NORMAL,
	GRO_DROP,
	GRO_CONSUMED,
};
typedef enum gro_result gro_result_t;
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
/*
 * Return codes for an rx_handler (rx_handler_func_t below): the handler
 * tells the core whether it consumed the skb, wants another pass,
 * restricts delivery to exact-match ptypes, or passes it on untouched.
 */
enum rx_handler_result {
	RX_HANDLER_CONSUMED,
	RX_HANDLER_ANOTHER,
	RX_HANDLER_EXACT,
	RX_HANDLER_PASS,
};
typedef enum rx_handler_result rx_handler_result_t;
/* handler may replace *pskb, hence the double pointer */
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
424
425void __napi_schedule(struct napi_struct *n);
426void __napi_schedule_irqoff(struct napi_struct *n);
427
/* Is a napi_disable() pending on @n? */
static inline bool napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

bool napi_schedule_prep(struct napi_struct *n);

/*
 * napi_schedule - schedule NAPI poll
 * @n: NAPI context
 *
 * Schedule @n for polling unless it is already scheduled or a disable
 * is pending (napi_schedule_prep() arbitrates the race).
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}

/*
 * napi_schedule_irqoff - schedule NAPI poll
 * @n: NAPI context
 *
 * Variant of napi_schedule(); the __napi_schedule_irqoff() name implies
 * the caller already has hard interrupts disabled — confirm at call sites.
 */
static inline void napi_schedule_irqoff(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule_irqoff(n);
}
459
460
461static inline bool napi_reschedule(struct napi_struct *napi)
462{
463 if (napi_schedule_prep(napi)) {
464 __napi_schedule(napi);
465 return true;
466 }
467 return false;
468}
469
470bool napi_complete_done(struct napi_struct *n, int work_done);
471
472
473
474
475
476
477
478
/*
 * napi_complete - NAPI processing complete
 * @n: NAPI context
 *
 * Shorthand for napi_complete_done(@n, 0), i.e. completion with no work
 * accounted.  Return value propagates from napi_complete_done().
 */
static inline bool napi_complete(struct napi_struct *n)
{
	return napi_complete_done(n, 0);
}
483
484
485
486
487
488
489
490
491
492
493
494
495
496bool napi_hash_del(struct napi_struct *napi);
497
498
499
500
501
502
503
504
505void napi_disable(struct napi_struct *n);
506
507
508
509
510
511
512
513
/*
 * napi_enable - enable NAPI scheduling
 * @n: NAPI context
 *
 * Re-enable scheduling after napi_disable().  @n must still hold the
 * SCHED bit (set by the disable path) — BUG otherwise.  The barrier
 * orders all prior initialization of @n before the state bits are
 * cleared and scheduling becomes possible again.
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_atomic();
	clear_bit(NAPI_STATE_SCHED, &n->state);
	clear_bit(NAPI_STATE_NPSVC, &n->state);
}
521
522
523
524
525
526
527
528
529
/*
 * napi_synchronize - wait until NAPI is not running
 * @n: NAPI context
 *
 * Busy-sleeps (1 ms at a time) until the SCHED bit clears, i.e. until no
 * poll is scheduled or in progress.  On !SMP a poll cannot run
 * concurrently, so only a compiler barrier is needed.  Must be callable
 * from a context that may sleep (uses msleep).
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	if (IS_ENABLED(CONFIG_SMP))
		while (test_bit(NAPI_STATE_SCHED, &n->state))
			msleep(1);
	else
		barrier();
}
538
539
540
541
542
543
544
545
546
/*
 * napi_if_scheduled_mark_missed - if @n is scheduled, mark a missed event
 * @n: NAPI context
 *
 * Lock-free cmpxchg loop on n->state:
 *  - if a disable is pending, report true without touching the state;
 *  - if not scheduled at all, report false;
 *  - otherwise atomically set the MISSED bit (so the current poll knows
 *    to reschedule) and report true.
 * The loop retries whenever another CPU changed the state between the
 * read and the cmpxchg.
 */
static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n)
{
	unsigned long val, new;

	do {
		val = READ_ONCE(n->state);
		if (val & NAPIF_STATE_DISABLE)
			return true;

		if (!(val & NAPIF_STATE_SCHED))
			return false;

		new = val | NAPIF_STATE_MISSED;
	} while (cmpxchg(&n->state, val, new) != val);

	return true;
}
564
/* Bit numbers for netdev_queue::state. */
enum netdev_queue_state_t {
	__QUEUE_STATE_DRV_XOFF,		/* stopped by the driver */
	__QUEUE_STATE_STACK_XOFF,	/* stopped by the stack */
	__QUEUE_STATE_FROZEN,
};

#define QUEUE_STATE_DRV_XOFF	(1 << __QUEUE_STATE_DRV_XOFF)
#define QUEUE_STATE_STACK_XOFF	(1 << __QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_FROZEN	(1 << __QUEUE_STATE_FROZEN)

/* composite masks for "is this queue unusable" tests */
#define QUEUE_STATE_ANY_XOFF	(QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN	(QUEUE_STATE_ANY_XOFF | \
					 QUEUE_STATE_FROZEN)
#define QUEUE_STATE_DRV_XOFF_OR_FROZEN	(QUEUE_STATE_DRV_XOFF | \
					 QUEUE_STATE_FROZEN)
580
581
582
583
584
585
586
587
588
589
590
/*
 * One transmit queue of a device.  The structure is split at _xmit_lock
 * into a read-mostly part and a part written on every transmit: the
 * ____cacheline_aligned_in_smp on _xmit_lock starts a fresh cacheline so
 * hot writes don't bounce the read-mostly fields.
 */
struct netdev_queue {
/*
 * read-mostly part
 */
	struct net_device	*dev;
	struct Qdisc __rcu	*qdisc;
	struct Qdisc		*qdisc_sleeping;
#ifdef CONFIG_SYSFS
	struct kobject		kobj;
#endif
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	int			numa_node;
#endif
	unsigned long		tx_maxrate;
	/* number of TX timeouts seen on this queue */
	unsigned long		trans_timeout;

	/* subordinate device this queue services, if any */
	struct net_device	*sb_dev;
#ifdef CONFIG_XDP_SOCKETS
	struct xdp_umem		*umem;
#endif
/*
 * write-mostly part
 */
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	int			xmit_lock_owner;
	/* time (jiffies, presumably) of last transmit — confirm units at writers */
	unsigned long		trans_start;

	unsigned long		state;		/* QUEUE_STATE_* bits */

#ifdef CONFIG_BQL
	struct dql		dql;		/* byte queue limits */
#endif
} ____cacheline_aligned_in_smp;
632
633extern int sysctl_fb_tunnels_only_for_init_net;
634extern int sysctl_devconf_inherit_init_net;
635
636static inline bool net_has_fallback_tunnels(const struct net *net)
637{
638 return net == &init_net ||
639 !IS_ENABLED(CONFIG_SYSCTL) ||
640 !sysctl_fb_tunnels_only_for_init_net;
641}
642
/*
 * Read the NUMA node a TX queue is bound to; NUMA_NO_NODE when the
 * kernel is built without XPS+NUMA and the field does not exist.
 */
static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	return q->numa_node;
#else
	return NUMA_NO_NODE;
#endif
}

/* Record the NUMA node for a TX queue; no-op without XPS+NUMA. */
static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	q->numa_node = node;
#endif
}
658
659#ifdef CONFIG_RPS
660
661
662
663
/*
 * RPS: table of CPUs eligible to process packets of an RX queue.
 * Variable-length; allocate with RPS_MAP_SIZE() and free via RCU.
 */
struct rps_map {
	unsigned int len;	/* number of entries in cpus[] */
	struct rcu_head rcu;
	u16 cpus[0];		/* NOTE(review): pre-C99 flexible array; cpus[] is the modern spelling */
};
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))

/*
 * RFS: one tracked flow — the CPU it was last steered to, an optional
 * hardware filter id, and the queue tail at steering time.
 */
struct rps_dev_flow {
	u16 cpu;
	u16 filter;		/* RPS_NO_FILTER when no HW filter is installed */
	unsigned int last_qtail;
};
#define RPS_NO_FILTER 0xffff

/*
 * Per-RX-queue flow table, indexed by (hash & mask).
 * Allocate with RPS_DEV_FLOW_TABLE_SIZE(); freed via RCU.
 */
struct rps_dev_flow_table {
	unsigned int mask;	/* table size minus one; sized to a power of two */
	struct rcu_head rcu;
	struct rps_dev_flow flows[0];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
    ((_num) * sizeof(struct rps_dev_flow)))
693
694
695
696
697
698
699
700
701
702
703
/*
 * Global socket flow table for accelerated RFS.  Each entry packs the
 * flow hash (upper bits, ~rps_cpu_mask) together with the CPU that last
 * processed the flow (lower rps_cpu_mask bits) — see
 * rps_record_sock_flow() below for the encoding.
 */
struct rps_sock_flow_table {
	u32	mask;		/* table size minus one */
	/* entries start on their own cacheline */
	u32	ents[0] ____cacheline_aligned_in_smp;
};
#define	RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))

#define RPS_NO_CPU 0xffff

extern u32 rps_cpu_mask;
extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;

/*
 * Record that the flow identified by @hash was processed on the current
 * CPU.  Lockless and intentionally racy: entries are hints.  The
 * write is skipped when the value is unchanged, presumably to avoid
 * dirtying a shared cacheline — confirm intent.
 */
static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
					u32 hash)
{
	if (table && hash) {
		unsigned int index = hash & table->mask;
		u32 val = hash & ~rps_cpu_mask;

		/* encode current CPU into the low bits */
		val |= raw_smp_processor_id();

		if (table->ents[index] != val)
			table->ents[index] = val;
	}
}
730
731#ifdef CONFIG_RFS_ACCEL
732bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
733 u16 filter_id);
734#endif
735#endif
736
737
/* One receive queue of a device. */
struct netdev_rx_queue {
#ifdef CONFIG_RPS
	struct rps_map __rcu		*rps_map;
	struct rps_dev_flow_table __rcu	*rps_flow_table;
#endif
	struct kobject			kobj;
	struct net_device		*dev;
	struct xdp_rxq_info		xdp_rxq;
#ifdef CONFIG_XDP_SOCKETS
	struct xdp_umem			*umem;
#endif
} ____cacheline_aligned_in_smp;

/*
 * Sysfs attribute of an RX queue: show/store operate on the queue
 * rather than on a generic kobject.
 */
struct rx_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_rx_queue *queue, char *buf);
	ssize_t (*store)(struct netdev_rx_queue *queue,
			 const char *buf, size_t len);
};
760
761#ifdef CONFIG_XPS
762
763
764
765
/*
 * XPS: set of TX queues usable from a CPU (or RX queue).  len is the
 * number of valid entries; alloc_len is the allocated capacity
 * (XPS_MIN_MAP_ALLOC rounds the first allocation to a cacheline).
 */
struct xps_map {
	unsigned int len;
	unsigned int alloc_len;
	struct rcu_head rcu;
	u16 queues[0];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
       - sizeof(struct xps_map)) / sizeof(u16))

/*
 * All XPS maps of a device, indexed by (attribute index * num_tc + tc);
 * sized by either CPUs or RX queues — see the two *_DEV_MAPS_SIZE macros.
 */
struct xps_dev_maps {
	struct rcu_head rcu;
	struct xps_map __rcu *attr_map[0];	/* either CPU- or rxq-indexed */
};

#define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) +	\
	(nr_cpu_ids * (_tcs) * sizeof(struct xps_map *)))

#define XPS_RXQ_DEV_MAPS_SIZE(_tcs, _rxqs) (sizeof(struct xps_dev_maps) +\
	(_rxqs * (_tcs) * sizeof(struct xps_map *)))
789
790#endif
791
#define TC_MAX_QUEUE	16
#define TC_BITMASK	15

/* HW offloaded queuing disciplines: TX queue range of one traffic class. */
struct netdev_tc_txq {
	u16 count;
	u16 offset;
};

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
/*
 * FCoE HBA identification strings, filled in by the driver via
 * ndo_fcoe_get_hbainfo().
 */
struct netdev_fcoe_hbainfo {
	char	manufacturer[64];
	char	serial_number[64];
	char	hardware_version[64];
	char	driver_version[64];
	char	optionrom_version[64];
	char	firmware_version[64];
	char	model[256];
	char	model_description[256];
};
#endif

#define MAX_PHYS_ITEM_ID_LEN 32

/*
 * Opaque physical item (port/switch) identifier: raw bytes plus their
 * valid length.  Compare with netdev_phys_item_id_same().
 */
struct netdev_phys_item_id {
	unsigned char id[MAX_PHYS_ITEM_ID_LEN];
	unsigned char id_len;
};
826
827static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
828 struct netdev_phys_item_id *b)
829{
830 return a->id_len == b->id_len &&
831 memcmp(a->id, b->id, a->id_len) == 0;
832}
833
/* Default queue-selection function handed to ndo_select_queue. */
typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
				       struct sk_buff *skb,
				       struct net_device *sb_dev);

/* Kind of offload request passed to ndo_setup_tc. */
enum tc_setup_type {
	TC_SETUP_QDISC_MQPRIO,
	TC_SETUP_CLSU32,
	TC_SETUP_CLSFLOWER,
	TC_SETUP_CLSMATCHALL,
	TC_SETUP_CLSBPF,
	TC_SETUP_BLOCK,
	TC_SETUP_QDISC_CBS,
	TC_SETUP_QDISC_RED,
	TC_SETUP_QDISC_PRIO,
	TC_SETUP_QDISC_MQ,
	TC_SETUP_QDISC_ETF,
	TC_SETUP_ROOT_QDISC,
	TC_SETUP_QDISC_GRED,
};

/*
 * Commands carried by struct netdev_bpf into ndo_bpf; the union member
 * used depends on the command (see struct netdev_bpf below).
 */
enum bpf_netdev_command {
	/* install / replace an XDP program (_HW: offloaded to hardware) */
	XDP_SETUP_PROG,
	XDP_SETUP_PROG_HW,
	/* query the currently attached program */
	XDP_QUERY_PROG,
	XDP_QUERY_PROG_HW,
	/* BPF map offload lifetime */
	BPF_OFFLOAD_MAP_ALLOC,
	BPF_OFFLOAD_MAP_FREE,
	/* attach an AF_XDP umem to a queue */
	XDP_SETUP_XSK_UMEM,
};
874
875struct bpf_prog_offload_ops;
876struct netlink_ext_ack;
877struct xdp_umem;
878
/*
 * Argument block for the ndo_bpf callback; which union member is valid
 * is determined by @command (enum bpf_netdev_command above).
 */
struct netdev_bpf {
	enum bpf_netdev_command command;
	union {
		/* XDP_SETUP_PROG / XDP_SETUP_PROG_HW */
		struct {
			u32 flags;
			struct bpf_prog *prog;
			struct netlink_ext_ack *extack;
		};
		/* XDP_QUERY_PROG / XDP_QUERY_PROG_HW: filled in by the driver */
		struct {
			u32 prog_id;
			/* flags with which program was installed */
			u32 prog_flags;
		};
		/* BPF_OFFLOAD_MAP_ALLOC / BPF_OFFLOAD_MAP_FREE */
		struct {
			struct bpf_offloaded_map *offmap;
		};
		/* XDP_SETUP_XSK_UMEM */
		struct {
			struct xdp_umem *umem;
			u16 queue_id;
		} xsk;
	};
};
905
#ifdef CONFIG_XFRM_OFFLOAD
/* Driver callbacks for IPsec (XFRM) state offload. */
struct xfrmdev_ops {
	int	(*xdo_dev_state_add) (struct xfrm_state *x);
	void	(*xdo_dev_state_delete) (struct xfrm_state *x);
	void	(*xdo_dev_state_free) (struct xfrm_state *x);
	/* can this skb be handled by the offloaded state? */
	bool	(*xdo_dev_offload_ok) (struct sk_buff *skb,
				       struct xfrm_state *x);
	void	(*xdo_dev_state_advance_esn) (struct xfrm_state *x);
};
#endif

#if IS_ENABLED(CONFIG_TLS_DEVICE)
enum tls_offload_ctx_dir {
	TLS_OFFLOAD_CTX_DIR_RX,
	TLS_OFFLOAD_CTX_DIR_TX,
};

struct tls_crypto_info;
struct tls_context;

/* Driver callbacks for kernel TLS record offload. */
struct tlsdev_ops {
	int (*tls_dev_add)(struct net_device *netdev, struct sock *sk,
			   enum tls_offload_ctx_dir direction,
			   struct tls_crypto_info *crypto_info,
			   u32 start_offload_tcp_sn);
	void (*tls_dev_del)(struct net_device *netdev,
			    struct tls_context *ctx,
			    enum tls_offload_ctx_dir direction);
	/* resync device RX state to TCP seq @seq / record @rcd_sn */
	void (*tls_dev_resync_rx)(struct net_device *netdev,
				  struct sock *sk, u32 seq, u64 rcd_sn);
};
#endif

/* RCU-managed interface alias string (variable length). */
struct dev_ifalias {
	struct rcu_head rcuhead;
	char ifalias[];
};
943
944struct devlink;
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
/*
 * struct net_device_ops - device operations vector
 *
 * The table of callbacks a driver installs in net_device->netdev_ops.
 * NOTE(review): all callbacks appear to be optional unless a subsystem
 * requires them — confirm per callback against the core's NULL checks.
 */
struct net_device_ops {
	/* device lifetime: init/uninit around registration, open/stop on up/down */
	int			(*ndo_init)(struct net_device *dev);
	void			(*ndo_uninit)(struct net_device *dev);
	int			(*ndo_open)(struct net_device *dev);
	int			(*ndo_stop)(struct net_device *dev);
	/* transmit path */
	netdev_tx_t		(*ndo_start_xmit)(struct sk_buff *skb,
						  struct net_device *dev);
	netdev_features_t	(*ndo_features_check)(struct sk_buff *skb,
						      struct net_device *dev,
						      netdev_features_t features);
	u16			(*ndo_select_queue)(struct net_device *dev,
						    struct sk_buff *skb,
						    struct net_device *sb_dev,
						    select_queue_fallback_t fallback);
	/* addressing and basic configuration */
	void			(*ndo_change_rx_flags)(struct net_device *dev,
						       int flags);
	void			(*ndo_set_rx_mode)(struct net_device *dev);
	int			(*ndo_set_mac_address)(struct net_device *dev,
						       void *addr);
	int			(*ndo_validate_addr)(struct net_device *dev);
	int			(*ndo_do_ioctl)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
	int			(*ndo_set_config)(struct net_device *dev,
						  struct ifmap *map);
	int			(*ndo_change_mtu)(struct net_device *dev,
						  int new_mtu);
	int			(*ndo_neigh_setup)(struct net_device *dev,
						   struct neigh_parms *);
	void			(*ndo_tx_timeout) (struct net_device *dev);

	/* statistics: 64-bit preferred path, optional per-attr offload stats,
	 * and the legacy unsigned-long variant */
	void			(*ndo_get_stats64)(struct net_device *dev,
						   struct rtnl_link_stats64 *storage);
	bool			(*ndo_has_offload_stats)(const struct net_device *dev, int attr_id);
	int			(*ndo_get_offload_stats)(int attr_id,
							 const struct net_device *dev,
							 void *attr_data);
	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);

	/* VLAN filter maintenance */
	int			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
						       __be16 proto, u16 vid);
	int			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
							__be16 proto, u16 vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
	void                    (*ndo_poll_controller)(struct net_device *dev);
	int			(*ndo_netpoll_setup)(struct net_device *dev,
						     struct netpoll_info *info);
	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
	/* SR-IOV virtual function management */
	int			(*ndo_set_vf_mac)(struct net_device *dev,
						  int queue, u8 *mac);
	int			(*ndo_set_vf_vlan)(struct net_device *dev,
						   int queue, u16 vlan,
						   u8 qos, __be16 proto);
	int			(*ndo_set_vf_rate)(struct net_device *dev,
						   int vf, int min_tx_rate,
						   int max_tx_rate);
	int			(*ndo_set_vf_spoofchk)(struct net_device *dev,
						       int vf, bool setting);
	int			(*ndo_set_vf_trust)(struct net_device *dev,
						    int vf, bool setting);
	int			(*ndo_get_vf_config)(struct net_device *dev,
						     int vf,
						     struct ifla_vf_info *ivf);
	int			(*ndo_set_vf_link_state)(struct net_device *dev,
							 int vf, int link_state);
	int			(*ndo_get_vf_stats)(struct net_device *dev,
						    int vf,
						    struct ifla_vf_stats
						    *vf_stats);
	int			(*ndo_set_vf_port)(struct net_device *dev,
						   int vf,
						   struct nlattr *port[]);
	int			(*ndo_get_vf_port)(struct net_device *dev,
						   int vf, struct sk_buff *skb);
	int			(*ndo_set_vf_guid)(struct net_device *dev,
						   int vf, u64 guid,
						   int guid_type);
	int			(*ndo_set_vf_rss_query_en)(
						   struct net_device *dev,
						   int vf, bool setting);
	/* traffic-control / qdisc offload entry point (enum tc_setup_type) */
	int			(*ndo_setup_tc)(struct net_device *dev,
						enum tc_setup_type type,
						void *type_data);
#if IS_ENABLED(CONFIG_FCOE)
	/* Fibre Channel over Ethernet offload */
	int			(*ndo_fcoe_enable)(struct net_device *dev);
	int			(*ndo_fcoe_disable)(struct net_device *dev);
	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
						      u16 xid,
						      struct scatterlist *sgl,
						      unsigned int sgc);
	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
						     u16 xid);
	int			(*ndo_fcoe_ddp_target)(struct net_device *dev,
						       u16 xid,
						       struct scatterlist *sgl,
						       unsigned int sgc);
	int			(*ndo_fcoe_get_hbainfo)(struct net_device *dev,
							struct netdev_fcoe_hbainfo *hbainfo);
#endif

#if IS_ENABLED(CONFIG_LIBFCOE)
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
						    u64 *wwn, int type);
#endif

#ifdef CONFIG_RFS_ACCEL
	/* accelerated RFS: program a hardware flow filter */
	int			(*ndo_rx_flow_steer)(struct net_device *dev,
						     const struct sk_buff *skb,
						     u16 rxq_index,
						     u32 flow_id);
#endif
	/* master/slave (bonding, team, bridge) management */
	int			(*ndo_add_slave)(struct net_device *dev,
						 struct net_device *slave_dev,
						 struct netlink_ext_ack *extack);
	int			(*ndo_del_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	/* feature negotiation */
	netdev_features_t	(*ndo_fix_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_set_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_neigh_construct)(struct net_device *dev,
						       struct neighbour *n);
	void			(*ndo_neigh_destroy)(struct net_device *dev,
						     struct neighbour *n);

	/* forwarding database (FDB) netlink operations */
	int			(*ndo_fdb_add)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid,
					       u16 flags,
					       struct netlink_ext_ack *extack);
	int			(*ndo_fdb_del)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid);
	int			(*ndo_fdb_dump)(struct sk_buff *skb,
						struct netlink_callback *cb,
						struct net_device *dev,
						struct net_device *filter_dev,
						int *idx);
	int			(*ndo_fdb_get)(struct sk_buff *skb,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid, u32 portid, u32 seq,
					       struct netlink_ext_ack *extack);
	/* bridge port netlink operations */
	int			(*ndo_bridge_setlink)(struct net_device *dev,
						      struct nlmsghdr *nlh,
						      u16 flags,
						      struct netlink_ext_ack *extack);
	int			(*ndo_bridge_getlink)(struct sk_buff *skb,
						      u32 pid, u32 seq,
						      struct net_device *dev,
						      u32 filter_mask,
						      int nlflags);
	int			(*ndo_bridge_dellink)(struct net_device *dev,
						      struct nlmsghdr *nlh,
						      u16 flags);
	int			(*ndo_change_carrier)(struct net_device *dev,
						      bool new_carrier);
	/* physical port identification */
	int			(*ndo_get_phys_port_id)(struct net_device *dev,
							struct netdev_phys_item_id *ppid);
	int			(*ndo_get_port_parent_id)(struct net_device *dev,
							  struct netdev_phys_item_id *ppid);
	int			(*ndo_get_phys_port_name)(struct net_device *dev,
							  char *name, size_t len);
	/* UDP tunnel (e.g. VXLAN/GENEVE) port notifications */
	void			(*ndo_udp_tunnel_add)(struct net_device *dev,
						      struct udp_tunnel_info *ti);
	void			(*ndo_udp_tunnel_del)(struct net_device *dev,
						      struct udp_tunnel_info *ti);
	/* L2 forwarding offload station management */
	void*			(*ndo_dfwd_add_station)(struct net_device *pdev,
							struct net_device *dev);
	void			(*ndo_dfwd_del_station)(struct net_device *pdev,
							void *priv);

	int			(*ndo_get_lock_subclass)(struct net_device *dev);
	int			(*ndo_set_tx_maxrate)(struct net_device *dev,
						      int queue_index,
						      u32 maxrate);
	int			(*ndo_get_iflink)(const struct net_device *dev);
	int			(*ndo_change_proto_down)(struct net_device *dev,
							 bool proto_down);
	int			(*ndo_fill_metadata_dst)(struct net_device *dev,
						       struct sk_buff *skb);
	void			(*ndo_set_rx_headroom)(struct net_device *dev,
						       int needed_headroom);
	/* XDP / AF_XDP hooks */
	int			(*ndo_bpf)(struct net_device *dev,
					   struct netdev_bpf *bpf);
	int			(*ndo_xdp_xmit)(struct net_device *dev, int n,
						struct xdp_frame **xdp,
						u32 flags);
	int			(*ndo_xsk_async_xmit)(struct net_device *dev,
						      u32 queue_id);
	struct devlink *	(*ndo_get_devlink)(struct net_device *dev);
};
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
/*
 * Kernel-internal device flags, stored in net_device->priv_flags.
 * Unlike IFF_UP & friends in the flags word, these are not part of the
 * userspace ABI.  All 31 usable bits of the int-sized field are taken.
 */
enum netdev_priv_flags {
	IFF_802_1Q_VLAN			= 1<<0,
	IFF_EBRIDGE			= 1<<1,
	IFF_BONDING			= 1<<2,
	IFF_ISATAP			= 1<<3,
	IFF_WAN_HDLC			= 1<<4,
	IFF_XMIT_DST_RELEASE		= 1<<5,
	IFF_DONT_BRIDGE			= 1<<6,
	IFF_DISABLE_NETPOLL		= 1<<7,
	IFF_MACVLAN_PORT		= 1<<8,
	IFF_BRIDGE_PORT			= 1<<9,
	IFF_OVS_DATAPATH		= 1<<10,
	IFF_TX_SKB_SHARING		= 1<<11,
	IFF_UNICAST_FLT			= 1<<12,
	IFF_TEAM_PORT			= 1<<13,
	IFF_SUPP_NOFCS			= 1<<14,
	IFF_LIVE_ADDR_CHANGE		= 1<<15,
	IFF_MACVLAN			= 1<<16,
	IFF_XMIT_DST_RELEASE_PERM	= 1<<17,
	IFF_L3MDEV_MASTER		= 1<<18,
	IFF_NO_QUEUE			= 1<<19,
	IFF_OPENVSWITCH			= 1<<20,
	IFF_L3MDEV_SLAVE		= 1<<21,
	IFF_TEAM			= 1<<22,
	IFF_RXFH_CONFIGURED		= 1<<23,
	IFF_PHONY_HEADROOM		= 1<<24,
	IFF_MACSEC			= 1<<25,
	IFF_NO_RX_HANDLER		= 1<<26,
	IFF_FAILOVER			= 1<<27,
	IFF_FAILOVER_SLAVE		= 1<<28,
	IFF_L3MDEV_RX_HANDLER		= 1<<29,
	IFF_LIVE_RENAME_OK		= 1<<30,
};
1538
1539#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
1540#define IFF_EBRIDGE IFF_EBRIDGE
1541#define IFF_BONDING IFF_BONDING
1542#define IFF_ISATAP IFF_ISATAP
1543#define IFF_WAN_HDLC IFF_WAN_HDLC
1544#define IFF_XMIT_DST_RELEASE IFF_XMIT_DST_RELEASE
1545#define IFF_DONT_BRIDGE IFF_DONT_BRIDGE
1546#define IFF_DISABLE_NETPOLL IFF_DISABLE_NETPOLL
1547#define IFF_MACVLAN_PORT IFF_MACVLAN_PORT
1548#define IFF_BRIDGE_PORT IFF_BRIDGE_PORT
1549#define IFF_OVS_DATAPATH IFF_OVS_DATAPATH
1550#define IFF_TX_SKB_SHARING IFF_TX_SKB_SHARING
1551#define IFF_UNICAST_FLT IFF_UNICAST_FLT
1552#define IFF_TEAM_PORT IFF_TEAM_PORT
1553#define IFF_SUPP_NOFCS IFF_SUPP_NOFCS
1554#define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE
1555#define IFF_MACVLAN IFF_MACVLAN
1556#define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM
1557#define IFF_L3MDEV_MASTER IFF_L3MDEV_MASTER
1558#define IFF_NO_QUEUE IFF_NO_QUEUE
1559#define IFF_OPENVSWITCH IFF_OPENVSWITCH
1560#define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE
1561#define IFF_TEAM IFF_TEAM
1562#define IFF_RXFH_CONFIGURED IFF_RXFH_CONFIGURED
1563#define IFF_MACSEC IFF_MACSEC
1564#define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER
1565#define IFF_FAILOVER IFF_FAILOVER
1566#define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE
1567#define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER
1568#define IFF_LIVE_RENAME_OK IFF_LIVE_RENAME_OK
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
/*
 * struct net_device - kernel representation of one network interface.
 *
 * Field grouping follows the layout below: RX-hot fields end at
 * index_hlist, TX-hot fields start at the cacheline-aligned _tx pointer.
 */
struct net_device {
	char			name[IFNAMSIZ];	/* interface name, e.g. "eth0" */
	struct hlist_node	name_hlist;	/* linkage in the name hash table */
	struct dev_ifalias __rcu *ifalias;	/* SNMP alias, RCU-managed */

	/* Legacy shared-memory / I/O description for old-style drivers. */
	unsigned long		mem_end;
	unsigned long		mem_start;
	unsigned long		base_addr;
	int			irq;

	/* Bitmap of __LINK_STATE_* bits (generic device state). */
	unsigned long		state;

	struct list_head	dev_list;	/* entry in net->dev_base_head */
	struct list_head	napi_list;
	struct list_head	unreg_list;
	struct list_head	close_list;
	struct list_head	ptype_all;	/* device-bound all-protocol taps */
	struct list_head	ptype_specific;	/* device-bound protocol handlers */

	struct {
		struct list_head upper;
		struct list_head lower;
	} adj_list;			/* adjacent (stacked) upper/lower devices */

	/* Feature bitmasks. */
	netdev_features_t	features;		/* currently active */
	netdev_features_t	hw_features;		/* user-changeable */
	netdev_features_t	wanted_features;	/* user-requested */
	netdev_features_t	vlan_features;		/* inheritable by VLAN devices */
	netdev_features_t	hw_enc_features;	/* for encapsulated packets */
	netdev_features_t	mpls_features;		/* for MPLS packets */
	netdev_features_t	gso_partial_features;

	int			ifindex;	/* interface index */
	int			group;		/* netdev group number */

	struct net_device_stats	stats;		/* legacy aggregate statistics */

	/* Drop counters not accounted by the driver. */
	atomic_long_t		rx_dropped;
	atomic_long_t		tx_dropped;
	atomic_long_t		rx_nohandler;

	/* Carrier state transition counters. */
	atomic_t		carrier_up_count;
	atomic_t		carrier_down_count;

#ifdef CONFIG_WIRELESS_EXT
	const struct iw_handler_def *wireless_handlers;
	struct iw_public_data	*wireless_data;
#endif
	const struct net_device_ops *netdev_ops;	/* driver callback table */
	const struct ethtool_ops *ethtool_ops;
#ifdef CONFIG_NET_L3_MASTER_DEV
	const struct l3mdev_ops	*l3mdev_ops;
#endif
#if IS_ENABLED(CONFIG_IPV6)
	const struct ndisc_ops	*ndisc_ops;
#endif

#ifdef CONFIG_XFRM_OFFLOAD
	const struct xfrmdev_ops *xfrmdev_ops;
#endif

#if IS_ENABLED(CONFIG_TLS_DEVICE)
	const struct tlsdev_ops *tlsdev_ops;
#endif

	const struct header_ops *header_ops;	/* L2 header build/parse ops */

	unsigned int		flags;		/* IFF_* flags, visible to userspace */
	unsigned int		priv_flags;	/* IFF_* flags, kernel-internal */

	unsigned short		gflags;
	unsigned short		padded;		/* padding added by the allocator */

	unsigned char		operstate;	/* RFC 2863 operational state */
	unsigned char		link_mode;

	unsigned char		if_port;	/* selectable media type */
	unsigned char		dma;		/* legacy DMA channel */

	unsigned int		mtu;
	unsigned int		min_mtu;	/* admissible MTU range */
	unsigned int		max_mtu;
	unsigned short		type;		/* ARPHRD_* hardware type */
	unsigned short		hard_header_len; /* maximum L2 header length */
	unsigned char		min_header_len;	/* minimum valid L2 header length */

	unsigned short		needed_headroom; /* extra headroom the driver wants */
	unsigned short		needed_tailroom;

	/* Interface address information. */
	unsigned char		perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
	unsigned char		addr_assign_type;
	unsigned char		addr_len;	/* hardware address length */
	unsigned short		neigh_priv_len;
	unsigned short		dev_id;
	unsigned short		dev_port;
	spinlock_t		addr_list_lock;	/* protects the uc/mc lists below */
	unsigned char		name_assign_type;
	bool			uc_promisc;
	struct netdev_hw_addr_list	uc;	/* secondary unicast addresses */
	struct netdev_hw_addr_list	mc;	/* multicast addresses */
	struct netdev_hw_addr_list	dev_addrs; /* list of device hw addresses */

#ifdef CONFIG_SYSFS
	struct kset		*queues_kset;
#endif
	unsigned int		promiscuity;	/* promiscuous mode refcount */
	unsigned int		allmulti;	/* allmulti mode refcount */

	/* Protocol-specific per-device pointers. */
#if IS_ENABLED(CONFIG_VLAN_8021Q)
	struct vlan_info __rcu	*vlan_info;
#endif
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_port		*dsa_ptr;
#endif
#if IS_ENABLED(CONFIG_TIPC)
	struct tipc_bearer __rcu *tipc_ptr;
#endif
#if IS_ENABLED(CONFIG_IRDA) || IS_ENABLED(CONFIG_ATALK)
	void			*atalk_ptr;
#endif
	struct in_device __rcu	*ip_ptr;
#if IS_ENABLED(CONFIG_DECNET)
	struct dn_dev __rcu	*dn_ptr;
#endif
	struct inet6_dev __rcu	*ip6_ptr;
#if IS_ENABLED(CONFIG_AX25)
	void			*ax25_ptr;
#endif
	struct wireless_dev	*ieee80211_ptr;
	struct wpan_dev		*ieee802154_ptr;
#if IS_ENABLED(CONFIG_MPLS_ROUTING)
	struct mpls_dev __rcu	*mpls_ptr;
#endif

	/* Cache-hot RX-path data. */
	unsigned char		*dev_addr;	/* current hardware address */

	struct netdev_rx_queue	*_rx;		/* RX queue array */
	unsigned int		num_rx_queues;
	unsigned int		real_num_rx_queues; /* currently active RX queues */

	struct bpf_prog __rcu	*xdp_prog;	/* attached XDP program, if any */
	unsigned long		gro_flush_timeout;
	rx_handler_func_t __rcu	*rx_handler;	/* hook, e.g. bridge/bonding */
	void __rcu		*rx_handler_data;

#ifdef CONFIG_NET_CLS_ACT
	struct mini_Qdisc __rcu	*miniq_ingress;
#endif
	struct netdev_queue __rcu *ingress_queue;
#ifdef CONFIG_NETFILTER_INGRESS
	struct nf_hook_entries __rcu *nf_hooks_ingress;
#endif

	unsigned char		broadcast[MAX_ADDR_LEN]; /* hw broadcast address */
#ifdef CONFIG_RFS_ACCEL
	struct cpu_rmap		*rx_cpu_rmap;
#endif
	struct hlist_node	index_hlist;	/* linkage in the ifindex hash table */

	/* Cache-hot TX-path data. */
	struct netdev_queue	*_tx ____cacheline_aligned_in_smp; /* TX queue array */
	unsigned int		num_tx_queues;
	unsigned int		real_num_tx_queues; /* currently active TX queues */
	struct Qdisc		*qdisc;		/* root qdisc */
#ifdef CONFIG_NET_SCHED
	DECLARE_HASHTABLE	(qdisc_hash, 4);
#endif
	unsigned int		tx_queue_len;
	spinlock_t		tx_global_lock;
	int			watchdog_timeo;	/* TX watchdog timeout, in jiffies */

#ifdef CONFIG_XPS
	struct xps_dev_maps __rcu *xps_cpus_map;
	struct xps_dev_maps __rcu *xps_rxqs_map;
#endif
#ifdef CONFIG_NET_CLS_ACT
	struct mini_Qdisc __rcu	*miniq_egress;
#endif

	struct timer_list	watchdog_timer;

	int __percpu		*pcpu_refcnt;	/* per-cpu reference count */
	struct list_head	todo_list;

	struct list_head	link_watch_list;

	/* Registration state machine. */
	enum { NETREG_UNINITIALIZED=0,
	       NETREG_REGISTERED,	/* completed register_netdevice() */
	       NETREG_UNREGISTERING,	/* unregister in progress */
	       NETREG_UNREGISTERED,	/* unregistered, not yet freed */
	       NETREG_RELEASED,		/* free_netdev() has been called */
	       NETREG_DUMMY,		/* set up by init_dummy_netdev() */
	} reg_state:8;

	bool dismantle;			/* device is being torn down */

	enum {
		RTNL_LINK_INITIALIZED,
		RTNL_LINK_INITIALIZING,
	} rtnl_link_state:16;

	bool needs_free_netdev;		/* call free_netdev() after unregister */
	void (*priv_destructor)(struct net_device *dev);

#ifdef CONFIG_NETPOLL
	struct netpoll_info __rcu	*npinfo;
#endif

	possible_net_t			nd_net;	/* owning network namespace */

	/* Mid-layer private pointer / common per-cpu statistics shapes. */
	union {
		void					*ml_priv;
		struct pcpu_lstats __percpu		*lstats;
		struct pcpu_sw_netstats __percpu	*tstats;
		struct pcpu_dstats __percpu		*dstats;
	};

#if IS_ENABLED(CONFIG_GARP)
	struct garp_port __rcu	*garp_port;
#endif
#if IS_ENABLED(CONFIG_MRP)
	struct mrp_port __rcu	*mrp_port;
#endif

	struct device		dev;	/* embedded class device (sysfs etc.) */
	const struct attribute_group *sysfs_groups[4];
	const struct attribute_group *sysfs_rx_queue_group;

	const struct rtnl_link_ops *rtnl_link_ops;

	/* Per-device GSO limits. */
#define GSO_MAX_SIZE		65536
	unsigned int		gso_max_size;
#define GSO_MAX_SEGS		65535
	u16			gso_max_segs;

#ifdef CONFIG_DCB
	const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
	/* num_tc < 0 encodes a subordinate channel (see netdev_get_sb_channel()). */
	s16			num_tc;
	struct netdev_tc_txq	tc_to_txq[TC_MAX_QUEUE];
	u8			prio_tc_map[TC_BITMASK + 1];

#if IS_ENABLED(CONFIG_FCOE)
	unsigned int		fcoe_ddp_xid;
#endif
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
	struct netprio_map __rcu *priomap;
#endif
	struct phy_device	*phydev;	/* attached PHY device, if any */
	struct sfp_bus		*sfp_bus;
	struct lock_class_key	*qdisc_tx_busylock;
	struct lock_class_key	*qdisc_running_key;
	bool			proto_down;
	unsigned		wol_enabled:1;	/* Wake-on-LAN enabled */
};
2067#define to_net_dev(d) container_of(d, struct net_device, dev)
2068
2069static inline bool netif_elide_gro(const struct net_device *dev)
2070{
2071 if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog)
2072 return true;
2073 return false;
2074}
2075
2076#define NETDEV_ALIGN 32
2077
2078static inline
2079int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
2080{
2081 return dev->prio_tc_map[prio & TC_BITMASK];
2082}
2083
2084static inline
2085int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
2086{
2087 if (tc >= dev->num_tc)
2088 return -EINVAL;
2089
2090 dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
2091 return 0;
2092}
2093
2094int netdev_txq_to_tc(struct net_device *dev, unsigned int txq);
2095void netdev_reset_tc(struct net_device *dev);
2096int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset);
2097int netdev_set_num_tc(struct net_device *dev, u8 num_tc);
2098
/* Number of traffic classes currently configured on @dev. */
static inline
int netdev_get_num_tc(struct net_device *dev)
{
	return dev->num_tc;
}
2104
2105void netdev_unbind_sb_channel(struct net_device *dev,
2106 struct net_device *sb_dev);
2107int netdev_bind_sb_channel_queue(struct net_device *dev,
2108 struct net_device *sb_dev,
2109 u8 tc, u16 count, u16 offset);
2110int netdev_set_sb_channel(struct net_device *dev, u16 channel);
2111static inline int netdev_get_sb_channel(struct net_device *dev)
2112{
2113 return max_t(int, -dev->num_tc, 0);
2114}
2115
2116static inline
2117struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
2118 unsigned int index)
2119{
2120 return &dev->_tx[index];
2121}
2122
2123static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
2124 const struct sk_buff *skb)
2125{
2126 return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
2127}
2128
2129static inline void netdev_for_each_tx_queue(struct net_device *dev,
2130 void (*f)(struct net_device *,
2131 struct netdev_queue *,
2132 void *),
2133 void *arg)
2134{
2135 unsigned int i;
2136
2137 for (i = 0; i < dev->num_tx_queues; i++)
2138 f(dev, &dev->_tx[i], arg);
2139}
2140
/*
 * Give a device's qdisc and address-list locks dedicated lockdep classes
 * so that lock nesting across stacked devices does not produce false
 * positives.  The keys are static: one shared class per expansion site,
 * as lockdep requires.
 */
#define netdev_lockdep_set_classes(dev)				\
{								\
	static struct lock_class_key qdisc_tx_busylock_key;	\
	static struct lock_class_key qdisc_running_key;		\
	static struct lock_class_key qdisc_xmit_lock_key;	\
	static struct lock_class_key dev_addr_list_lock_key;	\
	unsigned int i;						\
								\
	(dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key;	\
	(dev)->qdisc_running_key = &qdisc_running_key;		\
	lockdep_set_class(&(dev)->addr_list_lock,		\
			  &dev_addr_list_lock_key);		\
	for (i = 0; i < (dev)->num_tx_queues; i++)		\
		lockdep_set_class(&(dev)->_tx[i]._xmit_lock,	\
				  &qdisc_xmit_lock_key);	\
}
2157
2158struct netdev_queue *netdev_pick_tx(struct net_device *dev,
2159 struct sk_buff *skb,
2160 struct net_device *sb_dev);
2161
2162
2163
2164
2165static inline unsigned netdev_get_fwd_headroom(struct net_device *dev)
2166{
2167 return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom;
2168}
2169
2170static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr)
2171{
2172 if (dev->netdev_ops->ndo_set_rx_headroom)
2173 dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr);
2174}
2175
2176
/* Reset RX headroom: passes -1, which drivers treat as "restore default"
 * (NOTE(review): semantics of -1 come from ndo_set_rx_headroom — confirm). */
static inline void netdev_reset_rx_headroom(struct net_device *dev)
{
	netdev_set_rx_headroom(dev, -1);
}
2181
2182
2183
2184
/* Network namespace this device lives in. */
static inline
struct net *dev_net(const struct net_device *dev)
{
	return read_pnet(&dev->nd_net);
}
2190
/* Set the network namespace this device lives in. */
static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
	write_pnet(&dev->nd_net, net);
}
2196
2197
2198
2199
2200
2201
2202
/*
 * Driver-private area of @dev: it is laid out immediately after the
 * net_device structure, aligned to NETDEV_ALIGN bytes.
 */
static inline void *netdev_priv(const struct net_device *dev)
{
	return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
}
2207
2208
2209
2210
2211#define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
2212
2213
2214
2215
2216
2217#define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
2218
2219
2220
2221
2222#define NAPI_POLL_WEIGHT 64
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232
2233
2234void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2235 int (*poll)(struct napi_struct *, int), int weight);
2236
2237
2238
2239
2240
2241
2242
2243
2244
2245
2246
2247
/*
 * netif_tx_napi_add - register a NAPI instance used for TX completions.
 * Marks the instance as not usable by busy polling before registering it
 * exactly like netif_napi_add().
 */
static inline void netif_tx_napi_add(struct net_device *dev,
				     struct napi_struct *napi,
				     int (*poll)(struct napi_struct *, int),
				     int weight)
{
	set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
	netif_napi_add(dev, napi, poll, weight);
}
2256
2257
2258
2259
2260
2261
2262
2263void netif_napi_del(struct napi_struct *napi);
2264
/* Per-skb GRO state, stored in skb->cb (see NAPI_GRO_CB()). */
struct napi_gro_cb {
	/* Fast-path pointer to the first-fragment header data
	 * (used by skb_gro_header_fast()); NULL once invalidated. */
	void *frag0;

	/* Length of the data reachable through frag0
	 * (bounds check in skb_gro_header_hard()). */
	unsigned int frag0_len;

	/* Current parse position as an offset from skb->data
	 * (see skb_gro_offset()/skb_gro_pull()). */
	int data_offset;

	/* Nonzero forces this skb out of GRO merging. */
	u16	flush;

	/* Saved IP ID check state — presumably for segment coalescing;
	 * verify against the IP GRO handlers. */
	u16	flush_id;

	/* Number of segments aggregated so far. */
	u16	count;

	/* Start offset of a remote-checksum region
	 * (compared in skb_at_gro_remcsum_start()). */
	u16	gro_remcsum_start;

	/* Timestamp (jiffies) used to age out held packets. */
	unsigned long age;

	/* Saved protocol id — NOTE(review): consumer not visible here. */
	u16	proto;

	/* Set when this skb matches an already-held flow. */
	u8	same_flow:1;

	/* An encapsulation layer has already been parsed. */
	u8	encap_mark:1;

	/* ->csum holds a valid checksum over the GRO region. */
	u8	csum_valid:1;

	/* Remaining CHECKSUM_UNNECESSARY levels
	 * (consumed by skb_gro_incr_csum_unnecessary()). */
	u8	csum_cnt:3;

	/* How to free the merged skb, if at all. */
	u8	free:2;
#define NAPI_GRO_FREE		  1
#define NAPI_GRO_FREE_STOLEN_HEAD 2

	/* Flow is IPv6. */
	u8	is_ipv6:1;

	/* Packet came through a FOU tunnel. */
	u8	is_fou:1;

	/* IP ID may be treated as fixed (DF set) — see flush_id handling. */
	u8	is_atomic:1;

	/* Nesting depth of chained gro_receive callbacks
	 * (bounded by GRO_RECURSION_LIMIT). */
	u8	recursion_counter:4;

	/* 1 bit hole */

	/* Running checksum, updated as headers are pulled. */
	__wsum	csum;

	/* Tail of the chain of merged skbs. */
	struct sk_buff *last;
};
2330
/* Access the GRO control block stored in skb->cb. */
#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)

/* Maximum nesting depth of chained gro_receive callbacks. */
#define GRO_RECURSION_LIMIT 15
/* Bump the recursion counter; true when the limit has been reached. */
static inline int gro_recursion_inc_test(struct sk_buff *skb)
{
	return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
}
2338
typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *);
/*
 * Invoke a nested gro_receive callback with recursion protection:
 * past GRO_RECURSION_LIMIT the skb is flagged for flush and NULL is
 * returned instead of recursing further.
 */
static inline struct sk_buff *call_gro_receive(gro_receive_t cb,
					       struct list_head *head,
					       struct sk_buff *skb)
{
	if (unlikely(gro_recursion_inc_test(skb))) {
		NAPI_GRO_CB(skb)->flush |= 1;
		return NULL;
	}

	return cb(head, skb);
}
2351
typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *,
					    struct sk_buff *);
/* Like call_gro_receive(), for callbacks that also take a struct sock. */
static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
						  struct sock *sk,
						  struct list_head *head,
						  struct sk_buff *skb)
{
	if (unlikely(gro_recursion_inc_test(skb))) {
		NAPI_GRO_CB(skb)->flush |= 1;
		return NULL;
	}

	return cb(sk, head, skb);
}
2366
/* A registered L3 protocol handler (see dev_add_pack()/dev_remove_pack()). */
struct packet_type {
	__be16			type;	/* This is really htons(ether_type). */
	bool			ignore_outgoing;
	struct net_device	*dev;	/* NULL is wildcarded here	     */
	int			(*func) (struct sk_buff *,
					 struct net_device *,
					 struct packet_type *,
					 struct net_device *);
	void			(*list_func) (struct list_head *,
					      struct packet_type *,
					      struct net_device *);
	bool			(*id_match)(struct packet_type *ptype,
					    struct sock *sk);
	void			*af_packet_priv;
	struct list_head	list;	/* linkage in the ptype table/lists */
};
2383
/* GSO/GRO callbacks a protocol provides for offload processing. */
struct offload_callbacks {
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						netdev_features_t features);
	struct sk_buff		*(*gro_receive)(struct list_head *head,
						struct sk_buff *skb);
	int			(*gro_complete)(struct sk_buff *skb, int nhoff);
};
2391
/* A registered offload handler (see dev_add_offload()/dev_remove_offload()). */
struct packet_offload {
	__be16			 type;	/* This is really htons(ether_type). */
	u16			 priority;	/* ordering within the offload list */
	struct offload_callbacks callbacks;
	struct list_head	 list;
};
2398
2399
/* Per-cpu software RX/TX counters, guarded by a u64_stats_sync. */
struct pcpu_sw_netstats {
	u64     rx_packets;
	u64     rx_bytes;
	u64     tx_packets;
	u64     tx_bytes;
	struct u64_stats_sync   syncp;
} __aligned(4 * sizeof(u64));
2407
/* Per-cpu loopback-style counters, guarded by a u64_stats_sync. */
struct pcpu_lstats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
} __aligned(2 * sizeof(u64));
2413
/*
 * Allocate per-cpu statistics of @type and initialize every cpu's
 * u64_stats syncp.  Evaluates to the per-cpu pointer, or NULL if the
 * allocation failed.
 */
#define __netdev_alloc_pcpu_stats(type, gfp)				\
({									\
	typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
	if (pcpu_stats)	{						\
		int __cpu;						\
		for_each_possible_cpu(__cpu) {				\
			typeof(type) *stat;				\
			stat = per_cpu_ptr(pcpu_stats, __cpu);		\
			u64_stats_init(&stat->syncp);			\
		}							\
	}								\
	pcpu_stats;							\
})

/* GFP_KERNEL convenience wrapper around __netdev_alloc_pcpu_stats(). */
#define netdev_alloc_pcpu_stats(type)					\
	__netdev_alloc_pcpu_stats(type, GFP_KERNEL)
2430
/* TX policy of a link-aggregation (bonding/team) master. */
enum netdev_lag_tx_type {
	NETDEV_LAG_TX_TYPE_UNKNOWN,
	NETDEV_LAG_TX_TYPE_RANDOM,
	NETDEV_LAG_TX_TYPE_BROADCAST,
	NETDEV_LAG_TX_TYPE_ROUNDROBIN,
	NETDEV_LAG_TX_TYPE_ACTIVEBACKUP,
	NETDEV_LAG_TX_TYPE_HASH,
};

/* Hash policy used when the TX type is NETDEV_LAG_TX_TYPE_HASH. */
enum netdev_lag_hash {
	NETDEV_LAG_HASH_NONE,
	NETDEV_LAG_HASH_L2,
	NETDEV_LAG_HASH_L34,
	NETDEV_LAG_HASH_L23,
	NETDEV_LAG_HASH_E23,
	NETDEV_LAG_HASH_E34,
	NETDEV_LAG_HASH_UNKNOWN,
};

/* Info passed to lower devices when they are enslaved to a LAG master. */
struct netdev_lag_upper_info {
	enum netdev_lag_tx_type tx_type;
	enum netdev_lag_hash hash_type;
};

/* Per-slave state reported through NETDEV_CHANGELOWERSTATE. */
struct netdev_lag_lower_state_info {
	u8 link_up : 1,
	   tx_enabled : 1;
};
2459
2460#include <linux/notifier.h>
2461
2462
2463
2464
2465
/*
 * Events delivered through the netdevice notifier chain
 * (see register_netdevice_notifier()).  Values are part of the
 * notifier ABI within the kernel: append only.
 */
enum netdev_cmd {
	NETDEV_UP	= 1,	/* For now you can't veto a device up/down */
	NETDEV_DOWN,
	NETDEV_REBOOT,		/* Tell a protocol stack a network interface
				   detected a hardware crash and restarted
				   - we can use this eg to kick tcp sessions
				   once done */
	NETDEV_CHANGE,		/* Notify device state change */
	NETDEV_REGISTER,
	NETDEV_UNREGISTER,
	NETDEV_CHANGEMTU,	/* notify after mtu change happened */
	NETDEV_CHANGEADDR,
	NETDEV_PRE_CHANGEADDR,	/* notify before the address change */
	NETDEV_GOING_DOWN,
	NETDEV_CHANGENAME,
	NETDEV_FEAT_CHANGE,
	NETDEV_BONDING_FAILOVER,
	NETDEV_PRE_UP,
	NETDEV_PRE_TYPE_CHANGE,
	NETDEV_POST_TYPE_CHANGE,
	NETDEV_POST_INIT,
	NETDEV_RELEASE,
	NETDEV_NOTIFY_PEERS,
	NETDEV_JOIN,
	NETDEV_CHANGEUPPER,
	NETDEV_RESEND_IGMP,
	NETDEV_PRECHANGEMTU,	/* notify before mtu change happened */
	NETDEV_CHANGEINFODATA,
	NETDEV_BONDING_INFO,
	NETDEV_PRECHANGEUPPER,
	NETDEV_CHANGELOWERSTATE,
	NETDEV_UDP_TUNNEL_PUSH_INFO,
	NETDEV_UDP_TUNNEL_DROP_INFO,
	NETDEV_CHANGE_TX_QUEUE_LEN,
	NETDEV_CVLAN_FILTER_PUSH_INFO,
	NETDEV_CVLAN_FILTER_DROP_INFO,
	NETDEV_SVLAN_FILTER_PUSH_INFO,
	NETDEV_SVLAN_FILTER_DROP_INFO,
};
2505const char *netdev_cmd_to_name(enum netdev_cmd cmd);
2506
2507int register_netdevice_notifier(struct notifier_block *nb);
2508int unregister_netdevice_notifier(struct notifier_block *nb);
2509
/* Base payload carried by every netdevice notifier event. */
struct netdev_notifier_info {
	struct net_device	*dev;
	struct netlink_ext_ack	*extack;
};

/* Extended payload: the new value accompanying the event (e.g. MTU). */
struct netdev_notifier_info_ext {
	struct netdev_notifier_info info; /* must be first */
	union {
		u32 mtu;
	} ext;
};

/* NETDEV_CHANGE payload: which IFF_* flags changed. */
struct netdev_notifier_change_info {
	struct netdev_notifier_info info; /* must be first */
	unsigned int flags_changed;
};

/* NETDEV_(PRE)CHANGEUPPER payload. */
struct netdev_notifier_changeupper_info {
	struct netdev_notifier_info info; /* must be first */
	struct net_device *upper_dev; /* new upper dev */
	bool master; /* is upper dev master */
	bool linking; /* is the notification for link or unlink */
	void *upper_info; /* upper dev info */
};

/* NETDEV_CHANGELOWERSTATE payload. */
struct netdev_notifier_changelowerstate_info {
	struct netdev_notifier_info info; /* must be first */
	void *lower_state_info; /* is lower dev state */
};

/* NETDEV_PRE_CHANGEADDR payload: the address about to be set. */
struct netdev_notifier_pre_changeaddr_info {
	struct netdev_notifier_info info; /* must be first */
	const unsigned char *dev_addr;
};
2544
2545static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
2546 struct net_device *dev)
2547{
2548 info->dev = dev;
2549 info->extack = NULL;
2550}
2551
/* Device the notifier event refers to. */
static inline struct net_device *
netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
{
	return info->dev;
}
2557
/* Extended-ack attached to the notifier event, or NULL. */
static inline struct netlink_ext_ack *
netdev_notifier_info_to_extack(const struct netdev_notifier_info *info)
{
	return info->extack;
}
2563
2564int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
2565
2566
2567extern rwlock_t dev_base_lock;
2568
/*
 * Iterators over all devices in a namespace.  Plain variants require
 * dev_base_lock or RTNL; _rcu variants require rcu_read_lock().
 */
#define for_each_netdev(net, d)		\
		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_reverse(net, d)	\
		list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_rcu(net, d)		\
		list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n)	\
		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d)		\
		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue_rcu(net, d)		\
	list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
/* Iterate over init_net devices whose master upper device is @bond. */
#define for_each_netdev_in_bond_rcu(bond, slave)	\
		for_each_netdev_rcu(&init_net, slave)	\
			if (netdev_master_upper_dev_get_rcu(slave) == (bond))
/* Map a dev_list node back to its net_device. */
#define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)
2585
2586static inline struct net_device *next_net_device(struct net_device *dev)
2587{
2588 struct list_head *lh;
2589 struct net *net;
2590
2591 net = dev_net(dev);
2592 lh = dev->dev_list.next;
2593 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2594}
2595
2596static inline struct net_device *next_net_device_rcu(struct net_device *dev)
2597{
2598 struct list_head *lh;
2599 struct net *net;
2600
2601 net = dev_net(dev);
2602 lh = rcu_dereference(list_next_rcu(&dev->dev_list));
2603 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2604}
2605
2606static inline struct net_device *first_net_device(struct net *net)
2607{
2608 return list_empty(&net->dev_base_head) ? NULL :
2609 net_device_entry(net->dev_base_head.next);
2610}
2611
2612static inline struct net_device *first_net_device_rcu(struct net *net)
2613{
2614 struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
2615
2616 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2617}
2618
2619int netdev_boot_setup_check(struct net_device *dev);
2620unsigned long netdev_boot_base(const char *prefix, int unit);
2621struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
2622 const char *hwaddr);
2623struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
2624struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
2625void dev_add_pack(struct packet_type *pt);
2626void dev_remove_pack(struct packet_type *pt);
2627void __dev_remove_pack(struct packet_type *pt);
2628void dev_add_offload(struct packet_offload *po);
2629void dev_remove_offload(struct packet_offload *po);
2630
2631int dev_get_iflink(const struct net_device *dev);
2632int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
2633struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
2634 unsigned short mask);
2635struct net_device *dev_get_by_name(struct net *net, const char *name);
2636struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
2637struct net_device *__dev_get_by_name(struct net *net, const char *name);
2638int dev_alloc_name(struct net_device *dev, const char *name);
2639int dev_open(struct net_device *dev, struct netlink_ext_ack *extack);
2640void dev_close(struct net_device *dev);
2641void dev_close_many(struct list_head *head, bool unlink);
2642void dev_disable_lro(struct net_device *dev);
2643int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
2644u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
2645 struct net_device *sb_dev,
2646 select_queue_fallback_t fallback);
2647u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
2648 struct net_device *sb_dev,
2649 select_queue_fallback_t fallback);
2650int dev_queue_xmit(struct sk_buff *skb);
2651int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
2652int dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
2653int register_netdevice(struct net_device *dev);
2654void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
2655void unregister_netdevice_many(struct list_head *head);
/* Unregister @dev immediately (no batching list); caller holds RTNL. */
static inline void unregister_netdevice(struct net_device *dev)
{
	unregister_netdevice_queue(dev, NULL);
}
2660
2661int netdev_refcnt_read(const struct net_device *dev);
2662void free_netdev(struct net_device *dev);
2663void netdev_freemem(struct net_device *dev);
2664void synchronize_net(void);
2665int init_dummy_netdev(struct net_device *dev);
2666
/* Per-cpu depth counter guarding against recursive dev_queue_xmit() calls. */
DECLARE_PER_CPU(int, xmit_recursion);
#define XMIT_RECURSION_LIMIT	10
2669
/* Current transmit recursion depth on this cpu. */
static inline int dev_recursion_level(void)
{
	return this_cpu_read(xmit_recursion);
}
2674
2675struct net_device *dev_get_by_index(struct net *net, int ifindex);
2676struct net_device *__dev_get_by_index(struct net *net, int ifindex);
2677struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
2678struct net_device *dev_get_by_napi_id(unsigned int napi_id);
2679int netdev_get_name(struct net *net, char *name, int ifindex);
2680int dev_restart(struct net_device *dev);
2681int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
2682
/* Current GRO parse offset relative to skb->data. */
static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->data_offset;
}
2687
/* Bytes remaining past the current GRO parse offset. */
static inline unsigned int skb_gro_len(const struct sk_buff *skb)
{
	return skb->len - NAPI_GRO_CB(skb)->data_offset;
}
2692
/* Advance the GRO parse offset by @len bytes (no bounds check). */
static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
{
	NAPI_GRO_CB(skb)->data_offset += len;
}
2697
/*
 * Fast-path header access via the frag0 pointer.  Only valid when
 * skb_gro_header_hard() says the needed length is available.
 */
static inline void *skb_gro_header_fast(struct sk_buff *skb,
					unsigned int offset)
{
	return NAPI_GRO_CB(skb)->frag0 + offset;
}
2703
/* True when @hlen bytes are NOT reachable through frag0 (slow path needed). */
static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
{
	return NAPI_GRO_CB(skb)->frag0_len < hlen;
}
2708
/* Disable the frag0 fast path (e.g. after the head has been reallocated). */
static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
{
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;
}
2714
/*
 * Slow-path header access: pull @hlen bytes into the linear area and
 * return a pointer at @offset into it, or NULL if the pull fails.
 * Invalidates the frag0 fast path, since pulling may move the data.
 */
static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
					unsigned int offset)
{
	if (!pskb_may_pull(skb, hlen))
		return NULL;

	skb_gro_frag0_invalidate(skb);
	return skb->data + offset;
}
2724
/* Network header pointer, via frag0 when still valid, else skb->data. */
static inline void *skb_gro_network_header(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
	       skb_network_offset(skb);
}
2730
/* Subtract the checksum of a pulled header from the running GRO checksum. */
static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
					const void *start, unsigned int len)
{
	if (NAPI_GRO_CB(skb)->csum_valid)
		NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
						  csum_partial(start, len, 0));
}
2738
2739
2740
2741
2742
2743
2744__sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
2745
/* True when the parse offset sits at the recorded remote-checksum start. */
static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
}
2750
/*
 * Decide whether an explicit checksum validation is needed: skipped for
 * CHECKSUM_PARTIAL covering this header, remote-checksum offload start,
 * remaining CHECKSUM_UNNECESSARY levels, or an allowed zero checksum.
 */
static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
						      bool zero_okay,
						      __sum16 check)
{
	return ((skb->ip_summed != CHECKSUM_PARTIAL ||
		skb_checksum_start_offset(skb) <
		 skb_gro_offset(skb)) &&
		!skb_at_gro_remcsum_start(skb) &&
		NAPI_GRO_CB(skb)->csum_cnt == 0 &&
		(!zero_okay || check));
}
2762
/*
 * Finish validation against pseudo-header checksum @psum.  Returns 0 when
 * the running GRO checksum proves the packet good; otherwise falls back
 * to a full checksum computation.
 */
static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
							   __wsum psum)
{
	if (NAPI_GRO_CB(skb)->csum_valid &&
	    !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
		return 0;

	NAPI_GRO_CB(skb)->csum = psum;

	return __skb_gro_checksum_complete(skb);
}
2774
/* Account one validated checksum level. */
static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
{
	if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
		/* Consume a checksum verified by the hardware. */
		NAPI_GRO_CB(skb)->csum_cnt--;
	} else {
		/* No hardware-verified checksums left: record that this
		 * level was validated in software so later consumers see
		 * one more CHECKSUM_UNNECESSARY level.
		 */
		__skb_incr_checksum_unnecessary(skb);
	}
}
2788
/*
 * Validate the checksum of the header at the current GRO offset.
 * Evaluates to 0 on success (and consumes one checksum level),
 * nonzero on checksum failure.
 */
#define __skb_gro_checksum_validate(skb, proto, zero_okay, check,	\
				    compute_pseudo)			\
({									\
	__sum16 __ret = 0;						\
	if (__skb_gro_checksum_validate_needed(skb, zero_okay, check))	\
		__ret = __skb_gro_checksum_validate_complete(skb,	\
				compute_pseudo(skb, proto));		\
	if (!__ret)							\
		skb_gro_incr_csum_unnecessary(skb);			\
	__ret;								\
})

#define skb_gro_checksum_validate(skb, proto, compute_pseudo)		\
	__skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)

/* Variant that accepts an all-zero checksum as "not set". */
#define skb_gro_checksum_validate_zero_check(skb, proto, check,		\
					     compute_pseudo)		\
	__skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)

/* Variant with no pseudo-header contribution. */
#define skb_gro_checksum_simple_validate(skb)				\
	__skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
2810
/* True when no checksum information is available yet, so a conversion
 * from a pseudo-header value is worthwhile. */
static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
		!NAPI_GRO_CB(skb)->csum_valid);
}
2816
/* Seed the GRO checksum from a pseudo-header checksum.
 * Note: @check is unused here; the macro wrapper still passes it. */
static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
					      __sum16 check, __wsum pseudo)
{
	NAPI_GRO_CB(skb)->csum = ~pseudo;
	NAPI_GRO_CB(skb)->csum_valid = 1;
}
2823
/* Seed the GRO checksum from @proto's pseudo-header when none exists yet. */
#define skb_gro_checksum_try_convert(skb, proto, check, compute_pseudo)	\
do {									\
	if (__skb_gro_checksum_convert_check(skb))			\
		__skb_gro_checksum_convert(skb, check,			\
					   compute_pseudo(skb, proto));	\
} while (0)
2830
/* Remote-checksum-offload bookkeeping carried across a GRO callback. */
struct gro_remcsum {
	int offset;	/* where the adjusted checksum field lives */
	__wsum delta;	/* adjustment applied, undone on flush */
};

/* Reset remote-checksum state (delta == 0 means "nothing to undo"). */
static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
{
	grc->offset = 0;
	grc->delta = 0;
}
2841
/*
 * Apply remote checksum offload during GRO.
 *
 * In the !nopartial case only the region start is recorded for later
 * checks and @ptr is returned unchanged.  Otherwise the checksum field
 * at @hdrlen + @offset is adjusted in place, the running GRO checksum is
 * updated, and the undo information is stored in @grc.  Returns a
 * (possibly refreshed) header pointer, or NULL if the header cannot be
 * made available.
 */
static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
					    unsigned int off, size_t hdrlen,
					    int start, int offset,
					    struct gro_remcsum *grc,
					    bool nopartial)
{
	__wsum delta;
	size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);

	BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);

	if (!nopartial) {
		NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
		return ptr;
	}

	/* Re-fetch the header; the pull below may have moved the data. */
	ptr = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, off + plen)) {
		ptr = skb_gro_header_slow(skb, off + plen, off);
		if (!ptr)
			return NULL;
	}

	delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
			       start, offset);

	/* Adjust skb->csum since we changed the packet */
	NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);

	grc->offset = off + hdrlen + offset;
	grc->delta = delta;

	return ptr;
}
2876
/*
 * Undo a remote checksum adjustment recorded in @grc (no-op when no
 * adjustment was made, i.e. delta == 0).
 */
static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
					   struct gro_remcsum *grc)
{
	void *ptr;
	size_t plen = grc->offset + sizeof(u16);

	if (!grc->delta)
		return;

	/* Re-fetch the checksum field; data may have moved since. */
	ptr = skb_gro_header_fast(skb, grc->offset);
	if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
		ptr = skb_gro_header_slow(skb, plen, grc->offset);
		if (!ptr)
			return;
	}

	remcsum_unadjust((__sum16 *)ptr, grc->delta);
}
2895
#ifdef CONFIG_XFRM_OFFLOAD
/* Merge @flush into the GRO flush state unless the xfrm layer has taken
 * ownership of the skb (gro_receive returned ERR_PTR(-EINPROGRESS)). */
static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
{
	if (PTR_ERR(pp) != -EINPROGRESS)
		NAPI_GRO_CB(skb)->flush |= flush;
}
/* As above, also undoing remote-checksum state when we keep the skb. */
static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
					       struct sk_buff *pp,
					       int flush,
					       struct gro_remcsum *grc)
{
	if (PTR_ERR(pp) != -EINPROGRESS) {
		NAPI_GRO_CB(skb)->flush |= flush;
		skb_gro_remcsum_cleanup(skb, grc);
		skb->remcsum_offload = 0;
	}
}
#else
/* Without XFRM offload, -EINPROGRESS is never returned: flush unconditionally. */
static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
{
	NAPI_GRO_CB(skb)->flush |= flush;
}
static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
					       struct sk_buff *pp,
					       int flush,
					       struct gro_remcsum *grc)
{
	NAPI_GRO_CB(skb)->flush |= flush;
	skb_gro_remcsum_cleanup(skb, grc);
	skb->remcsum_offload = 0;
}
#endif
2928
2929static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
2930 unsigned short type,
2931 const void *daddr, const void *saddr,
2932 unsigned int len)
2933{
2934 if (!dev->header_ops || !dev->header_ops->create)
2935 return 0;
2936
2937 return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
2938}
2939
2940static inline int dev_parse_header(const struct sk_buff *skb,
2941 unsigned char *haddr)
2942{
2943 const struct net_device *dev = skb->dev;
2944
2945 if (!dev->header_ops || !dev->header_ops->parse)
2946 return 0;
2947 return dev->header_ops->parse(skb, haddr);
2948}
2949
2950static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb)
2951{
2952 const struct net_device *dev = skb->dev;
2953
2954 if (!dev->header_ops || !dev->header_ops->parse_protocol)
2955 return 0;
2956 return dev->header_ops->parse_protocol(skb);
2957}
2958
2959
/*
 * Validate a user-supplied link-layer header (e.g. from an AF_PACKET
 * socket).  Returns true when @ll_header of @len bytes is acceptable:
 *  - full-length headers are always accepted;
 *  - anything shorter than min_header_len is always rejected;
 *  - CAP_SYS_RAWIO callers get the remainder zero-padded and accepted;
 *  - otherwise defer to the device's header_ops->validate, if any.
 */
static inline bool dev_validate_header(const struct net_device *dev,
				       char *ll_header, int len)
{
	if (likely(len >= dev->hard_header_len))
		return true;
	if (len < dev->min_header_len)
		return false;

	if (capable(CAP_SYS_RAWIO)) {
		memset(ll_header + len, 0, dev->hard_header_len - len);
		return true;
	}

	if (dev->header_ops && dev->header_ops->validate)
		return dev->header_ops->validate(ll_header, len);

	return false;
}
2978
/* Per-family SIOCGIFCONF handler registration. */
typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr,
			   int len, int size);
int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
/* Deregister by installing a NULL handler for @family. */
static inline int unregister_gifconf(unsigned int family)
{
	return register_gifconf(family, NULL);
}
2986
#ifdef CONFIG_NET_FLOW_LIMIT
#define FLOW_LIMIT_HISTORY	(1 << 7)
/* Per-cpu state for softnet flow limiting (drops dominant flows first). */
struct sd_flow_limit {
	u64			count;		/* packets seen */
	unsigned int		num_buckets;
	unsigned int		history_head;	/* ring index into history[] */
	u16			history[FLOW_LIMIT_HISTORY];
	u8			buckets[];	/* per-bucket packet counts */
};

extern int netdev_flow_limit_table_len;
#endif /* CONFIG_NET_FLOW_LIMIT */
2999
3000
3001
3002
/*
 * Per-cpu incoming-packet queue and softirq bookkeeping
 * (see the softnet_data per-cpu variable below).
 */
struct softnet_data {
	struct list_head	poll_list;	/* NAPI instances awaiting poll */
	struct sk_buff_head	process_queue;	/* packets being processed */

	/* stats */
	unsigned int		processed;
	unsigned int		time_squeeze;	/* budget/time exhausted events */
	unsigned int		received_rps;
#ifdef CONFIG_RPS
	struct softnet_data	*rps_ipi_list;	/* cpus to kick via IPI */
#endif
#ifdef CONFIG_NET_FLOW_LIMIT
	struct sd_flow_limit __rcu *flow_limit;
#endif
	struct Qdisc		*output_queue;	/* qdiscs pending __qdisc_run */
	struct Qdisc		**output_queue_tailp;
	struct sk_buff		*completion_queue; /* skbs to free in softirq */
#ifdef CONFIG_XFRM_OFFLOAD
	struct sk_buff_head	xfrm_backlog;
#endif
#ifdef CONFIG_RPS
	/* input_queue_head should be written by the cpu owning this struct,
	 * and only read by other cpus, hence the separate cacheline.
	 */
	unsigned int		input_queue_head ____cacheline_aligned_in_smp;

	/* Elements below can be accessed between cpus for RPS/RFS */
	call_single_data_t	csd ____cacheline_aligned_in_smp;
	struct softnet_data	*rps_ipi_next;
	unsigned int		cpu;
	unsigned int		input_queue_tail;
#endif
	unsigned int		dropped;	/* backlog-full drop count */
	struct sk_buff_head	input_pkt_queue; /* backlog of received packets */
	struct napi_struct	backlog;	/* NAPI context draining the backlog */

};
3040
/* Advance the RPS input-queue head counter (no-op without CONFIG_RPS). */
static inline void input_queue_head_incr(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	sd->input_queue_head++;
#endif
}
3047
/* Advance the RPS input-queue tail counter, reporting the new value in
 * *@qtail (untouched without CONFIG_RPS). */
static inline void input_queue_tail_incr_save(struct softnet_data *sd,
					      unsigned int *qtail)
{
#ifdef CONFIG_RPS
	*qtail = ++sd->input_queue_tail;
#endif
}
3055
3056DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
3057
3058void __netif_schedule(struct Qdisc *q);
3059void netif_schedule_queue(struct netdev_queue *txq);
3060
3061static inline void netif_tx_schedule_all(struct net_device *dev)
3062{
3063 unsigned int i;
3064
3065 for (i = 0; i < dev->num_tx_queues; i++)
3066 netif_schedule_queue(netdev_get_tx_queue(dev, i));
3067}
3068
/* Clear the driver's XOFF flag: the stack may transmit on this queue. */
static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
	clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 *	netif_start_queue - allow transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 *	Operates on TX queue 0 only (legacy single-queue interface).
 */
static inline void netif_start_queue(struct net_device *dev)
{
	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
}
3084
3085static inline void netif_tx_start_all_queues(struct net_device *dev)
3086{
3087 unsigned int i;
3088
3089 for (i = 0; i < dev->num_tx_queues; i++) {
3090 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3091 netif_tx_start_queue(txq);
3092 }
3093}
3094
3095void netif_tx_wake_queue(struct netdev_queue *dev_queue);
3096
3097
3098
3099
3100
3101
3102
3103
/**
 *	netif_wake_queue - restart transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are available again.
 *	Operates on TX queue 0 only (legacy single-queue interface).
 */
static inline void netif_wake_queue(struct net_device *dev)
{
	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
}
3108
3109static inline void netif_tx_wake_all_queues(struct net_device *dev)
3110{
3111 unsigned int i;
3112
3113 for (i = 0; i < dev->num_tx_queues; i++) {
3114 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3115 netif_tx_wake_queue(txq);
3116 }
3117}
3118
/* Set the driver's XOFF flag: the stack must not transmit on this queue. */
static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
	set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 *	netif_stop_queue - stop transmitted packets
 *	@dev: network device
 *
 *	Stop upper layers calling the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are unavailable.
 *	Operates on TX queue 0 only (legacy single-queue interface).
 */
static inline void netif_stop_queue(struct net_device *dev)
{
	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}
3135
3136void netif_tx_stop_all_queues(struct net_device *dev);
3137
/* True if the driver has stopped this TX queue (DRV_XOFF set). */
static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
	return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 *	netif_queue_stopped - test if transmit queue is flowblocked
 *	@dev: network device
 *
 *	Test if transmit queue 0 on device is currently unable to send.
 */
static inline bool netif_queue_stopped(const struct net_device *dev)
{
	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}
3153
/* True if any XOFF source (driver or stack/BQL) has stopped the queue. */
static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_ANY_XOFF;
}

/* True if any XOFF source has stopped the queue, or it is frozen by
 * netif_tx_lock().
 */
static inline bool
netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
}

/* True if the driver has stopped the queue or it is frozen; ignores the
 * stack/BQL XOFF bit.
 */
static inline bool
netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
}
3170
3171
3172
3173
3174
3175
3176
3177
/**
 *	netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
 *	@dev_queue: pointer to transmit queue
 *
 *	BQL-enabled drivers may use this helper in their ndo_start_xmit(),
 *	to give appropriate hint to the CPU ahead of netdev_tx_sent_queue().
 */
static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_BQL
	prefetchw(&dev_queue->dql.num_queued);
#endif
}

/**
 *	netdev_txq_bql_complete_prefetchw - prefetch bql data for write
 *	@dev_queue: pointer to transmit queue
 *
 *	BQL-enabled drivers may use this helper in their TX completion path,
 *	to give appropriate hint to the CPU ahead of netdev_tx_completed_queue().
 */
static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_BQL
	prefetchw(&dev_queue->dql.limit);
#endif
}
3198
/* BQL: account @bytes handed to the hardware queue; if the dynamic limit
 * is exceeded, set the stack XOFF flag so the qdisc layer stops feeding
 * this queue. No-op without CONFIG_BQL.
 */
static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
					unsigned int bytes)
{
#ifdef CONFIG_BQL
	dql_queued(&dev_queue->dql, bytes);

	if (likely(dql_avail(&dev_queue->dql) >= 0))
		return;

	set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);

	/*
	 * The XOFF flag must be set before checking the dql_avail below,
	 * because in netdev_tx_completed_queue we update the dql_completed
	 * before checking the XOFF flag.
	 */
	smp_mb();

	/* check again in case another CPU has just made room avail */
	if (unlikely(dql_avail(&dev_queue->dql) >= 0))
		clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
#endif
}
3222
3223
3224
3225
3226
3227
3228
/* Variant of netdev_tx_sent_queue() for drivers using the xmit_more hint:
 * while more packets of a burst are pending (@xmit_more), only the BQL
 * byte count is updated and the return value tells whether the queue is
 * already stopped; the full (flag-setting) accounting runs on the burst's
 * last packet. Returns true when the caller must ring the doorbell.
 */
static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
					  unsigned int bytes,
					  bool xmit_more)
{
	if (xmit_more) {
#ifdef CONFIG_BQL
		dql_queued(&dev_queue->dql, bytes);
#endif
		return netif_tx_queue_stopped(dev_queue);
	}
	netdev_tx_sent_queue(dev_queue, bytes);
	return true;
}
3242
3243
3244
3245
3246
3247
3248
3249
3250
3251
/**
 *	netdev_sent_queue - report the number of bytes queued to hardware
 *	@dev: network device
 *	@bytes: number of bytes queued to the hardware device queue
 *
 *	BQL accounting for TX queue 0 of a single-queue device; may set the
 *	stack XOFF flag (see netdev_tx_sent_queue()).
 */
static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
{
	netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
}

/* xmit_more-aware variant of netdev_sent_queue() for TX queue 0; returns
 * true when the caller must ring the doorbell.
 */
static inline bool __netdev_sent_queue(struct net_device *dev,
				       unsigned int bytes,
				       bool xmit_more)
{
	return __netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes,
				      xmit_more);
}
3264
/* BQL: account @bytes (@pkts is currently unused by dql) reclaimed by TX
 * completion; restart the queue if the stack had stopped it and room is
 * available again. No-op without CONFIG_BQL.
 */
static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
					     unsigned int pkts, unsigned int bytes)
{
#ifdef CONFIG_BQL
	if (unlikely(!bytes))
		return;

	dql_completed(&dev_queue->dql, bytes);

	/*
	 * Without the memory barrier there is a small possibility that
	 * netdev_tx_sent_queue will miss the update and cause the queue to
	 * be stopped forever
	 */
	smp_mb();

	if (dql_avail(&dev_queue->dql) < 0)
		return;

	if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
		netif_schedule_queue(dev_queue);
#endif
}
3288
3289
3290
3291
3292
3293
3294
3295
3296
3297
3298
/**
 *	netdev_completed_queue - report bytes and packets completed by device
 *	@dev: network device
 *	@pkts: actual number of packets sent over the medium
 *	@bytes: actual number of bytes sent over the medium
 *
 *	BQL completion accounting for TX queue 0 of a single-queue device.
 */
static inline void netdev_completed_queue(struct net_device *dev,
					  unsigned int pkts, unsigned int bytes)
{
	netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
}
3304
/* Reset a queue's BQL state: clear the stack XOFF flag and zero the
 * dynamic queue limit counters. No-op without CONFIG_BQL.
 */
static inline void netdev_tx_reset_queue(struct netdev_queue *q)
{
#ifdef CONFIG_BQL
	clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
	dql_reset(&q->dql);
#endif
}
3312
3313
3314
3315
3316
3317
3318
3319
/**
 *	netdev_reset_queue - reset the packets and bytes count of a network device
 *	@dev: network device
 *
 *	Reset the bytes and packet counts of a network device and clear the
 *	software flow control OFF bit for this network device.
 *	Operates on TX queue 0 only (legacy single-queue interface).
 */
static inline void netdev_reset_queue(struct net_device *dev)
{
	/* Parameter renamed from the misleading "dev_queue": it is a
	 * net_device, matching netdev_sent_queue()/netdev_completed_queue().
	 */
	netdev_tx_reset_queue(netdev_get_tx_queue(dev, 0));
}
3324
3325
3326
3327
3328
3329
3330
3331
3332
3333static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
3334{
3335 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
3336 net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
3337 dev->name, queue_index,
3338 dev->real_num_tx_queues);
3339 return 0;
3340 }
3341
3342 return queue_index;
3343}
3344
3345
3346
3347
3348
3349
3350
/**
 *	netif_running - test if up
 *	@dev: network device
 *
 *	Test if the device has been brought up.
 */
static inline bool netif_running(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}
3355
3356
3357
3358
3359
3360
3361
3362
3363
3364
3365
3366
3367
3368
3369
3370static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
3371{
3372 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3373
3374 netif_tx_start_queue(txq);
3375}
3376
3377
3378
3379
3380
3381
3382
3383
3384static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
3385{
3386 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3387 netif_tx_stop_queue(txq);
3388}
3389
3390
3391
3392
3393
3394
3395
3396
/**
 *	__netif_subqueue_stopped - test status of subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 *	Check individual transmit queue of a device with multiple transmit queues.
 */
static inline bool __netif_subqueue_stopped(const struct net_device *dev,
					    u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	return netif_tx_queue_stopped(txq);
}

/* Same check, taking the queue index from the skb's queue mapping. */
static inline bool netif_subqueue_stopped(const struct net_device *dev,
					  struct sk_buff *skb)
{
	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}
3410
3411
3412
3413
3414
3415
3416
3417
3418static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
3419{
3420 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3421
3422 netif_tx_wake_queue(txq);
3423}
3424
3425#ifdef CONFIG_XPS
3426int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
3427 u16 index);
3428int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
3429 u16 index, bool is_rxqs_map);
3430
3431
3432
3433
3434
3435
3436
3437
3438
/**
 * netif_attr_test_mask - Test a CPU or Rx queue set in a mask
 * @j: CPU/Rx queue index
 * @mask: bitmask of all cpus/rx queues
 * @nr_bits: number of bits in the bitmask
 *
 * Test if a CPU or Rx queue index is set in a mask of all CPU/Rx queues.
 */
static inline bool netif_attr_test_mask(unsigned long j,
					const unsigned long *mask,
					unsigned int nr_bits)
{
	cpu_max_bits_warn(j, nr_bits);
	return test_bit(j, mask);
}
3446
3447
3448
3449
3450
3451
3452
3453
3454
/**
 * netif_attr_test_online - Test for online CPU/Rx queue
 * @j: CPU/Rx queue index
 * @online_mask: bitmask for CPUs/Rx queues that are online
 * @nr_bits: number of bits in the bitmask
 *
 * Returns true if a CPU/Rx queue is online. A NULL @online_mask means
 * every index below @nr_bits counts as online.
 */
static inline bool netif_attr_test_online(unsigned long j,
					  const unsigned long *online_mask,
					  unsigned int nr_bits)
{
	cpu_max_bits_warn(j, nr_bits);

	if (online_mask)
		return test_bit(j, online_mask);

	return (j < nr_bits);
}
3466
3467
3468
3469
3470
3471
3472
3473
3474
/**
 * netif_attrmask_next - get the next CPU/Rx queue in a cpu/Rx queues mask
 * @n: CPU/Rx queue index
 * @srcp: the cpumask/Rx queue mask pointer
 * @nr_bits: number of bits in the bitmask
 *
 * Returns >= nr_bits if no further CPUs/Rx queues are set. A NULL @srcp
 * behaves like a fully-set mask (simply returns n + 1).
 */
static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp,
					       unsigned int nr_bits)
{
	/* -1 is a legal arg here (to start the iteration). */
	if (n != -1)
		cpu_max_bits_warn(n, nr_bits);

	if (srcp)
		return find_next_bit(srcp, nr_bits, n + 1);

	return n + 1;
}
3487
3488
3489
3490
3491
3492
3493
3494
3495
3496
/**
 * netif_attrmask_next_and - get the next CPU/Rx queue in \*src1p & \*src2p
 * @n: CPU/Rx queue index
 * @src1p: the first CPUs/Rx queues mask pointer
 * @src2p: the second CPUs/Rx queues mask pointer
 * @nr_bits: number of bits in the bitmask
 *
 * Returns >= nr_bits if no further CPUs/Rx queues are set in both masks.
 * A NULL mask pointer is treated as fully set.
 */
static inline int netif_attrmask_next_and(int n, const unsigned long *src1p,
					  const unsigned long *src2p,
					  unsigned int nr_bits)
{
	/* -1 is a legal arg here (to start the iteration). */
	if (n != -1)
		cpu_max_bits_warn(n, nr_bits);

	if (src1p && src2p)
		return find_next_and_bit(src1p, src2p, nr_bits, n + 1);
	else if (src1p)
		return find_next_bit(src1p, nr_bits, n + 1);
	else if (src2p)
		return find_next_bit(src2p, nr_bits, n + 1);

	return n + 1;
}
3514#else
/* CONFIG_XPS disabled: setting an XPS map is a successful no-op. */
static inline int netif_set_xps_queue(struct net_device *dev,
				      const struct cpumask *mask,
				      u16 index)
{
	return 0;
}

/* CONFIG_XPS disabled: setting an XPS map is a successful no-op. */
static inline int __netif_set_xps_queue(struct net_device *dev,
					const unsigned long *mask,
					u16 index, bool is_rxqs_map)
{
	return 0;
}
3528#endif
3529
3530
3531
3532
3533
3534
3535
/**
 *	netif_is_multiqueue - test if device has multiple transmit queues
 *	@dev: network device
 *
 *	Check if device has multiple transmit queues.
 */
static inline bool netif_is_multiqueue(const struct net_device *dev)
{
	return dev->num_tx_queues > 1;
}
3540
3541int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
3542
3543#ifdef CONFIG_SYSFS
3544int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
3545#else
/* CONFIG_SYSFS disabled: no RX queue sysfs objects to adjust, just record
 * the new count.
 */
static inline int netif_set_real_num_rx_queues(struct net_device *dev,
					       unsigned int rxqs)
{
	dev->real_num_rx_queues = rxqs;
	return 0;
}
3552#endif
3553
3554static inline struct netdev_rx_queue *
3555__netif_get_rx_queue(struct net_device *dev, unsigned int rxq)
3556{
3557 return dev->_rx + rxq;
3558}
3559
3560#ifdef CONFIG_SYSFS
/* Recover the index of @queue within its device's _rx array by pointer
 * arithmetic; BUG if the queue does not belong to the array.
 */
static inline unsigned int get_netdev_rx_queue_index(
		struct netdev_rx_queue *queue)
{
	struct net_device *dev = queue->dev;
	int index = queue - dev->_rx;

	BUG_ON(index >= dev->num_rx_queues);
	return index;
}
3570#endif
3571
3572#define DEFAULT_MAX_NUM_RSS_QUEUES (8)
3573int netif_get_num_default_rss_queues(void);
3574
/* Reason passed to __dev_kfree_skb_irq()/__dev_kfree_skb_any(): was the
 * packet delivered successfully (consumed) or discarded (dropped)?
 */
enum skb_free_reason {
	SKB_REASON_CONSUMED,
	SKB_REASON_DROPPED,
};

void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
3582
3583
3584
3585
3586
3587
3588
3589
3590
3591
3592
3593
3594
3595
3596
3597
3598
3599
3600
3601
/* Free a dropped skb from hardirq/softirq context. */
static inline void dev_kfree_skb_irq(struct sk_buff *skb)
{
	__dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
}

/* Free a successfully transmitted skb from hardirq/softirq context. */
static inline void dev_consume_skb_irq(struct sk_buff *skb)
{
	__dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
}

/* Free a dropped skb from any context (process, softirq or hardirq). */
static inline void dev_kfree_skb_any(struct sk_buff *skb)
{
	__dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
}

/* Free a successfully transmitted skb from any context. */
static inline void dev_consume_skb_any(struct sk_buff *skb)
{
	__dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
}
3621
3622void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog);
3623int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb);
3624int netif_rx(struct sk_buff *skb);
3625int netif_rx_ni(struct sk_buff *skb);
3626int netif_receive_skb(struct sk_buff *skb);
3627int netif_receive_skb_core(struct sk_buff *skb);
3628void netif_receive_skb_list(struct list_head *head);
3629gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
3630void napi_gro_flush(struct napi_struct *napi, bool flush_old);
3631struct sk_buff *napi_get_frags(struct napi_struct *napi);
3632gro_result_t napi_gro_frags(struct napi_struct *napi);
3633struct packet_offload *gro_find_receive_by_type(__be16 type);
3634struct packet_offload *gro_find_complete_by_type(__be16 type);
3635
/* Release the skb cached by napi_get_frags() and forget it. */
static inline void napi_free_frags(struct napi_struct *napi)
{
	kfree_skb(napi->skb);
	napi->skb = NULL;
}
3641
3642bool netdev_is_rx_handler_busy(struct net_device *dev);
3643int netdev_rx_handler_register(struct net_device *dev,
3644 rx_handler_func_t *rx_handler,
3645 void *rx_handler_data);
3646void netdev_rx_handler_unregister(struct net_device *dev);
3647
3648bool dev_valid_name(const char *name);
3649int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
3650 bool *need_copyout);
3651int dev_ifconf(struct net *net, struct ifconf *, int);
3652int dev_ethtool(struct net *net, struct ifreq *);
3653unsigned int dev_get_flags(const struct net_device *);
3654int __dev_change_flags(struct net_device *dev, unsigned int flags,
3655 struct netlink_ext_ack *extack);
3656int dev_change_flags(struct net_device *dev, unsigned int flags,
3657 struct netlink_ext_ack *extack);
3658void __dev_notify_flags(struct net_device *, unsigned int old_flags,
3659 unsigned int gchanges);
3660int dev_change_name(struct net_device *, const char *);
3661int dev_set_alias(struct net_device *, const char *, size_t);
3662int dev_get_alias(const struct net_device *, char *, size_t);
3663int dev_change_net_namespace(struct net_device *, struct net *, const char *);
3664int __dev_set_mtu(struct net_device *, int);
3665int dev_set_mtu_ext(struct net_device *dev, int mtu,
3666 struct netlink_ext_ack *extack);
3667int dev_set_mtu(struct net_device *, int);
3668int dev_change_tx_queue_len(struct net_device *, unsigned long);
3669void dev_set_group(struct net_device *, int);
3670int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
3671 struct netlink_ext_ack *extack);
3672int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
3673 struct netlink_ext_ack *extack);
3674int dev_change_carrier(struct net_device *, bool new_carrier);
3675int dev_get_phys_port_id(struct net_device *dev,
3676 struct netdev_phys_item_id *ppid);
3677int dev_get_phys_port_name(struct net_device *dev,
3678 char *name, size_t len);
3679int dev_get_port_parent_id(struct net_device *dev,
3680 struct netdev_phys_item_id *ppid, bool recurse);
3681bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b);
3682int dev_change_proto_down(struct net_device *dev, bool proto_down);
3683int dev_change_proto_down_generic(struct net_device *dev, bool proto_down);
3684struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
3685struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
3686 struct netdev_queue *txq, int *ret);
3687
3688typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
3689int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
3690 int fd, u32 flags);
3691u32 __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op,
3692 enum bpf_netdev_command cmd);
3693int xdp_umem_query(struct net_device *dev, u16 queue_id);
3694
3695int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
3696int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
3697bool is_skb_forwardable(const struct net_device *dev,
3698 const struct sk_buff *skb);
3699
/* Common precondition check for forwarding @skb to @dev: drop (and count
 * in dev->rx_dropped) packets whose frags cannot be orphaned or that are
 * not forwardable to @dev; otherwise scrub namespace-crossing state and
 * reset the priority. Returns 0 on success, NET_RX_DROP otherwise (the
 * skb is freed on drop).
 */
static __always_inline int ____dev_forward_skb(struct net_device *dev,
					       struct sk_buff *skb)
{
	if (skb_orphan_frags(skb, GFP_ATOMIC) ||
	    unlikely(!is_skb_forwardable(dev, skb))) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	skb_scrub_packet(skb, true);
	skb->priority = 0;
	return 0;
}
3714
3715bool dev_nit_active(struct net_device *dev);
3716void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
3717
3718extern int netdev_budget;
3719extern unsigned int netdev_budget_usecs;
3720
3721
3722void netdev_run_todo(void);
3723
3724
3725
3726
3727
3728
3729
/**
 *	dev_put - release reference to device
 *	@dev: network device
 *
 *	Release reference to device to allow it to be freed.
 *	The refcount is per-CPU to avoid cache line bouncing.
 */
static inline void dev_put(struct net_device *dev)
{
	this_cpu_dec(*dev->pcpu_refcnt);
}

/**
 *	dev_hold - get reference to device
 *	@dev: network device
 *
 *	Hold reference to device to keep it from being freed.
 */
static inline void dev_hold(struct net_device *dev)
{
	this_cpu_inc(*dev->pcpu_refcnt);
}
3745
3746
3747
3748
3749
3750
3751
3752
3753
3754
3755void linkwatch_init_dev(struct net_device *dev);
3756void linkwatch_fire_event(struct net_device *dev);
3757void linkwatch_forget_dev(struct net_device *dev);
3758
3759
3760
3761
3762
3763
3764
/**
 *	netif_carrier_ok - test if carrier present
 *	@dev: network device
 *
 *	Check if carrier is present on device
 */
static inline bool netif_carrier_ok(const struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}
3769
3770unsigned long dev_trans_start(struct net_device *dev);
3771
3772void __netdev_watchdog_up(struct net_device *dev);
3773
3774void netif_carrier_on(struct net_device *dev);
3775
3776void netif_carrier_off(struct net_device *dev);
3777
3778
3779
3780
3781
3782
3783
3784
3785
3786
3787
3788
3789
/**
 *	netif_dormant_on - mark device as dormant.
 *	@dev: network device
 *
 *	Mark device as dormant (as per RFC2863) and fire a linkwatch event
 *	only on the 0 -> 1 transition of the flag.
 */
static inline void netif_dormant_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant_off - set device as not dormant.
 *	@dev: network device
 *
 *	Device is not in dormant state; fires a linkwatch event only on the
 *	1 -> 0 transition of the flag.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}
3807
3808
3809
3810
3811
3812
3813
/**
 *	netif_dormant - test if device is dormant
 *	@dev: network device
 *
 *	Check if device is dormant.
 */
static inline bool netif_dormant(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_DORMANT, &dev->state);
}
3818
3819
3820
3821
3822
3823
3824
3825
3826static inline bool netif_oper_up(const struct net_device *dev)
3827{
3828 return (dev->operstate == IF_OPER_UP ||
3829 dev->operstate == IF_OPER_UNKNOWN );
3830}
3831
3832
3833
3834
3835
3836
3837
/**
 *	netif_device_present - is device available or removed
 *	@dev: network device
 *
 *	Check if device has not been removed from system.
 */
static inline bool netif_device_present(struct net_device *dev)
{
	return test_bit(__LINK_STATE_PRESENT, &dev->state);
}
3842
3843void netif_device_detach(struct net_device *dev);
3844
3845void netif_device_attach(struct net_device *dev);
3846
3847
3848
3849
3850
/* Message-level bits used in a driver's msg_enable field (set via the
 * "debug" module parameter or ethtool msglvl) to select log categories.
 */
enum {
	NETIF_MSG_DRV		= 0x0001,
	NETIF_MSG_PROBE		= 0x0002,
	NETIF_MSG_LINK		= 0x0004,
	NETIF_MSG_TIMER		= 0x0008,
	NETIF_MSG_IFDOWN	= 0x0010,
	NETIF_MSG_IFUP		= 0x0020,
	NETIF_MSG_RX_ERR	= 0x0040,
	NETIF_MSG_TX_ERR	= 0x0080,
	NETIF_MSG_TX_QUEUED	= 0x0100,
	NETIF_MSG_INTR		= 0x0200,
	NETIF_MSG_TX_DONE	= 0x0400,
	NETIF_MSG_RX_STATUS	= 0x0800,
	NETIF_MSG_PKTDATA	= 0x1000,
	NETIF_MSG_HW		= 0x2000,
	NETIF_MSG_WOL		= 0x4000,
};
3868
/* Per-category predicates testing (p)->msg_enable against NETIF_MSG_* bits. */
#define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)
3884
3885static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
3886{
3887
3888 if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
3889 return default_msg_enable_bits;
3890 if (debug_value == 0)
3891 return 0;
3892
3893 return (1U << debug_value) - 1;
3894}
3895
/* Take a queue's xmit lock and record the owning CPU (read by
 * txq_trans_update() and to detect recursion).
 */
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
	spin_lock(&txq->_xmit_lock);
	txq->xmit_lock_owner = cpu;
}

/* NETIF_F_LLTX path: no real lock is taken, only the sparse annotation. */
static inline bool __netif_tx_acquire(struct netdev_queue *txq)
{
	__acquire(&txq->_xmit_lock);
	return true;
}

/* NETIF_F_LLTX path: matching sparse release annotation. */
static inline void __netif_tx_release(struct netdev_queue *txq)
{
	__release(&txq->_xmit_lock);
}
3912
/* Take a queue's xmit lock with bottom halves disabled. */
static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
	spin_lock_bh(&txq->_xmit_lock);
	txq->xmit_lock_owner = smp_processor_id();
}

/* Try to take a queue's xmit lock; records the owner only on success. */
static inline bool __netif_tx_trylock(struct netdev_queue *txq)
{
	bool ok = spin_trylock(&txq->_xmit_lock);
	if (likely(ok))
		txq->xmit_lock_owner = smp_processor_id();
	return ok;
}
3926
/* Release a queue's xmit lock; owner is cleared before unlocking. */
static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock(&txq->_xmit_lock);
}

/* Release a queue's xmit lock and re-enable bottom halves. */
static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock_bh(&txq->_xmit_lock);
}

/* Record the last transmit time, but only if this context holds the
 * queue's xmit lock (owner != -1); NETIF_F_LLTX drivers skip the update.
 */
static inline void txq_trans_update(struct netdev_queue *txq)
{
	if (txq->xmit_lock_owner != -1)
		txq->trans_start = jiffies;
}
3944
3945
/* Record the last transmit time on queue 0; the comparison avoids a
 * redundant store (and cache-line dirtying) when jiffies is unchanged.
 */
static inline void netif_trans_update(struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	if (txq->trans_start != jiffies)
		txq->trans_start = jiffies;
}
3953
3954
3955
3956
3957
3958
3959
/**
 *	netif_tx_lock - grab network device transmit lock
 *	@dev: network device
 *
 *	Get network device transmit lock: takes tx_global_lock and marks
 *	every TX queue frozen.
 */
static inline void netif_tx_lock(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	spin_lock(&dev->tx_global_lock);
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* We are the only thread of execution doing a
		 * freeze, but we have to grab the _xmit_lock in
		 * order to synchronize with threads which are in
		 * the ->hard_start_xmit() handler and already
		 * checked the frozen bit.
		 */
		__netif_tx_lock(txq, cpu);
		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
		__netif_tx_unlock(txq);
	}
}
3981
/* netif_tx_lock() with bottom halves disabled. */
static inline void netif_tx_lock_bh(struct net_device *dev)
{
	local_bh_disable();
	netif_tx_lock(dev);
}
3987
/* Undo netif_tx_lock(): unfreeze every TX queue, reschedule each one,
 * then drop tx_global_lock.
 */
static inline void netif_tx_unlock(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* No need to grab the _xmit_lock here.  If the
		 * queue is not stopped for another reason, we
		 * force a schedule.
		 */
		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
		netif_schedule_queue(txq);
	}
	spin_unlock(&dev->tx_global_lock);
}
4004
/* netif_tx_unlock() followed by re-enabling bottom halves. */
static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	netif_tx_unlock(dev);
	local_bh_enable();
}
4010
/* Take/try/release a queue's xmit lock around ndo_start_xmit(), unless the
 * driver advertises NETIF_F_LLTX (lockless TX: the driver does its own
 * locking), in which case only the sparse lock annotations are applied.
 */
#define HARD_TX_LOCK(dev, txq, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_lock(txq, cpu);		\
	} else {					\
		__netif_tx_acquire(txq);		\
	}						\
}

#define HARD_TX_TRYLOCK(dev, txq)			\
	(((dev->features & NETIF_F_LLTX) == 0) ?	\
		__netif_tx_trylock(txq) :		\
		__netif_tx_acquire(txq))

#define HARD_TX_UNLOCK(dev, txq) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_unlock(txq);			\
	} else {					\
		__netif_tx_release(txq);		\
	}						\
}
4031
/* Stop every TX queue of @dev, taking each queue's xmit lock (with BHs
 * disabled) so the stop cannot race an in-flight ndo_start_xmit().
 */
static inline void netif_tx_disable(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	local_bh_disable();
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		__netif_tx_lock(txq, cpu);
		netif_tx_stop_queue(txq);
		__netif_tx_unlock(txq);
	}
	local_bh_enable();
}
4048
/* Lock protecting the device's hardware address lists (uc/mc/dev_addrs). */
static inline void netif_addr_lock(struct net_device *dev)
{
	spin_lock(&dev->addr_list_lock);
}

/* Nested variant for stacked devices (e.g. vlan/bond syncing addresses to
 * a lower device); the lockdep subclass comes from the driver's
 * ndo_get_lock_subclass() when provided.
 */
static inline void netif_addr_lock_nested(struct net_device *dev)
{
	int subclass = SINGLE_DEPTH_NESTING;

	if (dev->netdev_ops->ndo_get_lock_subclass)
		subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);

	spin_lock_nested(&dev->addr_list_lock, subclass);
}

/* Address-list lock with bottom halves disabled. */
static inline void netif_addr_lock_bh(struct net_device *dev)
{
	spin_lock_bh(&dev->addr_list_lock);
}

static inline void netif_addr_unlock(struct net_device *dev)
{
	spin_unlock(&dev->addr_list_lock);
}

static inline void netif_addr_unlock_bh(struct net_device *dev)
{
	spin_unlock_bh(&dev->addr_list_lock);
}
4078
4079
4080
4081
4082
/* Iterate over the device's hardware address list; RCU-protected walk
 * (caller is expected to be in an RCU read-side section).
 */
#define for_each_dev_addr(dev, ha) \
		list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
4085
4086
4087
4088void ether_setup(struct net_device *dev);
4089
4090
4091struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
4092 unsigned char name_assign_type,
4093 void (*setup)(struct net_device *),
4094 unsigned int txqs, unsigned int rxqs);
4095int dev_get_valid_name(struct net *net, struct net_device *dev,
4096 const char *name);
4097
/* Convenience wrappers around alloc_netdev_mqs(): single TX/RX queue, or
 * @count queues of each kind.
 */
#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
	alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)

#define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \
	alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \
			 count)
4104
4105int register_netdev(struct net_device *dev);
4106void unregister_netdev(struct net_device *dev);
4107
4108
4109int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
4110 struct netdev_hw_addr_list *from_list, int addr_len);
4111void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
4112 struct netdev_hw_addr_list *from_list, int addr_len);
4113int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
4114 struct net_device *dev,
4115 int (*sync)(struct net_device *, const unsigned char *),
4116 int (*unsync)(struct net_device *,
4117 const unsigned char *));
4118int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list,
4119 struct net_device *dev,
4120 int (*sync)(struct net_device *,
4121 const unsigned char *, int),
4122 int (*unsync)(struct net_device *,
4123 const unsigned char *, int));
4124void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list,
4125 struct net_device *dev,
4126 int (*unsync)(struct net_device *,
4127 const unsigned char *, int));
4128void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
4129 struct net_device *dev,
4130 int (*unsync)(struct net_device *,
4131 const unsigned char *));
4132void __hw_addr_init(struct netdev_hw_addr_list *list);
4133
4134
4135int dev_addr_add(struct net_device *dev, const unsigned char *addr,
4136 unsigned char addr_type);
4137int dev_addr_del(struct net_device *dev, const unsigned char *addr,
4138 unsigned char addr_type);
4139void dev_addr_flush(struct net_device *dev);
4140int dev_addr_init(struct net_device *dev);
4141
4142
4143int dev_uc_add(struct net_device *dev, const unsigned char *addr);
4144int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
4145int dev_uc_del(struct net_device *dev, const unsigned char *addr);
4146int dev_uc_sync(struct net_device *to, struct net_device *from);
4147int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
4148void dev_uc_unsync(struct net_device *to, struct net_device *from);
4149void dev_uc_flush(struct net_device *dev);
4150void dev_uc_init(struct net_device *dev);
4151
4152
4153
4154
4155
4156
4157
4158
4159
4160
/**
 *  __dev_uc_sync - Synchonize device's unicast list
 *  @dev:  device to sync
 *  @sync: function to call if address should be added
 *  @unsync: function to call if address should be removed
 *
 *  Add newly added addresses to the interface, and release
 *  addresses that have been deleted.
 */
static inline int __dev_uc_sync(struct net_device *dev,
				int (*sync)(struct net_device *,
					    const unsigned char *),
				int (*unsync)(struct net_device *,
					      const unsigned char *))
{
	return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
}
4169
4170
4171
4172
4173
4174
4175
4176
/**
 *  __dev_uc_unsync - Remove synchronized addresses from device
 *  @dev:  device to sync
 *  @unsync: function to call if address should be removed
 *
 *  Remove all addresses that were added to the device by dev_uc_sync().
 */
static inline void __dev_uc_unsync(struct net_device *dev,
				   int (*unsync)(struct net_device *,
						 const unsigned char *))
{
	__hw_addr_unsync_dev(&dev->uc, dev, unsync);
}
4183
4184
4185int dev_mc_add(struct net_device *dev, const unsigned char *addr);
4186int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
4187int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
4188int dev_mc_del(struct net_device *dev, const unsigned char *addr);
4189int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
4190int dev_mc_sync(struct net_device *to, struct net_device *from);
4191int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
4192void dev_mc_unsync(struct net_device *to, struct net_device *from);
4193void dev_mc_flush(struct net_device *dev);
4194void dev_mc_init(struct net_device *dev);
4195
4196
4197
4198
4199
4200
4201
4202
4203
4204
/**
 *  __dev_mc_sync - Synchonize device's multicast list
 *  @dev:  device to sync
 *  @sync: function to call if address should be added
 *  @unsync: function to call if address should be removed
 *
 *  Add newly added addresses to the interface, and release
 *  addresses that have been deleted.
 */
static inline int __dev_mc_sync(struct net_device *dev,
				int (*sync)(struct net_device *,
					    const unsigned char *),
				int (*unsync)(struct net_device *,
					      const unsigned char *))
{
	return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
}
4213
4214
4215
4216
4217
4218
4219
4220
/**
 *  __dev_mc_unsync - Remove synchronized addresses from device
 *  @dev:  device to sync
 *  @unsync: function to call if address should be removed
 *
 *  Remove all addresses that were added to the device by dev_mc_sync().
 */
static inline void __dev_mc_unsync(struct net_device *dev,
				   int (*unsync)(struct net_device *,
						 const unsigned char *))
{
	__hw_addr_unsync_dev(&dev->mc, dev, unsync);
}
4227
4228
4229void dev_set_rx_mode(struct net_device *dev);
4230void __dev_set_rx_mode(struct net_device *dev);
4231int dev_set_promiscuity(struct net_device *dev, int inc);
4232int dev_set_allmulti(struct net_device *dev, int inc);
4233void netdev_state_change(struct net_device *dev);
4234void netdev_notify_peers(struct net_device *dev);
4235void netdev_features_change(struct net_device *dev);
4236
4237void dev_load(struct net *net, const char *name);
4238struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
4239 struct rtnl_link_stats64 *storage);
4240void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
4241 const struct net_device_stats *netdev_stats);
4242
4243extern int netdev_max_backlog;
4244extern int netdev_tstamp_prequeue;
4245extern int weight_p;
4246extern int dev_weight_rx_bias;
4247extern int dev_weight_tx_bias;
4248extern int dev_rx_weight;
4249extern int dev_tx_weight;
4250
4251bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
4252struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
4253 struct list_head **iter);
4254struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
4255 struct list_head **iter);
4256
4257
/* Iterate over the immediate upper devices of @dev; RCU-protected walk
 * (caller is expected to be in an RCU read-side section).
 */
#define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
	for (iter = &(dev)->adj_list.upper, \
	     updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
	     updev; \
	     updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
4263
4264int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
4265 int (*fn)(struct net_device *upper_dev,
4266 void *data),
4267 void *data);
4268
4269bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
4270 struct net_device *upper_dev);
4271
4272bool netdev_has_any_upper_dev(struct net_device *dev);
4273
4274void *netdev_lower_get_next_private(struct net_device *dev,
4275 struct list_head **iter);
4276void *netdev_lower_get_next_private_rcu(struct net_device *dev,
4277 struct list_head **iter);
4278
/* Iterate over the private data of each immediate lower device of @dev
 * (plain and RCU variants).
 */
#define netdev_for_each_lower_private(dev, priv, iter) \
	for (iter = (dev)->adj_list.lower.next, \
	     priv = netdev_lower_get_next_private(dev, &(iter)); \
	     priv; \
	     priv = netdev_lower_get_next_private(dev, &(iter)))

#define netdev_for_each_lower_private_rcu(dev, priv, iter) \
	for (iter = &(dev)->adj_list.lower, \
	     priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
	     priv; \
	     priv = netdev_lower_get_next_private_rcu(dev, &(iter)))

void *netdev_lower_get_next(struct net_device *dev,
			    struct list_head **iter);

/* Iterate over the immediate lower devices of @dev. */
#define netdev_for_each_lower_dev(dev, ldev, iter) \
	for (iter = (dev)->adj_list.lower.next, \
	     ldev = netdev_lower_get_next(dev, &(iter)); \
	     ldev; \
	     ldev = netdev_lower_get_next(dev, &(iter)))
4299
/* Cursor iterators over the full (direct + indirect) lower device list. */
struct net_device *netdev_all_lower_get_next(struct net_device *dev,
					     struct list_head **iter);
struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
						 struct list_head **iter);

/* Invoke @fn(lower, @data) for each device below @dev in the stack. */
int netdev_walk_all_lower_dev(struct net_device *dev,
			      int (*fn)(struct net_device *lower_dev,
					void *data),
			      void *data);
int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
				  int (*fn)(struct net_device *lower_dev,
					    void *data),
				  void *data);

/* Accessors for adjacency-link private data and master (upper) devices. */
void *netdev_adjacent_get_private(struct list_head *adj_list);
void *netdev_lower_get_first_private_rcu(struct net_device *dev);
struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);

/* Create/destroy stacking relationships between @dev and @upper_dev.
 * The _master variant additionally marks @upper_dev as @dev's master and
 * attaches @upper_priv/@upper_info to the link.
 */
int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev,
			  struct netlink_ext_ack *extack);
int netdev_master_upper_dev_link(struct net_device *dev,
				 struct net_device *upper_dev,
				 void *upper_priv, void *upper_info,
				 struct netlink_ext_ack *extack);
void netdev_upper_dev_unlink(struct net_device *dev,
			     struct net_device *upper_dev);
void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
void *netdev_lower_dev_get_private(struct net_device *dev,
				   struct net_device *lower_dev);
void netdev_lower_state_changed(struct net_device *lower_dev,
				void *lower_state_info);

/* System-wide RSS hash key, shared so that stacked devices hash alike. */
#define NETDEV_RSS_KEY_LEN 52
extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
void netdev_rss_key_fill(void *buffer, size_t len);

int dev_get_nest_level(struct net_device *dev);
/* Software checksum helpers used when hardware offload is unavailable. */
int skb_checksum_help(struct sk_buff *skb);
int skb_crc32c_csum_help(struct sk_buff *skb);
int skb_csum_hwoffload_help(struct sk_buff *skb,
			    const netdev_features_t features);

/* Core GSO segmentation entry points (tx_path selects the segment flavor). */
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
				  netdev_features_t features, bool tx_path);
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features);
4347
/* Bonding state snapshot (slave + master views) carried in notifications. */
struct netdev_bonding_info {
	ifslave slave;
	ifbond master;
};

/* Notifier payload wrapping bonding info for netdev notifier chains. */
struct netdev_notifier_bonding_info {
	struct netdev_notifier_info info;
	struct netdev_bonding_info bonding_info;
};

/* Broadcast a bonding-state change for @dev to interested listeners. */
void netdev_bonding_info_change(struct net_device *dev,
				struct netdev_bonding_info *bonding_info);
4360
/* Segment @skb for transmission: thin wrapper around __skb_gso_segment()
 * with tx_path fixed to true.
 */
static inline
struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
	return __skb_gso_segment(skb, features, true);
}
/* Resolve the L3 protocol of @skb, reporting header depth via @depth. */
__be16 skb_network_protocol(struct sk_buff *skb, int *depth);
4367
4368static inline bool can_checksum_protocol(netdev_features_t features,
4369 __be16 protocol)
4370{
4371 if (protocol == htons(ETH_P_FCOE))
4372 return !!(features & NETIF_F_FCOE_CRC);
4373
4374
4375
4376 if (features & NETIF_F_HW_CSUM) {
4377
4378 return true;
4379 }
4380
4381 switch (protocol) {
4382 case htons(ETH_P_IP):
4383 return !!(features & NETIF_F_IP_CSUM);
4384 case htons(ETH_P_IPV6):
4385 return !!(features & NETIF_F_IPV6_CSUM);
4386 default:
4387 return false;
4388 }
4389}
4390
/* Report a hardware RX checksum failure; compiles away without CONFIG_BUG. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev,
					struct sk_buff *skb)
{
}
#endif

/* Reference-counted enable/disable of RX/TX packet timestamping. */
void net_enable_timestamp(void);
void net_disable_timestamp(void);

/* /proc/net setup; a no-op constant 0 when procfs is compiled out. */
#ifdef CONFIG_PROC_FS
int __init dev_proc_init(void);
#else
#define dev_proc_init() 0
#endif
4408
/* Hand @skb to the driver's ndo_start_xmit(), first recording in
 * skb->xmit_more whether further packets will follow immediately (a hint
 * drivers can use to defer doorbell writes / TX flushes).
 */
static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
					      struct sk_buff *skb, struct net_device *dev,
					      bool more)
{
	skb->xmit_more = more ? 1 : 0;
	return ops->ndo_start_xmit(skb, dev);
}
4416
4417static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
4418 struct netdev_queue *txq, bool more)
4419{
4420 const struct net_device_ops *ops = dev->netdev_ops;
4421 netdev_tx_t rc;
4422
4423 rc = __netdev_start_xmit(ops, skb, dev, more);
4424 if (rc == NETDEV_TX_OK)
4425 txq_trans_update(txq);
4426
4427 return rc;
4428}
4429
/* Create/remove sysfs class attribute files, optionally namespace-tagged. */
int netdev_class_create_file_ns(const struct class_attribute *class_attr,
				const void *ns);
void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
				 const void *ns);

/* Convenience wrappers for the common non-namespaced (ns == NULL) case. */
static inline int netdev_class_create_file(const struct class_attribute *class_attr)
{
	return netdev_class_create_file_ns(class_attr, NULL);
}

static inline void netdev_class_remove_file(const struct class_attribute *class_attr)
{
	netdev_class_remove_file_ns(class_attr, NULL);
}

extern const struct kobj_ns_type_operations net_ns_type_operations;

/* Name of the driver bound to @dev, for diagnostics. */
const char *netdev_drivername(const struct net_device *dev);

/* Process all pending link-state change events. */
void linkwatch_run_queue(void);
4450
4451static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
4452 netdev_features_t f2)
4453{
4454 if ((f1 ^ f2) & NETIF_F_HW_CSUM) {
4455 if (f1 & NETIF_F_HW_CSUM)
4456 f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4457 else
4458 f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4459 }
4460
4461 return f1 & f2;
4462}
4463
4464static inline netdev_features_t netdev_get_wanted_features(
4465 struct net_device *dev)
4466{
4467 return (dev->features & ~dev->hw_features) | dev->wanted_features;
4468}
/* Fold device @one's features (masked by @mask) into the aggregate @all;
 * used when computing the feature set of a stacked device from its slaves.
 */
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask);

/* Shorthand: fold all TSO variants of @mask into @features. */
static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
							netdev_features_t mask)
{
	return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
}

/* Recompute and apply @dev's effective features; the __ variant returns
 * non-zero when something changed, netdev_change_features() additionally
 * notifies even without a recomputed difference.
 */
int __netdev_update_features(struct net_device *dev);
void netdev_update_features(struct net_device *dev);
void netdev_change_features(struct net_device *dev);

/* Propagate @rootdev's operstate to the stacked device @dev. */
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
				      struct net_device *dev);

/* Per-skb feature pruning hooks used on the transmit path. */
netdev_features_t passthru_features_check(struct sk_buff *skb,
					  struct net_device *dev,
					  netdev_features_t features);
netdev_features_t netif_skb_features(struct sk_buff *skb);
4493
/* net_gso_ok - check that @features covers every bit of @gso_type
 *
 * SKB_GSO_* flags are defined to be the NETIF_F_GSO_* feature bits shifted
 * down by NETIF_F_GSO_SHIFT; the BUILD_BUG_ONs pin that one-to-one mapping
 * at compile time so the single shift-and-mask below stays valid.
 */
static inline bool net_gso_ok(netdev_features_t features, int gso_type)
{
	netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT;

	/* check flags correspondence */
	BUILD_BUG_ON(SKB_GSO_TCPV4   != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_DODGY   != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCPV6   != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_FCOE    != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_GRE     != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_IPXIP4  != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_IPXIP6  != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_SCTP    != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT));

	/* all requested GSO flags must be present in @features */
	return (features & feature) == feature;
}
4520
4521static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
4522{
4523 return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
4524 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
4525}
4526
4527static inline bool netif_needs_gso(struct sk_buff *skb,
4528 netdev_features_t features)
4529{
4530 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
4531 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
4532 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
4533}
4534
/* Set the largest aggregate size GSO may build for @dev. */
static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	dev->gso_max_size = size;
}

/* Undo header pulls after a failed tunnel segmentation attempt: push the
 * pulled bytes back, restore protocol/mac/network header offsets and mark
 * the skb as encapsulated again.  Field order matters: network_header is
 * derived from the just-restored mac_header.
 */
static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
					int pulled_hlen, u16 mac_offset,
					int mac_len)
{
	skb->protocol = protocol;
	skb->encapsulation = 1;
	skb_push(skb, pulled_hlen);
	skb_reset_transport_header(skb);
	skb->mac_header = mac_offset;
	skb->network_header = skb->mac_header + mac_len;
	skb->mac_len = mac_len;
}
4553
/* Device-type predicates: each tests one or two flag bits on @dev. */

/* @dev is a MACsec device (IFF_MACSEC). */
static inline bool netif_is_macsec(const struct net_device *dev)
{
	return dev->priv_flags & IFF_MACSEC;
}

/* @dev is a macvlan device (IFF_MACVLAN). */
static inline bool netif_is_macvlan(const struct net_device *dev)
{
	return dev->priv_flags & IFF_MACVLAN;
}

/* @dev has macvlan devices stacked on it (IFF_MACVLAN_PORT). */
static inline bool netif_is_macvlan_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_MACVLAN_PORT;
}

/* @dev is a bonding master: IFF_MASTER and IFF_BONDING both set. */
static inline bool netif_is_bond_master(const struct net_device *dev)
{
	return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
}

/* @dev is enslaved to a bond: IFF_SLAVE and IFF_BONDING both set. */
static inline bool netif_is_bond_slave(const struct net_device *dev)
{
	return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
}

/* @dev can send frames without appending an FCS (IFF_SUPP_NOFCS). */
static inline bool netif_supports_nofcs(struct net_device *dev)
{
	return dev->priv_flags & IFF_SUPP_NOFCS;
}

/* @dev's L3 master processes packets in its rx_handler. */
static inline bool netif_has_l3_rx_handler(const struct net_device *dev)
{
	return dev->priv_flags & IFF_L3MDEV_RX_HANDLER;
}

/* @dev is an L3 master device, e.g. a VRF (IFF_L3MDEV_MASTER). */
static inline bool netif_is_l3_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_L3MDEV_MASTER;
}

/* @dev is enslaved to an L3 master device (IFF_L3MDEV_SLAVE). */
static inline bool netif_is_l3_slave(const struct net_device *dev)
{
	return dev->priv_flags & IFF_L3MDEV_SLAVE;
}

/* @dev is an Ethernet bridge master (IFF_EBRIDGE). */
static inline bool netif_is_bridge_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_EBRIDGE;
}

/* @dev is a port attached to a bridge (IFF_BRIDGE_PORT). */
static inline bool netif_is_bridge_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_BRIDGE_PORT;
}

/* @dev is an Open vSwitch master device (IFF_OPENVSWITCH). */
static inline bool netif_is_ovs_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_OPENVSWITCH;
}

/* @dev is attached to an Open vSwitch datapath (IFF_OVS_DATAPATH). */
static inline bool netif_is_ovs_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_OVS_DATAPATH;
}

/* @dev is a team master device (IFF_TEAM). */
static inline bool netif_is_team_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_TEAM;
}

/* @dev is a port of a team device (IFF_TEAM_PORT). */
static inline bool netif_is_team_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_TEAM_PORT;
}

/* @dev is a link-aggregation master of either flavor (bond or team). */
static inline bool netif_is_lag_master(const struct net_device *dev)
{
	return netif_is_bond_master(dev) || netif_is_team_master(dev);
}

/* @dev is a link-aggregation port of either flavor (bond or team). */
static inline bool netif_is_lag_port(const struct net_device *dev)
{
	return netif_is_bond_slave(dev) || netif_is_team_port(dev);
}

/* RX flow hash indirection was explicitly configured by the user. */
static inline bool netif_is_rxfh_configured(const struct net_device *dev)
{
	return dev->priv_flags & IFF_RXFH_CONFIGURED;
}

/* @dev is a failover master device (IFF_FAILOVER). */
static inline bool netif_is_failover(const struct net_device *dev)
{
	return dev->priv_flags & IFF_FAILOVER;
}

/* @dev is a slave of a failover master (IFF_FAILOVER_SLAVE). */
static inline bool netif_is_failover_slave(const struct net_device *dev)
{
	return dev->priv_flags & IFF_FAILOVER_SLAVE;
}
4653
4654
/* Tell the stack @dev needs the dst entry kept on transmitted skbs:
 * clear both the current and the permanent dst-release flags.
 */
static inline void netif_keep_dst(struct net_device *dev)
{
	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
}

/* True when a VLAN on top of @dev must shrink its MTU; currently keyed
 * off IFF_MACSEC (MACsec headers consume part of the frame).
 */
static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
{
	/* TODO: reserve and use an additional IFF bit, if we get more users */
	return dev->priv_flags & IFF_MACSEC;
}

extern struct pernet_operations __net_initdata loopback_net_ops;
4668
4669
4670
4671
4672
4673static inline const char *netdev_name(const struct net_device *dev)
4674{
4675 if (!dev->name[0] || strchr(dev->name, '%'))
4676 return "(unnamed net_device)";
4677 return dev->name;
4678}
4679
/* True while @dev is being torn down (NETREG_UNREGISTERING). */
static inline bool netdev_unregistering(const struct net_device *dev)
{
	return dev->reg_state == NETREG_UNREGISTERING;
}

/* Printable suffix describing @dev's registration state, used by the
 * netdev_WARN()/printk helpers below.  No default case: the compiler then
 * warns if a new NETREG_* value is added without updating this switch; the
 * WARN_ONCE catches corrupt values at runtime.
 */
static inline const char *netdev_reg_state(const struct net_device *dev)
{
	switch (dev->reg_state) {
	case NETREG_UNINITIALIZED: return " (uninitialized)";
	case NETREG_REGISTERED: return "";
	case NETREG_UNREGISTERING: return " (unregistering)";
	case NETREG_UNREGISTERED: return " (unregistered)";
	case NETREG_RELEASED: return " (released)";
	case NETREG_DUMMY: return " (dummy)";
	}

	WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state);
	return " (unknown)";
}
4699
/* printk-style logging tied to a net_device: each message is prefixed with
 * the device name and registration state.  __printf enables format-string
 * checking; __cold keeps these off the hot path.
 */
__printf(3, 4) __cold
void netdev_printk(const char *level, const struct net_device *dev,
		   const char *format, ...);
__printf(2, 3) __cold
void netdev_emerg(const struct net_device *dev, const char *format, ...);
__printf(2, 3) __cold
void netdev_alert(const struct net_device *dev, const char *format, ...);
__printf(2, 3) __cold
void netdev_crit(const struct net_device *dev, const char *format, ...);
__printf(2, 3) __cold
void netdev_err(const struct net_device *dev, const char *format, ...);
__printf(2, 3) __cold
void netdev_warn(const struct net_device *dev, const char *format, ...);
__printf(2, 3) __cold
void netdev_notice(const struct net_device *dev, const char *format, ...);
__printf(2, 3) __cold
void netdev_info(const struct net_device *dev, const char *format, ...);

/* Emit the message only once per call site (static flag per expansion). */
#define netdev_level_once(level, dev, fmt, ...)			\
do {								\
	static bool __print_once __read_mostly;			\
								\
	if (!__print_once) {					\
		__print_once = true;				\
		netdev_printk(level, dev, fmt, ##__VA_ARGS__);	\
	}							\
} while (0)

#define netdev_emerg_once(dev, fmt, ...) \
	netdev_level_once(KERN_EMERG, dev, fmt, ##__VA_ARGS__)
#define netdev_alert_once(dev, fmt, ...) \
	netdev_level_once(KERN_ALERT, dev, fmt, ##__VA_ARGS__)
#define netdev_crit_once(dev, fmt, ...) \
	netdev_level_once(KERN_CRIT, dev, fmt, ##__VA_ARGS__)
#define netdev_err_once(dev, fmt, ...) \
	netdev_level_once(KERN_ERR, dev, fmt, ##__VA_ARGS__)
#define netdev_warn_once(dev, fmt, ...) \
	netdev_level_once(KERN_WARNING, dev, fmt, ##__VA_ARGS__)
#define netdev_notice_once(dev, fmt, ...) \
	netdev_level_once(KERN_NOTICE, dev, fmt, ##__VA_ARGS__)
#define netdev_info_once(dev, fmt, ...) \
	netdev_level_once(KERN_INFO, dev, fmt, ##__VA_ARGS__)

/* Module alias so udev/modprobe can load the driver for a named netdev. */
#define MODULE_ALIAS_NETDEV(device) \
	MODULE_ALIAS("netdev-" device)
4745
/* netdev_dbg: routed through dynamic debug when available, a plain
 * KERN_DEBUG print when DEBUG is defined, otherwise compiled out — the
 * `if (0)` form still type-checks the format string and arguments.
 */
#if defined(CONFIG_DYNAMIC_DEBUG)
#define netdev_dbg(__dev, format, args...)			\
do {								\
	dynamic_netdev_dbg(__dev, format, ##args);		\
} while (0)
#elif defined(DEBUG)
#define netdev_dbg(__dev, format, args...)			\
	netdev_printk(KERN_DEBUG, __dev, format, ##args)
#else
#define netdev_dbg(__dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, __dev, format, ##args); \
})
#endif

/* netdev_vdbg: extra-verbose variant, active only under VERBOSE_DEBUG. */
#if defined(VERBOSE_DEBUG)
#define netdev_vdbg	netdev_dbg
#else

#define netdev_vdbg(dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, dev, format, ##args); \
	0;							\
})
#endif
4773
4774
4775
4776
4777
4778
/* WARN()/WARN_ONCE() with the device name and registration state prefixed
 * to the message, for device-related "should never happen" conditions.
 */
#define netdev_WARN(dev, format, args...)			\
	WARN(1, "netdevice: %s%s: " format, netdev_name(dev),	\
	     netdev_reg_state(dev), ##args)

#define netdev_WARN_ONCE(dev, format, args...)				\
	WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev),	\
		  netdev_reg_state(dev), ##args)
4786
4787
4788
/* netif_* logging: like netdev_* but additionally gated on the driver's
 * msg_enable bitmap via netif_msg_<type>(priv), so each message class can
 * be switched on and off at runtime (see ethtool msglvl).
 */
#define netif_printk(priv, type, level, dev, fmt, args...)	\
do {					  			\
	if (netif_msg_##type(priv))				\
		netdev_printk(level, (dev), fmt, ##args);	\
} while (0)

/* Dispatch to the matching netdev_<level>() helper when the class is on. */
#define netif_level(level, priv, type, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_##level(dev, fmt, ##args);		\
} while (0)

#define netif_emerg(priv, type, dev, fmt, args...)		\
	netif_level(emerg, priv, type, dev, fmt, ##args)
#define netif_alert(priv, type, dev, fmt, args...)		\
	netif_level(alert, priv, type, dev, fmt, ##args)
#define netif_crit(priv, type, dev, fmt, args...)		\
	netif_level(crit, priv, type, dev, fmt, ##args)
#define netif_err(priv, type, dev, fmt, args...)		\
	netif_level(err, priv, type, dev, fmt, ##args)
#define netif_warn(priv, type, dev, fmt, args...)		\
	netif_level(warn, priv, type, dev, fmt, ##args)
#define netif_notice(priv, type, dev, fmt, args...)		\
	netif_level(notice, priv, type, dev, fmt, ##args)
#define netif_info(priv, type, dev, fmt, args...)		\
	netif_level(info, priv, type, dev, fmt, ##args)

/* Debug variant: dynamic debug / DEBUG / compiled-out, as for netdev_dbg. */
#if defined(CONFIG_DYNAMIC_DEBUG)
#define netif_dbg(priv, type, netdev, format, args...)		\
do {								\
	if (netif_msg_##type(priv))				\
		dynamic_netdev_dbg(netdev, format, ##args);	\
} while (0)
#elif defined(DEBUG)
#define netif_dbg(priv, type, dev, format, args...)		\
	netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
#else
#define netif_dbg(priv, type, dev, format, args...)			\
({									\
	if (0)								\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;								\
})
#endif

/* Log at debug level when @cond holds, otherwise at the given level. */
#define netif_cond_dbg(priv, type, netdev, cond, level, fmt, args...)     \
	do {                                                              \
		if (cond)                                                 \
			netif_dbg(priv, type, netdev, fmt, ##args);       \
		else                                                      \
			netif_ ## level(priv, type, netdev, fmt, ##args); \
	} while (0)

/* Extra-verbose variant, active only under VERBOSE_DEBUG. */
#if defined(VERBOSE_DEBUG)
#define netif_vdbg	netif_dbg
#else
#define netif_vdbg(priv, type, dev, format, args...)		\
({								\
	if (0)							\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;							\
})
#endif
4853
4854
4855
4856
4857
4858
4859
4860
4861
4862
4863
4864
4865
4866
4867
4868
4869
4870
4871
4872
/* Size/mask of the packet-type (ptype) hash table; size is a power of two
 * so the mask is simply size - 1.
 */
#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)
4875
4876#endif
4877