/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 */
21#ifndef _LINUX_NETDEVICE_H
22#define _LINUX_NETDEVICE_H
23
24#include <linux/timer.h>
25#include <linux/bug.h>
26#include <linux/delay.h>
27#include <linux/atomic.h>
28#include <linux/prefetch.h>
29#include <asm/cache.h>
30#include <asm/byteorder.h>
31
32#include <linux/percpu.h>
33#include <linux/rculist.h>
34#include <linux/workqueue.h>
35#include <linux/dynamic_queue_limits.h>
36
37#include <net/net_namespace.h>
38#ifdef CONFIG_DCB
39#include <net/dcbnl.h>
40#endif
41#include <net/netprio_cgroup.h>
42#include <net/xdp.h>
43
44#include <linux/netdev_features.h>
45#include <linux/neighbour.h>
46#include <uapi/linux/netdevice.h>
47#include <uapi/linux/if_bonding.h>
48#include <uapi/linux/pkt_cls.h>
49#include <linux/hashtable.h>
50#include <linux/rbtree.h>
51
52struct netpoll_info;
53struct device;
54struct ethtool_ops;
55struct phy_device;
56struct dsa_port;
57struct ip_tunnel_parm;
58struct macsec_context;
59struct macsec_ops;
60
61struct sfp_bus;
62
63struct wireless_dev;
64
65struct wpan_dev;
66struct mpls_dev;
67
68struct udp_tunnel_info;
69struct udp_tunnel_nic_info;
70struct udp_tunnel_nic;
71struct bpf_prog;
72struct xdp_buff;
73
74void synchronize_net(void);
75void netdev_set_default_ethtool_ops(struct net_device *dev,
76 const struct ethtool_ops *ops);
77
78
79#define NET_RX_SUCCESS 0
80#define NET_RX_DROP 1
81
82#define MAX_NEST_DEV 8
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102#define NET_XMIT_SUCCESS 0x00
103#define NET_XMIT_DROP 0x01
104#define NET_XMIT_CN 0x02
105#define NET_XMIT_MASK 0x0f
106
107
108
109
110#define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e))
111#define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0)
112
113
114#define NETDEV_TX_MASK 0xf0
115
116enum netdev_tx {
117 __NETDEV_TX_MIN = INT_MIN,
118 NETDEV_TX_OK = 0x00,
119 NETDEV_TX_BUSY = 0x10,
120};
121typedef enum netdev_tx netdev_tx_t;
122
123
124
125
126
127static inline bool dev_xmit_complete(int rc)
128{
129
130
131
132
133
134
135 if (likely(rc < NET_XMIT_MASK))
136 return true;
137
138 return false;
139}
140
141
142
143
144
145
146#if defined(CONFIG_HYPERV_NET)
147# define LL_MAX_HEADER 128
148#elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
149# if defined(CONFIG_MAC80211_MESH)
150# define LL_MAX_HEADER 128
151# else
152# define LL_MAX_HEADER 96
153# endif
154#else
155# define LL_MAX_HEADER 32
156#endif
157
158#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
159 !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
160#define MAX_HEADER LL_MAX_HEADER
161#else
162#define MAX_HEADER (LL_MAX_HEADER + 48)
163#endif
/*
 *	Old network device statistics. Fields are native words
 *	(unsigned long) so they can be read and updated atomically.
 *	New drivers should report stats via ndo_get_stats64 /
 *	struct rtnl_link_stats64 instead.
 */
struct net_device_stats {
	unsigned long	rx_packets;
	unsigned long	tx_packets;
	unsigned long	rx_bytes;
	unsigned long	tx_bytes;
	unsigned long	rx_errors;
	unsigned long	tx_errors;
	unsigned long	rx_dropped;
	unsigned long	tx_dropped;
	unsigned long	multicast;
	unsigned long	collisions;
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;
	unsigned long	rx_crc_errors;
	unsigned long	rx_frame_errors;
	unsigned long	rx_fifo_errors;
	unsigned long	rx_missed_errors;
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};
195
196
197#include <linux/cache.h>
198#include <linux/skbuff.h>
199
200#ifdef CONFIG_RPS
201#include <linux/static_key.h>
202extern struct static_key_false rps_needed;
203extern struct static_key_false rfs_needed;
204#endif
205
206struct neighbour;
207struct neigh_parms;
208struct sk_buff;
209
210struct netdev_hw_addr {
211 struct list_head list;
212 struct rb_node node;
213 unsigned char addr[MAX_ADDR_LEN];
214 unsigned char type;
215#define NETDEV_HW_ADDR_T_LAN 1
216#define NETDEV_HW_ADDR_T_SAN 2
217#define NETDEV_HW_ADDR_T_UNICAST 3
218#define NETDEV_HW_ADDR_T_MULTICAST 4
219 bool global_use;
220 int sync_cnt;
221 int refcount;
222 int synced;
223 struct rcu_head rcu_head;
224};
225
226struct netdev_hw_addr_list {
227 struct list_head list;
228 int count;
229
230
231 struct rb_root tree;
232};
233
234#define netdev_hw_addr_list_count(l) ((l)->count)
235#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
236#define netdev_hw_addr_list_for_each(ha, l) \
237 list_for_each_entry(ha, &(l)->list, list)
238
239#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
240#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
241#define netdev_for_each_uc_addr(ha, dev) \
242 netdev_hw_addr_list_for_each(ha, &(dev)->uc)
243
244#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
245#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
246#define netdev_for_each_mc_addr(ha, dev) \
247 netdev_hw_addr_list_for_each(ha, &(dev)->mc)
248
249struct hh_cache {
250 unsigned int hh_len;
251 seqlock_t hh_lock;
252
253
254#define HH_DATA_MOD 16
255#define HH_DATA_OFF(__len) \
256 (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
257#define HH_DATA_ALIGN(__len) \
258 (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
259 unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
260};
261
262
263
264
265
266
267
268
269
270#define LL_RESERVED_SPACE(dev) \
271 ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
272#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
273 ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
274
275struct header_ops {
276 int (*create) (struct sk_buff *skb, struct net_device *dev,
277 unsigned short type, const void *daddr,
278 const void *saddr, unsigned int len);
279 int (*parse)(const struct sk_buff *skb, unsigned char *haddr);
280 int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
281 void (*cache_update)(struct hh_cache *hh,
282 const struct net_device *dev,
283 const unsigned char *haddr);
284 bool (*validate)(const char *ll_header, unsigned int len);
285 __be16 (*parse_protocol)(const struct sk_buff *skb);
286};
287
288
289
290
291
292
293enum netdev_state_t {
294 __LINK_STATE_START,
295 __LINK_STATE_PRESENT,
296 __LINK_STATE_NOCARRIER,
297 __LINK_STATE_LINKWATCH_PENDING,
298 __LINK_STATE_DORMANT,
299 __LINK_STATE_TESTING,
300};
301
302
303struct gro_list {
304 struct list_head list;
305 int count;
306};
307
308
309
310
311
312#define GRO_HASH_BUCKETS 8
313
314
315
316
317struct napi_struct {
318
319
320
321
322
323
324 struct list_head poll_list;
325
326 unsigned long state;
327 int weight;
328 int defer_hard_irqs_count;
329 unsigned long gro_bitmask;
330 int (*poll)(struct napi_struct *, int);
331#ifdef CONFIG_NETPOLL
332 int poll_owner;
333#endif
334 struct net_device *dev;
335 struct gro_list gro_hash[GRO_HASH_BUCKETS];
336 struct sk_buff *skb;
337 struct list_head rx_list;
338 int rx_count;
339 struct hrtimer timer;
340 struct list_head dev_list;
341 struct hlist_node napi_hash_node;
342 unsigned int napi_id;
343 struct task_struct *thread;
344};
345
346enum {
347 NAPI_STATE_SCHED,
348 NAPI_STATE_MISSED,
349 NAPI_STATE_DISABLE,
350 NAPI_STATE_NPSVC,
351 NAPI_STATE_LISTED,
352 NAPI_STATE_NO_BUSY_POLL,
353 NAPI_STATE_IN_BUSY_POLL,
354 NAPI_STATE_PREFER_BUSY_POLL,
355 NAPI_STATE_THREADED,
356 NAPI_STATE_SCHED_THREADED,
357};
358
359enum {
360 NAPIF_STATE_SCHED = BIT(NAPI_STATE_SCHED),
361 NAPIF_STATE_MISSED = BIT(NAPI_STATE_MISSED),
362 NAPIF_STATE_DISABLE = BIT(NAPI_STATE_DISABLE),
363 NAPIF_STATE_NPSVC = BIT(NAPI_STATE_NPSVC),
364 NAPIF_STATE_LISTED = BIT(NAPI_STATE_LISTED),
365 NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL),
366 NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL),
367 NAPIF_STATE_PREFER_BUSY_POLL = BIT(NAPI_STATE_PREFER_BUSY_POLL),
368 NAPIF_STATE_THREADED = BIT(NAPI_STATE_THREADED),
369 NAPIF_STATE_SCHED_THREADED = BIT(NAPI_STATE_SCHED_THREADED),
370};
371
372enum gro_result {
373 GRO_MERGED,
374 GRO_MERGED_FREE,
375 GRO_HELD,
376 GRO_NORMAL,
377 GRO_CONSUMED,
378};
379typedef enum gro_result gro_result_t;
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422enum rx_handler_result {
423 RX_HANDLER_CONSUMED,
424 RX_HANDLER_ANOTHER,
425 RX_HANDLER_EXACT,
426 RX_HANDLER_PASS,
427};
428typedef enum rx_handler_result rx_handler_result_t;
429typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
430
431void __napi_schedule(struct napi_struct *n);
432void __napi_schedule_irqoff(struct napi_struct *n);
433
434static inline bool napi_disable_pending(struct napi_struct *n)
435{
436 return test_bit(NAPI_STATE_DISABLE, &n->state);
437}
438
439static inline bool napi_prefer_busy_poll(struct napi_struct *n)
440{
441 return test_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state);
442}
443
444bool napi_schedule_prep(struct napi_struct *n);
445
446
447
448
449
450
451
452
453static inline void napi_schedule(struct napi_struct *n)
454{
455 if (napi_schedule_prep(n))
456 __napi_schedule(n);
457}
458
459
460
461
462
463
464
465static inline void napi_schedule_irqoff(struct napi_struct *n)
466{
467 if (napi_schedule_prep(n))
468 __napi_schedule_irqoff(n);
469}
470
471
472static inline bool napi_reschedule(struct napi_struct *napi)
473{
474 if (napi_schedule_prep(napi)) {
475 __napi_schedule(napi);
476 return true;
477 }
478 return false;
479}
480
481bool napi_complete_done(struct napi_struct *n, int work_done);
482
483
484
485
486
487
488
489
490static inline bool napi_complete(struct napi_struct *n)
491{
492 return napi_complete_done(n, 0);
493}
494
495int dev_set_threaded(struct net_device *dev, bool threaded);
496
497
498
499
500
501
502
503
504void napi_disable(struct napi_struct *n);
505
506void napi_enable(struct napi_struct *n);
507
508
509
510
511
512
513
514
515
516static inline void napi_synchronize(const struct napi_struct *n)
517{
518 if (IS_ENABLED(CONFIG_SMP))
519 while (test_bit(NAPI_STATE_SCHED, &n->state))
520 msleep(1);
521 else
522 barrier();
523}
524
525
526
527
528
529
530
531
532
533static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n)
534{
535 unsigned long val, new;
536
537 do {
538 val = READ_ONCE(n->state);
539 if (val & NAPIF_STATE_DISABLE)
540 return true;
541
542 if (!(val & NAPIF_STATE_SCHED))
543 return false;
544
545 new = val | NAPIF_STATE_MISSED;
546 } while (cmpxchg(&n->state, val, new) != val);
547
548 return true;
549}
550
551enum netdev_queue_state_t {
552 __QUEUE_STATE_DRV_XOFF,
553 __QUEUE_STATE_STACK_XOFF,
554 __QUEUE_STATE_FROZEN,
555};
556
557#define QUEUE_STATE_DRV_XOFF (1 << __QUEUE_STATE_DRV_XOFF)
558#define QUEUE_STATE_STACK_XOFF (1 << __QUEUE_STATE_STACK_XOFF)
559#define QUEUE_STATE_FROZEN (1 << __QUEUE_STATE_FROZEN)
560
561#define QUEUE_STATE_ANY_XOFF (QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF)
562#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
563 QUEUE_STATE_FROZEN)
564#define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \
565 QUEUE_STATE_FROZEN)
566
567
568
569
570
571
572
573
574
575
576
577struct netdev_queue {
578
579
580
581 struct net_device *dev;
582 struct Qdisc __rcu *qdisc;
583 struct Qdisc *qdisc_sleeping;
584#ifdef CONFIG_SYSFS
585 struct kobject kobj;
586#endif
587#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
588 int numa_node;
589#endif
590 unsigned long tx_maxrate;
591
592
593
594
595 unsigned long trans_timeout;
596
597
598 struct net_device *sb_dev;
599#ifdef CONFIG_XDP_SOCKETS
600 struct xsk_buff_pool *pool;
601#endif
602
603
604
605 spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
606 int xmit_lock_owner;
607
608
609
610 unsigned long trans_start;
611
612 unsigned long state;
613
614#ifdef CONFIG_BQL
615 struct dql dql;
616#endif
617} ____cacheline_aligned_in_smp;
618
619extern int sysctl_fb_tunnels_only_for_init_net;
620extern int sysctl_devconf_inherit_init_net;
621
622
623
624
625
626
627static inline bool net_has_fallback_tunnels(const struct net *net)
628{
629 return !IS_ENABLED(CONFIG_SYSCTL) ||
630 !sysctl_fb_tunnels_only_for_init_net ||
631 (net == &init_net && sysctl_fb_tunnels_only_for_init_net == 1);
632}
633
634static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
635{
636#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
637 return q->numa_node;
638#else
639 return NUMA_NO_NODE;
640#endif
641}
642
643static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
644{
645#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
646 q->numa_node = node;
647#endif
648}
649
650#ifdef CONFIG_RPS
651
652
653
654
655struct rps_map {
656 unsigned int len;
657 struct rcu_head rcu;
658 u16 cpus[];
659};
660#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
661
662
663
664
665
666
667struct rps_dev_flow {
668 u16 cpu;
669 u16 filter;
670 unsigned int last_qtail;
671};
672#define RPS_NO_FILTER 0xffff
673
674
675
676
677struct rps_dev_flow_table {
678 unsigned int mask;
679 struct rcu_head rcu;
680 struct rps_dev_flow flows[];
681};
682#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
683 ((_num) * sizeof(struct rps_dev_flow)))
684
685
686
687
688
689
690
691
692
693
694
695struct rps_sock_flow_table {
696 u32 mask;
697
698 u32 ents[] ____cacheline_aligned_in_smp;
699};
700#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))
701
702#define RPS_NO_CPU 0xffff
703
704extern u32 rps_cpu_mask;
705extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
706
707static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
708 u32 hash)
709{
710 if (table && hash) {
711 unsigned int index = hash & table->mask;
712 u32 val = hash & ~rps_cpu_mask;
713
714
715 val |= raw_smp_processor_id();
716
717 if (table->ents[index] != val)
718 table->ents[index] = val;
719 }
720}
721
722#ifdef CONFIG_RFS_ACCEL
723bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
724 u16 filter_id);
725#endif
726#endif
727
728
729struct netdev_rx_queue {
730 struct xdp_rxq_info xdp_rxq;
731#ifdef CONFIG_RPS
732 struct rps_map __rcu *rps_map;
733 struct rps_dev_flow_table __rcu *rps_flow_table;
734#endif
735 struct kobject kobj;
736 struct net_device *dev;
737#ifdef CONFIG_XDP_SOCKETS
738 struct xsk_buff_pool *pool;
739#endif
740} ____cacheline_aligned_in_smp;
741
742
743
744
745struct rx_queue_attribute {
746 struct attribute attr;
747 ssize_t (*show)(struct netdev_rx_queue *queue, char *buf);
748 ssize_t (*store)(struct netdev_rx_queue *queue,
749 const char *buf, size_t len);
750};
751
752
753enum xps_map_type {
754 XPS_CPUS = 0,
755 XPS_RXQS,
756 XPS_MAPS_MAX,
757};
758
759#ifdef CONFIG_XPS
760
761
762
763
764struct xps_map {
765 unsigned int len;
766 unsigned int alloc_len;
767 struct rcu_head rcu;
768 u16 queues[];
769};
770#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
771#define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
772 - sizeof(struct xps_map)) / sizeof(u16))
773
774
775
776
777
778
779
780
781
782
783
784
785struct xps_dev_maps {
786 struct rcu_head rcu;
787 unsigned int nr_ids;
788 s16 num_tc;
789 struct xps_map __rcu *attr_map[];
790};
791
792#define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \
793 (nr_cpu_ids * (_tcs) * sizeof(struct xps_map *)))
794
795#define XPS_RXQ_DEV_MAPS_SIZE(_tcs, _rxqs) (sizeof(struct xps_dev_maps) +\
796 (_rxqs * (_tcs) * sizeof(struct xps_map *)))
797
798#endif
799
800#define TC_MAX_QUEUE 16
801#define TC_BITMASK 15
802
803struct netdev_tc_txq {
804 u16 count;
805 u16 offset;
806};
807
808#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
809
810
811
812
813struct netdev_fcoe_hbainfo {
814 char manufacturer[64];
815 char serial_number[64];
816 char hardware_version[64];
817 char driver_version[64];
818 char optionrom_version[64];
819 char firmware_version[64];
820 char model[256];
821 char model_description[256];
822};
823#endif
824
825#define MAX_PHYS_ITEM_ID_LEN 32
826
827
828
829
830struct netdev_phys_item_id {
831 unsigned char id[MAX_PHYS_ITEM_ID_LEN];
832 unsigned char id_len;
833};
834
835static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
836 struct netdev_phys_item_id *b)
837{
838 return a->id_len == b->id_len &&
839 memcmp(a->id, b->id, a->id_len) == 0;
840}
841
842typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
843 struct sk_buff *skb,
844 struct net_device *sb_dev);
845
846enum net_device_path_type {
847 DEV_PATH_ETHERNET = 0,
848 DEV_PATH_VLAN,
849 DEV_PATH_BRIDGE,
850 DEV_PATH_PPPOE,
851 DEV_PATH_DSA,
852};
853
854struct net_device_path {
855 enum net_device_path_type type;
856 const struct net_device *dev;
857 union {
858 struct {
859 u16 id;
860 __be16 proto;
861 u8 h_dest[ETH_ALEN];
862 } encap;
863 struct {
864 enum {
865 DEV_PATH_BR_VLAN_KEEP,
866 DEV_PATH_BR_VLAN_TAG,
867 DEV_PATH_BR_VLAN_UNTAG,
868 DEV_PATH_BR_VLAN_UNTAG_HW,
869 } vlan_mode;
870 u16 vlan_id;
871 __be16 vlan_proto;
872 } bridge;
873 struct {
874 int port;
875 u16 proto;
876 } dsa;
877 };
878};
879
880#define NET_DEVICE_PATH_STACK_MAX 5
881#define NET_DEVICE_PATH_VLAN_MAX 2
882
883struct net_device_path_stack {
884 int num_paths;
885 struct net_device_path path[NET_DEVICE_PATH_STACK_MAX];
886};
887
888struct net_device_path_ctx {
889 const struct net_device *dev;
890 const u8 *daddr;
891
892 int num_vlans;
893 struct {
894 u16 id;
895 __be16 proto;
896 } vlan[NET_DEVICE_PATH_VLAN_MAX];
897};
898
899enum tc_setup_type {
900 TC_SETUP_QDISC_MQPRIO,
901 TC_SETUP_CLSU32,
902 TC_SETUP_CLSFLOWER,
903 TC_SETUP_CLSMATCHALL,
904 TC_SETUP_CLSBPF,
905 TC_SETUP_BLOCK,
906 TC_SETUP_QDISC_CBS,
907 TC_SETUP_QDISC_RED,
908 TC_SETUP_QDISC_PRIO,
909 TC_SETUP_QDISC_MQ,
910 TC_SETUP_QDISC_ETF,
911 TC_SETUP_ROOT_QDISC,
912 TC_SETUP_QDISC_GRED,
913 TC_SETUP_QDISC_TAPRIO,
914 TC_SETUP_FT,
915 TC_SETUP_QDISC_ETS,
916 TC_SETUP_QDISC_TBF,
917 TC_SETUP_QDISC_FIFO,
918 TC_SETUP_QDISC_HTB,
919};
920
921
922
923
924enum bpf_netdev_command {
925
926
927
928
929
930
931
932 XDP_SETUP_PROG,
933 XDP_SETUP_PROG_HW,
934
935 BPF_OFFLOAD_MAP_ALLOC,
936 BPF_OFFLOAD_MAP_FREE,
937 XDP_SETUP_XSK_POOL,
938};
939
940struct bpf_prog_offload_ops;
941struct netlink_ext_ack;
942struct xdp_umem;
943struct xdp_dev_bulk_queue;
944struct bpf_xdp_link;
945
946enum bpf_xdp_mode {
947 XDP_MODE_SKB = 0,
948 XDP_MODE_DRV = 1,
949 XDP_MODE_HW = 2,
950 __MAX_XDP_MODE
951};
952
953struct bpf_xdp_entity {
954 struct bpf_prog *prog;
955 struct bpf_xdp_link *link;
956};
957
958struct netdev_bpf {
959 enum bpf_netdev_command command;
960 union {
961
962 struct {
963 u32 flags;
964 struct bpf_prog *prog;
965 struct netlink_ext_ack *extack;
966 };
967
968 struct {
969 struct bpf_offloaded_map *offmap;
970 };
971
972 struct {
973 struct xsk_buff_pool *pool;
974 u16 queue_id;
975 } xsk;
976 };
977};
978
979
980#define XDP_WAKEUP_RX (1 << 0)
981#define XDP_WAKEUP_TX (1 << 1)
982
983#ifdef CONFIG_XFRM_OFFLOAD
984struct xfrmdev_ops {
985 int (*xdo_dev_state_add) (struct xfrm_state *x);
986 void (*xdo_dev_state_delete) (struct xfrm_state *x);
987 void (*xdo_dev_state_free) (struct xfrm_state *x);
988 bool (*xdo_dev_offload_ok) (struct sk_buff *skb,
989 struct xfrm_state *x);
990 void (*xdo_dev_state_advance_esn) (struct xfrm_state *x);
991};
992#endif
993
994struct dev_ifalias {
995 struct rcu_head rcuhead;
996 char ifalias[];
997};
998
999struct devlink;
1000struct tlsdev_ops;
1001
1002struct netdev_name_node {
1003 struct hlist_node hlist;
1004 struct list_head list;
1005 struct net_device *dev;
1006 const char *name;
1007};
1008
1009int netdev_name_node_alt_create(struct net_device *dev, const char *name);
1010int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);
1011
1012struct netdev_net_notifier {
1013 struct list_head list;
1014 struct notifier_block *nb;
1015};
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348struct net_device_ops {
1349 int (*ndo_init)(struct net_device *dev);
1350 void (*ndo_uninit)(struct net_device *dev);
1351 int (*ndo_open)(struct net_device *dev);
1352 int (*ndo_stop)(struct net_device *dev);
1353 netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
1354 struct net_device *dev);
1355 netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
1356 struct net_device *dev,
1357 netdev_features_t features);
1358 u16 (*ndo_select_queue)(struct net_device *dev,
1359 struct sk_buff *skb,
1360 struct net_device *sb_dev);
1361 void (*ndo_change_rx_flags)(struct net_device *dev,
1362 int flags);
1363 void (*ndo_set_rx_mode)(struct net_device *dev);
1364 int (*ndo_set_mac_address)(struct net_device *dev,
1365 void *addr);
1366 int (*ndo_validate_addr)(struct net_device *dev);
1367 int (*ndo_do_ioctl)(struct net_device *dev,
1368 struct ifreq *ifr, int cmd);
1369 int (*ndo_eth_ioctl)(struct net_device *dev,
1370 struct ifreq *ifr, int cmd);
1371 int (*ndo_siocbond)(struct net_device *dev,
1372 struct ifreq *ifr, int cmd);
1373 int (*ndo_siocwandev)(struct net_device *dev,
1374 struct if_settings *ifs);
1375 int (*ndo_siocdevprivate)(struct net_device *dev,
1376 struct ifreq *ifr,
1377 void __user *data, int cmd);
1378 int (*ndo_set_config)(struct net_device *dev,
1379 struct ifmap *map);
1380 int (*ndo_change_mtu)(struct net_device *dev,
1381 int new_mtu);
1382 int (*ndo_neigh_setup)(struct net_device *dev,
1383 struct neigh_parms *);
1384 void (*ndo_tx_timeout) (struct net_device *dev,
1385 unsigned int txqueue);
1386
1387 void (*ndo_get_stats64)(struct net_device *dev,
1388 struct rtnl_link_stats64 *storage);
1389 bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id);
1390 int (*ndo_get_offload_stats)(int attr_id,
1391 const struct net_device *dev,
1392 void *attr_data);
1393 struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
1394
1395 int (*ndo_vlan_rx_add_vid)(struct net_device *dev,
1396 __be16 proto, u16 vid);
1397 int (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
1398 __be16 proto, u16 vid);
1399#ifdef CONFIG_NET_POLL_CONTROLLER
1400 void (*ndo_poll_controller)(struct net_device *dev);
1401 int (*ndo_netpoll_setup)(struct net_device *dev,
1402 struct netpoll_info *info);
1403 void (*ndo_netpoll_cleanup)(struct net_device *dev);
1404#endif
1405 int (*ndo_set_vf_mac)(struct net_device *dev,
1406 int queue, u8 *mac);
1407 int (*ndo_set_vf_vlan)(struct net_device *dev,
1408 int queue, u16 vlan,
1409 u8 qos, __be16 proto);
1410 int (*ndo_set_vf_rate)(struct net_device *dev,
1411 int vf, int min_tx_rate,
1412 int max_tx_rate);
1413 int (*ndo_set_vf_spoofchk)(struct net_device *dev,
1414 int vf, bool setting);
1415 int (*ndo_set_vf_trust)(struct net_device *dev,
1416 int vf, bool setting);
1417 int (*ndo_get_vf_config)(struct net_device *dev,
1418 int vf,
1419 struct ifla_vf_info *ivf);
1420 int (*ndo_set_vf_link_state)(struct net_device *dev,
1421 int vf, int link_state);
1422 int (*ndo_get_vf_stats)(struct net_device *dev,
1423 int vf,
1424 struct ifla_vf_stats
1425 *vf_stats);
1426 int (*ndo_set_vf_port)(struct net_device *dev,
1427 int vf,
1428 struct nlattr *port[]);
1429 int (*ndo_get_vf_port)(struct net_device *dev,
1430 int vf, struct sk_buff *skb);
1431 int (*ndo_get_vf_guid)(struct net_device *dev,
1432 int vf,
1433 struct ifla_vf_guid *node_guid,
1434 struct ifla_vf_guid *port_guid);
1435 int (*ndo_set_vf_guid)(struct net_device *dev,
1436 int vf, u64 guid,
1437 int guid_type);
1438 int (*ndo_set_vf_rss_query_en)(
1439 struct net_device *dev,
1440 int vf, bool setting);
1441 int (*ndo_setup_tc)(struct net_device *dev,
1442 enum tc_setup_type type,
1443 void *type_data);
1444#if IS_ENABLED(CONFIG_FCOE)
1445 int (*ndo_fcoe_enable)(struct net_device *dev);
1446 int (*ndo_fcoe_disable)(struct net_device *dev);
1447 int (*ndo_fcoe_ddp_setup)(struct net_device *dev,
1448 u16 xid,
1449 struct scatterlist *sgl,
1450 unsigned int sgc);
1451 int (*ndo_fcoe_ddp_done)(struct net_device *dev,
1452 u16 xid);
1453 int (*ndo_fcoe_ddp_target)(struct net_device *dev,
1454 u16 xid,
1455 struct scatterlist *sgl,
1456 unsigned int sgc);
1457 int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
1458 struct netdev_fcoe_hbainfo *hbainfo);
1459#endif
1460
1461#if IS_ENABLED(CONFIG_LIBFCOE)
1462#define NETDEV_FCOE_WWNN 0
1463#define NETDEV_FCOE_WWPN 1
1464 int (*ndo_fcoe_get_wwn)(struct net_device *dev,
1465 u64 *wwn, int type);
1466#endif
1467
1468#ifdef CONFIG_RFS_ACCEL
1469 int (*ndo_rx_flow_steer)(struct net_device *dev,
1470 const struct sk_buff *skb,
1471 u16 rxq_index,
1472 u32 flow_id);
1473#endif
1474 int (*ndo_add_slave)(struct net_device *dev,
1475 struct net_device *slave_dev,
1476 struct netlink_ext_ack *extack);
1477 int (*ndo_del_slave)(struct net_device *dev,
1478 struct net_device *slave_dev);
1479 struct net_device* (*ndo_get_xmit_slave)(struct net_device *dev,
1480 struct sk_buff *skb,
1481 bool all_slaves);
1482 struct net_device* (*ndo_sk_get_lower_dev)(struct net_device *dev,
1483 struct sock *sk);
1484 netdev_features_t (*ndo_fix_features)(struct net_device *dev,
1485 netdev_features_t features);
1486 int (*ndo_set_features)(struct net_device *dev,
1487 netdev_features_t features);
1488 int (*ndo_neigh_construct)(struct net_device *dev,
1489 struct neighbour *n);
1490 void (*ndo_neigh_destroy)(struct net_device *dev,
1491 struct neighbour *n);
1492
1493 int (*ndo_fdb_add)(struct ndmsg *ndm,
1494 struct nlattr *tb[],
1495 struct net_device *dev,
1496 const unsigned char *addr,
1497 u16 vid,
1498 u16 flags,
1499 struct netlink_ext_ack *extack);
1500 int (*ndo_fdb_del)(struct ndmsg *ndm,
1501 struct nlattr *tb[],
1502 struct net_device *dev,
1503 const unsigned char *addr,
1504 u16 vid);
1505 int (*ndo_fdb_dump)(struct sk_buff *skb,
1506 struct netlink_callback *cb,
1507 struct net_device *dev,
1508 struct net_device *filter_dev,
1509 int *idx);
1510 int (*ndo_fdb_get)(struct sk_buff *skb,
1511 struct nlattr *tb[],
1512 struct net_device *dev,
1513 const unsigned char *addr,
1514 u16 vid, u32 portid, u32 seq,
1515 struct netlink_ext_ack *extack);
1516 int (*ndo_bridge_setlink)(struct net_device *dev,
1517 struct nlmsghdr *nlh,
1518 u16 flags,
1519 struct netlink_ext_ack *extack);
1520 int (*ndo_bridge_getlink)(struct sk_buff *skb,
1521 u32 pid, u32 seq,
1522 struct net_device *dev,
1523 u32 filter_mask,
1524 int nlflags);
1525 int (*ndo_bridge_dellink)(struct net_device *dev,
1526 struct nlmsghdr *nlh,
1527 u16 flags);
1528 int (*ndo_change_carrier)(struct net_device *dev,
1529 bool new_carrier);
1530 int (*ndo_get_phys_port_id)(struct net_device *dev,
1531 struct netdev_phys_item_id *ppid);
1532 int (*ndo_get_port_parent_id)(struct net_device *dev,
1533 struct netdev_phys_item_id *ppid);
1534 int (*ndo_get_phys_port_name)(struct net_device *dev,
1535 char *name, size_t len);
1536 void* (*ndo_dfwd_add_station)(struct net_device *pdev,
1537 struct net_device *dev);
1538 void (*ndo_dfwd_del_station)(struct net_device *pdev,
1539 void *priv);
1540
1541 int (*ndo_set_tx_maxrate)(struct net_device *dev,
1542 int queue_index,
1543 u32 maxrate);
1544 int (*ndo_get_iflink)(const struct net_device *dev);
1545 int (*ndo_change_proto_down)(struct net_device *dev,
1546 bool proto_down);
1547 int (*ndo_fill_metadata_dst)(struct net_device *dev,
1548 struct sk_buff *skb);
1549 void (*ndo_set_rx_headroom)(struct net_device *dev,
1550 int needed_headroom);
1551 int (*ndo_bpf)(struct net_device *dev,
1552 struct netdev_bpf *bpf);
1553 int (*ndo_xdp_xmit)(struct net_device *dev, int n,
1554 struct xdp_frame **xdp,
1555 u32 flags);
1556 struct net_device * (*ndo_xdp_get_xmit_slave)(struct net_device *dev,
1557 struct xdp_buff *xdp);
1558 int (*ndo_xsk_wakeup)(struct net_device *dev,
1559 u32 queue_id, u32 flags);
1560 struct devlink_port * (*ndo_get_devlink_port)(struct net_device *dev);
1561 int (*ndo_tunnel_ctl)(struct net_device *dev,
1562 struct ip_tunnel_parm *p, int cmd);
1563 struct net_device * (*ndo_get_peer_dev)(struct net_device *dev);
1564 int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx,
1565 struct net_device_path *path);
1566};
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
/**
 * enum netdev_priv_flags - &struct net_device priv_flags
 *
 * These are the private (kernel-internal) device flags stored in
 * dev->priv_flags; they are never exported to user space, unlike the
 * uapi IFF_* flags in dev->flags.  Each value is a single bit.
 */
enum netdev_priv_flags {
	IFF_802_1Q_VLAN			= 1<<0,		/* 802.1Q VLAN device */
	IFF_EBRIDGE			= 1<<1,		/* Ethernet bridging device */
	IFF_BONDING			= 1<<2,		/* bonding master or slave */
	IFF_ISATAP			= 1<<3,		/* ISATAP interface (RFC4214) */
	IFF_WAN_HDLC			= 1<<4,		/* WAN HDLC device */
	IFF_XMIT_DST_RELEASE		= 1<<5,		/* dev_hard_start_xmit() may release skb->dst */
	IFF_DONT_BRIDGE			= 1<<6,		/* disallow bridging this device */
	IFF_DISABLE_NETPOLL		= 1<<7,		/* disable netpoll at run-time */
	IFF_MACVLAN_PORT		= 1<<8,		/* device used as macvlan port */
	IFF_BRIDGE_PORT			= 1<<9,		/* device used as bridge port */
	IFF_OVS_DATAPATH		= 1<<10,	/* device used as Open vSwitch datapath port */
	IFF_TX_SKB_SHARING		= 1<<11,	/* sharing skbs on transmit is allowed */
	IFF_UNICAST_FLT			= 1<<12,	/* supports unicast filtering */
	IFF_TEAM_PORT			= 1<<13,	/* device used as team port */
	IFF_SUPP_NOFCS			= 1<<14,	/* sending custom FCS is supported */
	IFF_LIVE_ADDR_CHANGE		= 1<<15,	/* hw address change when it's running */
	IFF_MACVLAN			= 1<<16,	/* this is a macvlan device */
	IFF_XMIT_DST_RELEASE_PERM	= 1<<17,	/* IFF_XMIT_DST_RELEASE not taking into account underlying stacked devices */
	IFF_L3MDEV_MASTER		= 1<<18,	/* device is an L3 master device */
	IFF_NO_QUEUE			= 1<<19,	/* device can run without qdisc attached */
	IFF_OPENVSWITCH			= 1<<20,	/* device is an Open vSwitch master */
	IFF_L3MDEV_SLAVE		= 1<<21,	/* device is enslaved to an L3 master device */
	IFF_TEAM			= 1<<22,	/* device is a team device */
	IFF_RXFH_CONFIGURED		= 1<<23,	/* device has had Rx Flow indirection table configured */
	IFF_PHONY_HEADROOM		= 1<<24,	/* the headroom value is controlled by an external entity (i.e. the master device for bridged veth) */
	IFF_MACSEC			= 1<<25,	/* device is a MACsec device */
	IFF_NO_RX_HANDLER		= 1<<26,	/* device doesn't support the rx_handler hook */
	IFF_FAILOVER			= 1<<27,	/* device is a failover master device */
	IFF_FAILOVER_SLAVE		= 1<<28,	/* device is lower dev of a failover master device */
	IFF_L3MDEV_RX_HANDLER		= 1<<29,	/* only invoke the rx handler of L3 master device */
	IFF_LIVE_RENAME_OK		= 1<<30,	/* rename is allowed while device is up and running */
	IFF_TX_SKB_NO_LINEAR		= 1<<31,	/* device/driver is capable of xmit dealing with skbs with frag-only, no linear head */
};

/*
 * Self-referential defines so code can test for the presence of an
 * individual flag with #ifdef.
 */
#define IFF_802_1Q_VLAN			IFF_802_1Q_VLAN
#define IFF_EBRIDGE			IFF_EBRIDGE
#define IFF_BONDING			IFF_BONDING
#define IFF_ISATAP			IFF_ISATAP
#define IFF_WAN_HDLC			IFF_WAN_HDLC
#define IFF_XMIT_DST_RELEASE		IFF_XMIT_DST_RELEASE
#define IFF_DONT_BRIDGE			IFF_DONT_BRIDGE
#define IFF_DISABLE_NETPOLL		IFF_DISABLE_NETPOLL
#define IFF_MACVLAN_PORT		IFF_MACVLAN_PORT
#define IFF_BRIDGE_PORT			IFF_BRIDGE_PORT
#define IFF_OVS_DATAPATH		IFF_OVS_DATAPATH
#define IFF_TX_SKB_SHARING		IFF_TX_SKB_SHARING
#define IFF_UNICAST_FLT			IFF_UNICAST_FLT
#define IFF_TEAM_PORT			IFF_TEAM_PORT
#define IFF_SUPP_NOFCS			IFF_SUPP_NOFCS
#define IFF_LIVE_ADDR_CHANGE		IFF_LIVE_ADDR_CHANGE
#define IFF_MACVLAN			IFF_MACVLAN
#define IFF_XMIT_DST_RELEASE_PERM	IFF_XMIT_DST_RELEASE_PERM
#define IFF_L3MDEV_MASTER		IFF_L3MDEV_MASTER
#define IFF_NO_QUEUE			IFF_NO_QUEUE
#define IFF_OPENVSWITCH			IFF_OPENVSWITCH
#define IFF_L3MDEV_SLAVE		IFF_L3MDEV_SLAVE
#define IFF_TEAM			IFF_TEAM
#define IFF_RXFH_CONFIGURED		IFF_RXFH_CONFIGURED
#define IFF_PHONY_HEADROOM		IFF_PHONY_HEADROOM
#define IFF_MACSEC			IFF_MACSEC
#define IFF_NO_RX_HANDLER		IFF_NO_RX_HANDLER
#define IFF_FAILOVER			IFF_FAILOVER
#define IFF_FAILOVER_SLAVE		IFF_FAILOVER_SLAVE
#define IFF_L3MDEV_RX_HANDLER		IFF_L3MDEV_RX_HANDLER
#define IFF_LIVE_RENAME_OK		IFF_LIVE_RENAME_OK
#define IFF_TX_SKB_NO_LINEAR		IFF_TX_SKB_NO_LINEAR
1683
1684
/* Type tag for dev->ml_priv, checked by netdev_get_ml_priv() below. */
enum netdev_ml_priv_type {
	ML_PRIV_NONE,
	ML_PRIV_CAN,
};
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
/*
 * struct net_device - the network device structure.
 *
 * This is the central structure describing one network interface.
 * NOTE(review): field ordering is deliberate — the TX path fields below
 * are ____cacheline_aligned_in_smp; do not reorder members casually.
 * Comments below describe only what is visible here; locking rules for
 * individual fields follow the usual netdev conventions (RTNL,
 * addr_list_lock, tx_global_lock) — confirm against callers.
 */
struct net_device {
	/* Interface name and aliases. */
	char			name[IFNAMSIZ];
	struct netdev_name_node	*name_node;
	struct dev_ifalias	__rcu *ifalias;
	/*
	 * I/O-specific fields, kept for compatibility with old drivers.
	 */
	unsigned long		mem_end;	/* shared memory end */
	unsigned long		mem_start;	/* shared memory start */
	unsigned long		base_addr;	/* device I/O address */

	/*
	 * Some hardware also needs these fields (state, dev_list,
	 * napi_list, unreg_list, close_list) but they are not part of
	 * the usual set specified in Space.c.
	 */
	unsigned long		state;		/* generic state bits (__LINK_STATE_*) */

	struct list_head	dev_list;	/* global per-netns device list */
	struct list_head	napi_list;
	struct list_head	unreg_list;
	struct list_head	close_list;
	struct list_head	ptype_all;	/* device-specific packet handlers for all protocols */
	struct list_head	ptype_specific;	/* device-specific, protocol-specific handlers */

	/* Directly linked upper/lower devices (adjacency lists). */
	struct {
		struct list_head upper;
		struct list_head lower;
	} adj_list;

	unsigned int		flags;		/* interface flags (IFF_*, uapi visible) */
	unsigned int		priv_flags;	/* IFF_* flags invisible to userspace (enum netdev_priv_flags) */
	const struct net_device_ops *netdev_ops;
	int			ifindex;	/* interface index, unique per netns */
	unsigned short		gflags;
	unsigned short		hard_header_len;	/* maximum hardware header length */

	/* Interface MTU and headroom/tailroom hints for skb allocation. */
	unsigned int		mtu;
	unsigned short		needed_headroom;	/* extra headroom the hardware may need */
	unsigned short		needed_tailroom;	/* extra tailroom the hardware may need */

	/* Feature sets — see netdev_features.h. */
	netdev_features_t	features;		/* currently active features */
	netdev_features_t	hw_features;		/* user-changeable features */
	netdev_features_t	wanted_features;	/* user-requested features */
	netdev_features_t	vlan_features;		/* mask of features inheritable by VLAN devices */
	netdev_features_t	hw_enc_features;	/* mask of features inherited by encapsulating devices */
	netdev_features_t	mpls_features;		/* mask of features inheritable by MPLS */
	netdev_features_t	gso_partial_features;	/* features advertised only for GSO_PARTIAL skbs */

	unsigned int		min_mtu;	/* lowest MTU the device may accept */
	unsigned int		max_mtu;	/* highest MTU the device may accept */
	unsigned short		type;		/* hardware type (ARPHRD_*) */
	unsigned char		min_header_len;	/* minimum hardware header length */
	unsigned char		name_assign_type;

	int			group;		/* device group the device belongs to */

	struct net_device_stats	stats;		/* legacy statistics block */

	/* Drop/nohandler counters kept out of the legacy stats block. */
	atomic_long_t		rx_dropped;
	atomic_long_t		tx_dropped;
	atomic_long_t		rx_nohandler;

	/* Carrier state transition counters. */
	atomic_t		carrier_up_count;
	atomic_t		carrier_down_count;

#ifdef CONFIG_WIRELESS_EXT
	const struct iw_handler_def *wireless_handlers;	/* wireless extension handlers */
	struct iw_public_data	*wireless_data;
#endif
	const struct ethtool_ops *ethtool_ops;
#ifdef CONFIG_NET_L3_MASTER_DEV
	const struct l3mdev_ops	*l3mdev_ops;
#endif
#if IS_ENABLED(CONFIG_IPV6)
	const struct ndisc_ops	*ndisc_ops;
#endif

#ifdef CONFIG_XFRM_OFFLOAD
	const struct xfrmdev_ops *xfrmdev_ops;
#endif

#if IS_ENABLED(CONFIG_TLS_DEVICE)
	const struct tlsdev_ops *tlsdev_ops;
#endif

	const struct header_ops *header_ops;	/* hardware header building/parsing ops */

	unsigned char		operstate;	/* RFC2863 operational state */
	unsigned char		link_mode;	/* mapping policy to operstate */

	unsigned char		if_port;	/* selectable AUI, TP, ... */
	unsigned char		dma;		/* DMA channel */

	/* Addresses. */
	unsigned char		perm_addr[MAX_ADDR_LEN];	/* permanent hw address */
	unsigned char		addr_assign_type;		/* hw address assignment type */
	unsigned char		addr_len;			/* hardware address length */
	unsigned char		upper_level;	/* maximum depth level of upper devices */
	unsigned char		lower_level;	/* maximum depth level of lower devices */

	unsigned short		neigh_priv_len;
	unsigned short		dev_id;		/* used to differentiate devices sharing the same link layer address */
	unsigned short		dev_port;	/* used to differentiate devices sharing the same function */
	unsigned short		padded;		/* padding added by alloc_netdev() */

	spinlock_t		addr_list_lock;	/* protects the uc/mc address lists */
	int			irq;

	struct netdev_hw_addr_list	uc;	/* unicast MAC addresses */
	struct netdev_hw_addr_list	mc;	/* multicast MAC addresses */
	struct netdev_hw_addr_list	dev_addrs;	/* list of device hw addresses */

#ifdef CONFIG_SYSFS
	struct kset		*queues_kset;	/* sysfs kset holding RX/TX queue dirs */
#endif
#ifdef CONFIG_LOCKDEP
	struct list_head	unlink_list;
#endif
	unsigned int		promiscuity;	/* promiscuous mode reference count */
	unsigned int		allmulti;	/* allmulti mode reference count */
	bool			uc_promisc;
#ifdef CONFIG_LOCKDEP
	unsigned char		nested_level;
#endif

	/* Protocol-specific pointers. */
#if IS_ENABLED(CONFIG_VLAN_8021Q)
	struct vlan_info __rcu	*vlan_info;
#endif
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_port		*dsa_ptr;
#endif
#if IS_ENABLED(CONFIG_TIPC)
	struct tipc_bearer __rcu *tipc_ptr;
#endif
#if IS_ENABLED(CONFIG_IRDA) || IS_ENABLED(CONFIG_ATALK)
	void			*atalk_ptr;
#endif
	struct in_device __rcu	*ip_ptr;	/* IPv4 specific data */
#if IS_ENABLED(CONFIG_DECNET)
	struct dn_dev __rcu	*dn_ptr;
#endif
	struct inet6_dev __rcu	*ip6_ptr;	/* IPv6 specific data */
#if IS_ENABLED(CONFIG_AX25)
	void			*ax25_ptr;
#endif
	struct wireless_dev	*ieee80211_ptr;
	struct wpan_dev		*ieee802154_ptr;
#if IS_ENABLED(CONFIG_MPLS_ROUTING)
	struct mpls_dev __rcu	*mpls_ptr;
#endif
#if IS_ENABLED(CONFIG_MCTP)
	struct mctp_dev __rcu	*mctp_ptr;
#endif

	/*
	 * Cache lines mostly used on receive path (including eth_type_trans()).
	 */
	unsigned char		*dev_addr;	/* current hw address */

	struct netdev_rx_queue	*_rx;		/* array of RX queues */
	unsigned int		num_rx_queues;	/* number of RX queues allocated at register time */
	unsigned int		real_num_rx_queues;	/* number of RX queues currently active */

	struct bpf_prog __rcu	*xdp_prog;	/* attached XDP BPF program */
	unsigned long		gro_flush_timeout;
	int			napi_defer_hard_irqs;
	rx_handler_func_t __rcu	*rx_handler;	/* handler for special RX path processing */
	void __rcu		*rx_handler_data;

#ifdef CONFIG_NET_CLS_ACT
	struct mini_Qdisc __rcu	*miniq_ingress;	/* ingress/clsact qdisc-specific state */
#endif
	struct netdev_queue __rcu *ingress_queue;
#ifdef CONFIG_NETFILTER_INGRESS
	struct nf_hook_entries __rcu *nf_hooks_ingress;
#endif

	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw broadcast address */
#ifdef CONFIG_RFS_ACCEL
	struct cpu_rmap		*rx_cpu_rmap;	/* CPU reverse-mapping for interrupt affinity (aRFS) */
#endif
	struct hlist_node	index_hlist;	/* ifindex hash chain */

	/*
	 * Cache lines mostly used on transmit path.
	 */
	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;	/* array of TX queues */
	unsigned int		num_tx_queues;		/* number of TX queues allocated at register time */
	unsigned int		real_num_tx_queues;	/* number of TX queues currently active */
	struct Qdisc		*qdisc;			/* root qdisc */
	unsigned int		tx_queue_len;		/* maximum frames per queue allowed */
	spinlock_t		tx_global_lock;

	struct xdp_dev_bulk_queue __percpu *xdp_bulkq;

#ifdef CONFIG_XPS
	struct xps_dev_maps __rcu *xps_maps[XPS_MAPS_MAX];	/* XPS CPU/queue maps */
#endif
#ifdef CONFIG_NET_CLS_ACT
	struct mini_Qdisc __rcu	*miniq_egress;
#endif

#ifdef CONFIG_NET_SCHED
	DECLARE_HASHTABLE	(qdisc_hash, 4);	/* qdisc handle lookup table */
#endif

	/* TX watchdog. */
	struct timer_list	watchdog_timer;
	int			watchdog_timeo;

	u32			proto_down_reason;	/* reason a netdev interface is held down */

	struct list_head	todo_list;	/* delayed register/unregister */

	/* Device refcount — per-cpu or plain depending on config. */
#ifdef CONFIG_PCPU_DEV_REFCNT
	int __percpu		*pcpu_refcnt;
#else
	refcount_t		dev_refcnt;
#endif

	struct list_head	link_watch_list;	/* link watch work list */

	/* Register/unregister state machine. */
	enum { NETREG_UNINITIALIZED=0,
	       NETREG_REGISTERED,	/* completed register_netdevice() */
	       NETREG_UNREGISTERING,	/* called unregister_netdevice() */
	       NETREG_UNREGISTERED,	/* completed unregister todo */
	       NETREG_RELEASED,		/* called free_netdev() */
	       NETREG_DUMMY,		/* dummy device for NAPI poll */
	} reg_state:8;

	bool dismantle;		/* device is going to be freed */

	enum {
		RTNL_LINK_INITIALIZED,
		RTNL_LINK_INITIALIZING,
	} rtnl_link_state:16;

	bool needs_free_netdev;	/* should free_netdev() be called on unregister? */
	void (*priv_destructor)(struct net_device *dev);

#ifdef CONFIG_NETPOLL
	struct netpoll_info __rcu	*npinfo;
#endif

	possible_net_t			nd_net;	/* network namespace this device is inside */

	/* Mid-layer private data; tagged with ml_priv_type. */
	void				*ml_priv;
	enum netdev_ml_priv_type	ml_priv_type;

	/* Per-cpu software stats, one variant per device class. */
	union {
		struct pcpu_lstats __percpu		*lstats;
		struct pcpu_sw_netstats __percpu	*tstats;
		struct pcpu_dstats __percpu		*dstats;
	};

#if IS_ENABLED(CONFIG_GARP)
	struct garp_port __rcu	*garp_port;
#endif
#if IS_ENABLED(CONFIG_MRP)
	struct mrp_port __rcu	*mrp_port;
#endif

	struct device		dev;	/* embedded class/net/name device */
	const struct attribute_group *sysfs_groups[4];
	const struct attribute_group *sysfs_rx_queue_group;

	const struct rtnl_link_ops *rtnl_link_ops;

	/* GSO limits. */
#define GSO_MAX_SIZE		65536
	unsigned int		gso_max_size;	/* maximum size of generic segmentation offload */
#define GSO_MAX_SEGS		65535
	u16			gso_max_segs;	/* maximum number of segments that can be passed to the NIC for GSO */

#ifdef CONFIG_DCB
	const struct dcbnl_rtnl_ops *dcbnl_ops;	/* Data Center Bridging netlink ops */
#endif
	/* Traffic-class mapping. */
	s16			num_tc;		/* number of traffic classes; negative marks a subordinate channel */
	struct netdev_tc_txq	tc_to_txq[TC_MAX_QUEUE];
	u8			prio_tc_map[TC_BITMASK + 1];

#if IS_ENABLED(CONFIG_FCOE)
	unsigned int		fcoe_ddp_xid;	/* max exchange id for FCoE LRO by ddp */
#endif
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
	struct netprio_map __rcu *priomap;
#endif
	struct phy_device	*phydev;	/* attached PHY device */
	struct sfp_bus		*sfp_bus;	/* attached SFP bus */
	struct lock_class_key	*qdisc_tx_busylock;	/* lockdep class for qdisc busylock spinlock */
	struct lock_class_key	*qdisc_running_key;	/* lockdep class for qdisc running seqcount */
	bool			proto_down;	/* protocol-port-down state requested by user space */
	unsigned		wol_enabled:1;	/* Wake-on-LAN is enabled */
	unsigned		threaded:1;	/* NAPI threaded mode is enabled */

	struct list_head	net_notifier_list;	/* per-netns netdevice notifiers registered on this device */

#if IS_ENABLED(CONFIG_MACSEC)
	/* MACsec offload ops. */
	const struct macsec_ops *macsec_ops;
#endif
	const struct udp_tunnel_nic_info	*udp_tunnel_nic_info;	/* static UDP tunnel offload capabilities */
	struct udp_tunnel_nic	*udp_tunnel_nic;	/* UDP tunnel offload state */

	/* Stores attached XDP BPF programs, one slot per attach mode. */
	struct bpf_xdp_entity	xdp_state[__MAX_XDP_MODE];
};
2270#define to_net_dev(d) container_of(d, struct net_device, dev)
2271
2272static inline bool netif_elide_gro(const struct net_device *dev)
2273{
2274 if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog)
2275 return true;
2276 return false;
2277}
2278
2279#define NETDEV_ALIGN 32
2280
2281static inline
2282int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
2283{
2284 return dev->prio_tc_map[prio & TC_BITMASK];
2285}
2286
2287static inline
2288int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
2289{
2290 if (tc >= dev->num_tc)
2291 return -EINVAL;
2292
2293 dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
2294 return 0;
2295}
2296
2297int netdev_txq_to_tc(struct net_device *dev, unsigned int txq);
2298void netdev_reset_tc(struct net_device *dev);
2299int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset);
2300int netdev_set_num_tc(struct net_device *dev, u8 num_tc);
2301
/* Return the number of traffic classes configured on the device. */
static inline
int netdev_get_num_tc(struct net_device *dev)
{
	return dev->num_tc;
}
2307
/*
 * Prefetch 128 bytes for reading: network headers typically span up to
 * 128 bytes, so on architectures with cache lines smaller than that we
 * also prefetch the following line.
 */
static inline void net_prefetch(void *p)
{
	prefetch(p);
#if L1_CACHE_BYTES < 128
	prefetch((u8 *)p + L1_CACHE_BYTES);
#endif
}
2315
/* Write-intent variant of net_prefetch(). */
static inline void net_prefetchw(void *p)
{
	prefetchw(p);
#if L1_CACHE_BYTES < 128
	prefetchw((u8 *)p + L1_CACHE_BYTES);
#endif
}
2323
2324void netdev_unbind_sb_channel(struct net_device *dev,
2325 struct net_device *sb_dev);
2326int netdev_bind_sb_channel_queue(struct net_device *dev,
2327 struct net_device *sb_dev,
2328 u8 tc, u16 count, u16 offset);
2329int netdev_set_sb_channel(struct net_device *dev, u16 channel);
2330static inline int netdev_get_sb_channel(struct net_device *dev)
2331{
2332 return max_t(int, -dev->num_tc, 0);
2333}
2334
/* Return TX queue @index of @dev.  No bounds checking is performed. */
static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
					 unsigned int index)
{
	return &dev->_tx[index];
}
2341
/* Return the TX queue an skb is mapped to via its queue_mapping field. */
static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
						    const struct sk_buff *skb)
{
	return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
}
2347
2348static inline void netdev_for_each_tx_queue(struct net_device *dev,
2349 void (*f)(struct net_device *,
2350 struct netdev_queue *,
2351 void *),
2352 void *arg)
2353{
2354 unsigned int i;
2355
2356 for (i = 0; i < dev->num_tx_queues; i++)
2357 f(dev, &dev->_tx[i], arg);
2358}
2359
/*
 * Give each caller's device its own static lockdep classes for the qdisc
 * busylock/running keys, the address list lock and every per-queue xmit
 * lock, so lockdep can tell nested devices (e.g. stacked vlan/bond)
 * apart.  The keys are static so each expansion site gets unique ones.
 */
#define netdev_lockdep_set_classes(dev)				\
{								\
	static struct lock_class_key qdisc_tx_busylock_key;	\
	static struct lock_class_key qdisc_running_key;		\
	static struct lock_class_key qdisc_xmit_lock_key;	\
	static struct lock_class_key dev_addr_list_lock_key;	\
	unsigned int i;						\
								\
	(dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key;	\
	(dev)->qdisc_running_key = &qdisc_running_key;		\
	lockdep_set_class(&(dev)->addr_list_lock,		\
			  &dev_addr_list_lock_key);		\
	for (i = 0; i < (dev)->num_tx_queues; i++)		\
		lockdep_set_class(&(dev)->_tx[i]._xmit_lock,	\
				  &qdisc_xmit_lock_key);	\
}
2376
2377u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
2378 struct net_device *sb_dev);
2379struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
2380 struct sk_buff *skb,
2381 struct net_device *sb_dev);
2382
2383
2384
2385
2386static inline unsigned netdev_get_fwd_headroom(struct net_device *dev)
2387{
2388 return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom;
2389}
2390
/* Ask the driver (if it supports it) to use @new_hr of RX headroom. */
static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr)
{
	if (dev->netdev_ops->ndo_set_rx_headroom)
		dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr);
}
2396
2397
/* Reset RX headroom to the default; -1 means "driver's own choice". */
static inline void netdev_reset_rx_headroom(struct net_device *dev)
{
	netdev_set_rx_headroom(dev, -1);
}
2402
2403static inline void *netdev_get_ml_priv(struct net_device *dev,
2404 enum netdev_ml_priv_type type)
2405{
2406 if (dev->ml_priv_type != type)
2407 return NULL;
2408
2409 return dev->ml_priv;
2410}
2411
/*
 * netdev_set_ml_priv - install mid-layer private data with a type tag.
 * Warns (but still overwrites) if data of a different type, or untyped
 * data, was already installed.
 */
static inline void netdev_set_ml_priv(struct net_device *dev,
				      void *ml_priv,
				      enum netdev_ml_priv_type type)
{
	WARN(dev->ml_priv_type && dev->ml_priv_type != type,
	     "Overwriting already set ml_priv_type (%u) with different ml_priv_type (%u)!\n",
	     dev->ml_priv_type, type);
	WARN(!dev->ml_priv_type && dev->ml_priv,
	     "Overwriting already set ml_priv and ml_priv_type is ML_PRIV_NONE!\n");

	dev->ml_priv = ml_priv;
	dev->ml_priv_type = type;
}
2425
2426
2427
2428
/* Return the network namespace this device lives in. */
static inline
struct net *dev_net(const struct net_device *dev)
{
	return read_pnet(&dev->nd_net);
}

/* Move the device into network namespace @net (no refcounting here). */
static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
	write_pnet(&dev->nd_net, net);
}
2440
2441
2442
2443
2444
2445
2446
/*
 * netdev_priv - access the driver-private area of a network device.
 * The private area is allocated contiguously after struct net_device,
 * aligned to NETDEV_ALIGN.
 */
static inline void *netdev_priv(const struct net_device *dev)
{
	return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
}
2451
2452
2453
2454
2455#define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
2456
2457
2458
2459
2460
2461#define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
2462
2463
2464
2465
2466#define NAPI_POLL_WEIGHT 64
2467
2468
2469
2470
2471
2472
2473
2474
2475
2476
2477
2478void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2479 int (*poll)(struct napi_struct *, int), int weight);
2480
2481
2482
2483
2484
2485
2486
2487
2488
2489
2490
2491
/*
 * netif_tx_napi_add - initialize a TX-only NAPI context.
 * Marks the context NAPI_STATE_NO_BUSY_POLL so busy polling never finds
 * it, then registers it like a normal NAPI instance.
 */
static inline void netif_tx_napi_add(struct net_device *dev,
				     struct napi_struct *napi,
				     int (*poll)(struct napi_struct *, int),
				     int weight)
{
	set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
	netif_napi_add(dev, napi, poll, weight);
}
2500
2501
2502
2503
2504
2505
2506
2507
2508
2509void __netif_napi_del(struct napi_struct *napi);
2510
2511
2512
2513
2514
2515
2516
/*
 * netif_napi_del - remove a NAPI context and wait for outstanding RCU
 * readers, so the memory may be freed right after return.
 */
static inline void netif_napi_del(struct napi_struct *napi)
{
	__netif_napi_del(napi);
	synchronize_net();
}
2522
/* GRO per-skb state, stored in skb->cb during receive aggregation. */
struct napi_gro_cb {
	/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
	void *frag0;

	/* Length of frag0. */
	unsigned int frag0_len;

	/* This indicates where we are processing relative to skb->data. */
	int data_offset;

	/* This is non-zero if the packet cannot be merged with the new skb. */
	u16	flush;

	/* Save the IP ID here and check when we get to the transport layer */
	u16	flush_id;

	/* Number of segments aggregated. */
	u16	count;

	/* Start offset for remote checksum offload */
	u16	gro_remcsum_start;

	/* jiffies when first packet was created/queued */
	unsigned long age;

	/* Used in ipv6_gro_receive() and foo-over-udp */
	u16	proto;

	/* This is non-zero if the packet may be of the same flow. */
	u8	same_flow:1;

	/* Used in tunnel GRO receive */
	u8	encap_mark:1;

	/* GRO checksum is valid */
	u8	csum_valid:1;

	/* Number of checksums via CHECKSUM_UNNECESSARY */
	u8	csum_cnt:3;

	/* Free the skb? */
	u8	free:2;
#define NAPI_GRO_FREE		1
#define NAPI_GRO_FREE_STOLEN_HEAD 2

	/* Used in foo-over-udp, set in udp[46]_gro_receive */
	u8	is_ipv6:1;

	/* Used in GRE, set in fou/gue_gro_receive */
	u8	is_fou:1;

	/* Used to determine if flush_id can be ignored */
	u8	is_atomic:1;

	/* Number of gro_receive callbacks this packet already went through */
	u8 recursion_counter:4;

	/* GRO is done by frag_list pointer chaining. */
	u8	is_flist:1;

	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
	__wsum	csum;

	/* used in skb_gro_receive() slow path */
	struct sk_buff *last;
};
2589
2590#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
2591
2592#define GRO_RECURSION_LIMIT 15
2593static inline int gro_recursion_inc_test(struct sk_buff *skb)
2594{
2595 return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
2596}
2597
typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *);
/*
 * Invoke a gro_receive callback with recursion protection: if the
 * per-skb recursion limit is hit, mark the skb for flush and bail out
 * instead of recursing further.
 */
static inline struct sk_buff *call_gro_receive(gro_receive_t cb,
					       struct list_head *head,
					       struct sk_buff *skb)
{
	if (unlikely(gro_recursion_inc_test(skb))) {
		NAPI_GRO_CB(skb)->flush |= 1;
		return NULL;
	}

	return cb(head, skb);
}

typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *,
					    struct sk_buff *);
/* As call_gro_receive(), for callbacks that also take a socket. */
static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
						  struct sock *sk,
						  struct list_head *head,
						  struct sk_buff *skb)
{
	if (unlikely(gro_recursion_inc_test(skb))) {
		NAPI_GRO_CB(skb)->flush |= 1;
		return NULL;
	}

	return cb(sk, head, skb);
}
2625
/* A registered L2 protocol handler (see dev_add_pack()). */
struct packet_type {
	__be16			type;	/* This is really htons(ether_type). */
	bool			ignore_outgoing;
	struct net_device	*dev;	/* NULL is wildcarded here */
	int			(*func) (struct sk_buff *,
					 struct net_device *,
					 struct packet_type *,
					 struct net_device *);
	void			(*list_func) (struct list_head *,
					      struct packet_type *,
					      struct net_device *);
	bool			(*id_match)(struct packet_type *ptype,
					    struct sock *sk);
	void			*af_packet_priv;
	struct list_head	list;
};
2642
/* GSO/GRO callbacks a protocol registers for offload processing. */
struct offload_callbacks {
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						netdev_features_t features);
	struct sk_buff		*(*gro_receive)(struct list_head *head,
						struct sk_buff *skb);
	int			(*gro_complete)(struct sk_buff *skb, int nhoff);
};

/* A registered protocol offload handler (see dev_add_offload()). */
struct packet_offload {
	__be16			 type;	/* This is really htons(ether_type). */
	u16			 priority;
	struct offload_callbacks callbacks;
	struct list_head	 list;
};
2657
2658
/* Per-cpu software device statistics, synchronized via u64_stats_sync. */
struct pcpu_sw_netstats {
	u64     rx_packets;
	u64     rx_bytes;
	u64     tx_packets;
	u64     tx_bytes;
	struct u64_stats_sync   syncp;
} __aligned(4 * sizeof(u64));

/* Per-cpu "light" stats for devices that only count packets/bytes. */
struct pcpu_lstats {
	u64_stats_t packets;
	u64_stats_t bytes;
	struct u64_stats_sync syncp;
} __aligned(2 * sizeof(u64));
2672
2673void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes);
2674
/*
 * Account one received packet of @len bytes in the device's per-cpu
 * tstats.  Caller must be in a context where this CPU's stats cannot be
 * updated concurrently (the usual softirq rules for these counters).
 */
static inline void dev_sw_netstats_rx_add(struct net_device *dev, unsigned int len)
{
	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_bytes += len;
	tstats->rx_packets++;
	u64_stats_update_end(&tstats->syncp);
}
2684
/*
 * Account @packets transmitted packets totalling @len bytes in the
 * device's per-cpu tstats.
 */
static inline void dev_sw_netstats_tx_add(struct net_device *dev,
					  unsigned int packets,
					  unsigned int len)
{
	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	tstats->tx_bytes += len;
	tstats->tx_packets += packets;
	u64_stats_update_end(&tstats->syncp);
}
2696
/* Account one packet of @len bytes in the device's per-cpu lstats. */
static inline void dev_lstats_add(struct net_device *dev, unsigned int len)
{
	struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats);

	u64_stats_update_begin(&lstats->syncp);
	u64_stats_add(&lstats->bytes, len);
	u64_stats_inc(&lstats->packets);
	u64_stats_update_end(&lstats->syncp);
}
2706
/*
 * Allocate per-cpu stats of the given struct type and initialize the
 * embedded u64_stats syncp on every possible CPU.  Evaluates to the
 * per-cpu pointer, or NULL on allocation failure.
 */
#define __netdev_alloc_pcpu_stats(type, gfp)				\
({									\
	typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
	if (pcpu_stats)	{						\
		int __cpu;						\
		for_each_possible_cpu(__cpu) {				\
			typeof(type) *stat;				\
			stat = per_cpu_ptr(pcpu_stats, __cpu);		\
			u64_stats_init(&stat->syncp);			\
		}							\
	}								\
	pcpu_stats;							\
})

#define netdev_alloc_pcpu_stats(type)					\
	__netdev_alloc_pcpu_stats(type, GFP_KERNEL)

/* Device-managed variant: freed automatically when @dev is released. */
#define devm_netdev_alloc_pcpu_stats(dev, type)				\
({									\
	typeof(type) __percpu *pcpu_stats = devm_alloc_percpu(dev, type);\
	if (pcpu_stats) {						\
		int __cpu;						\
		for_each_possible_cpu(__cpu) {				\
			typeof(type) *stat;				\
			stat = per_cpu_ptr(pcpu_stats, __cpu);		\
			u64_stats_init(&stat->syncp);			\
		}							\
	}								\
	pcpu_stats;							\
})
2737
/* How a link-aggregation (bond/team) master selects a TX slave. */
enum netdev_lag_tx_type {
	NETDEV_LAG_TX_TYPE_UNKNOWN,
	NETDEV_LAG_TX_TYPE_RANDOM,
	NETDEV_LAG_TX_TYPE_BROADCAST,
	NETDEV_LAG_TX_TYPE_ROUNDROBIN,
	NETDEV_LAG_TX_TYPE_ACTIVEBACKUP,
	NETDEV_LAG_TX_TYPE_HASH,
};

/* Which packet fields the LAG hash policy covers (L2/L3/L4, encap). */
enum netdev_lag_hash {
	NETDEV_LAG_HASH_NONE,
	NETDEV_LAG_HASH_L2,
	NETDEV_LAG_HASH_L34,
	NETDEV_LAG_HASH_L23,
	NETDEV_LAG_HASH_E23,
	NETDEV_LAG_HASH_E34,
	NETDEV_LAG_HASH_VLAN_SRCMAC,
	NETDEV_LAG_HASH_UNKNOWN,
};

/* Info passed by a LAG master through the CHANGEUPPER notifier. */
struct netdev_lag_upper_info {
	enum netdev_lag_tx_type tx_type;
	enum netdev_lag_hash hash_type;
};

/* Per-slave state passed through the CHANGELOWERSTATE notifier. */
struct netdev_lag_lower_state_info {
	u8 link_up : 1,
	   tx_enabled : 1;
};
2767
2768#include <linux/notifier.h>
2769
2770
2771
2772
2773
/*
 * Netdevice notifier event types, delivered via the chains registered
 * with register_netdevice_notifier() and friends.
 */
enum netdev_cmd {
	NETDEV_UP	= 1,	/* For now you can't veto a device up/down */
	NETDEV_DOWN,
	NETDEV_REBOOT,		/* Tell a protocol stack a network interface
				   detected a hardware crash and restarted
				   - we can use this eg to kick tcp sessions
				   once done */
	NETDEV_CHANGE,		/* Notify device state change */
	NETDEV_REGISTER,
	NETDEV_UNREGISTER,
	NETDEV_CHANGEMTU,	/* notify after mtu change happened */
	NETDEV_CHANGEADDR,	/* notify after the address change */
	NETDEV_PRE_CHANGEADDR,	/* notify before the address change */
	NETDEV_GOING_DOWN,
	NETDEV_CHANGENAME,
	NETDEV_FEAT_CHANGE,
	NETDEV_BONDING_FAILOVER,
	NETDEV_PRE_UP,
	NETDEV_PRE_TYPE_CHANGE,
	NETDEV_POST_TYPE_CHANGE,
	NETDEV_POST_INIT,
	NETDEV_RELEASE,
	NETDEV_NOTIFY_PEERS,
	NETDEV_JOIN,
	NETDEV_CHANGEUPPER,
	NETDEV_RESEND_IGMP,
	NETDEV_PRECHANGEMTU,	/* notify before mtu change happened */
	NETDEV_CHANGEINFODATA,
	NETDEV_BONDING_INFO,
	NETDEV_PRECHANGEUPPER,
	NETDEV_CHANGELOWERSTATE,
	NETDEV_UDP_TUNNEL_PUSH_INFO,
	NETDEV_UDP_TUNNEL_DROP_INFO,
	NETDEV_CHANGE_TX_QUEUE_LEN,
	NETDEV_CVLAN_FILTER_PUSH_INFO,
	NETDEV_CVLAN_FILTER_DROP_INFO,
	NETDEV_SVLAN_FILTER_PUSH_INFO,
	NETDEV_SVLAN_FILTER_DROP_INFO,
};
2813const char *netdev_cmd_to_name(enum netdev_cmd cmd);
2814
2815int register_netdevice_notifier(struct notifier_block *nb);
2816int unregister_netdevice_notifier(struct notifier_block *nb);
2817int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb);
2818int unregister_netdevice_notifier_net(struct net *net,
2819 struct notifier_block *nb);
2820int register_netdevice_notifier_dev_net(struct net_device *dev,
2821 struct notifier_block *nb,
2822 struct netdev_net_notifier *nn);
2823int unregister_netdevice_notifier_dev_net(struct net_device *dev,
2824 struct notifier_block *nb,
2825 struct netdev_net_notifier *nn);
2826
/* Base payload passed to netdevice notifier callbacks. */
struct netdev_notifier_info {
	struct net_device	*dev;
	struct netlink_ext_ack	*extack;
};

/* Extended payload carrying the pre-change MTU (NETDEV_PRECHANGEMTU). */
struct netdev_notifier_info_ext {
	struct netdev_notifier_info info; /* must be first */
	union {
		u32 mtu;
	} ext;
};

/* Payload for NETDEV_CHANGE: which dev->flags bits changed. */
struct netdev_notifier_change_info {
	struct netdev_notifier_info info; /* must be first */
	unsigned int flags_changed;
};

/* Payload for NETDEV_[PRE]CHANGEUPPER events. */
struct netdev_notifier_changeupper_info {
	struct netdev_notifier_info info; /* must be first */
	struct net_device *upper_dev; /* new upper dev */
	bool master; /* is upper dev master */
	bool linking; /* is the notification for link or unlink */
	void *upper_info; /* upper dev info */
};

/* Payload for NETDEV_CHANGELOWERSTATE events. */
struct netdev_notifier_changelowerstate_info {
	struct netdev_notifier_info info; /* must be first */
	void *lower_state_info; /* is lower dev state */
};

/* Payload for NETDEV_PRE_CHANGEADDR: the proposed hw address. */
struct netdev_notifier_pre_changeaddr_info {
	struct netdev_notifier_info info; /* must be first */
	const unsigned char *dev_addr;
};
2861
/* Initialize a notifier info block for @dev with no extack. */
static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
					     struct net_device *dev)
{
	info->dev = dev;
	info->extack = NULL;
}
2868
/* Extract the device a notifier event refers to. */
static inline struct net_device *
netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
{
	return info->dev;
}

/* Extract the (possibly NULL) extended ack of a notifier event. */
static inline struct netlink_ext_ack *
netdev_notifier_info_to_extack(const struct netdev_notifier_info *info)
{
	return info->extack;
}
2880
2881int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
2882
2883
2884extern rwlock_t dev_base_lock;
2885
/*
 * Iterators over the per-namespace device list.  The plain variants
 * require RTNL or dev_base_lock; the _rcu variants require an RCU read
 * section.
 */
#define for_each_netdev(net, d)		\
		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_reverse(net, d)	\
		list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_rcu(net, d)		\
		list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n)	\
		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d)		\
		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue_reverse(net, d)		\
		list_for_each_entry_continue_reverse(d, &(net)->dev_base_head, \
						     dev_list)
#define for_each_netdev_continue_rcu(net, d)		\
	list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
/* Iterate (under RCU) over init_net devices whose master is @bond. */
#define for_each_netdev_in_bond_rcu(bond, slave)	\
		for_each_netdev_rcu(&init_net, slave)	\
			if (netdev_master_upper_dev_get_rcu(slave) == (bond))
#define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)
2905
2906static inline struct net_device *next_net_device(struct net_device *dev)
2907{
2908 struct list_head *lh;
2909 struct net *net;
2910
2911 net = dev_net(dev);
2912 lh = dev->dev_list.next;
2913 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2914}
2915
/* RCU variant of next_net_device(); caller must be in an RCU section. */
static inline struct net_device *next_net_device_rcu(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = rcu_dereference(list_next_rcu(&dev->dev_list));
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}
2925
2926static inline struct net_device *first_net_device(struct net *net)
2927{
2928 return list_empty(&net->dev_base_head) ? NULL :
2929 net_device_entry(net->dev_base_head.next);
2930}
2931
/* RCU variant of first_net_device(); caller must be in an RCU section. */
static inline struct net_device *first_net_device_rcu(struct net *net)
{
	struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));

	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}
2938
2939int netdev_boot_setup_check(struct net_device *dev);
2940struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
2941 const char *hwaddr);
2942struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
2943void dev_add_pack(struct packet_type *pt);
2944void dev_remove_pack(struct packet_type *pt);
2945void __dev_remove_pack(struct packet_type *pt);
2946void dev_add_offload(struct packet_offload *po);
2947void dev_remove_offload(struct packet_offload *po);
2948
2949int dev_get_iflink(const struct net_device *dev);
2950int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
2951int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
2952 struct net_device_path_stack *stack);
2953struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
2954 unsigned short mask);
2955struct net_device *dev_get_by_name(struct net *net, const char *name);
2956struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
2957struct net_device *__dev_get_by_name(struct net *net, const char *name);
2958int dev_alloc_name(struct net_device *dev, const char *name);
2959int dev_open(struct net_device *dev, struct netlink_ext_ack *extack);
2960void dev_close(struct net_device *dev);
2961void dev_close_many(struct list_head *head, bool unlink);
2962void dev_disable_lro(struct net_device *dev);
2963int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
2964u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
2965 struct net_device *sb_dev);
2966u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
2967 struct net_device *sb_dev);
2968
2969int dev_queue_xmit(struct sk_buff *skb);
2970int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
2971int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
2972
/*
 * Transmit @skb directly on TX queue @queue_id, bypassing the qdisc
 * layer.  On any non-complete transmit status the skb is freed here, so
 * the caller never owns it after this call.
 */
static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
{
	int ret;

	ret = __dev_direct_xmit(skb, queue_id);
	if (!dev_xmit_complete(ret))
		kfree_skb(skb);
	return ret;
}
2982
2983int register_netdevice(struct net_device *dev);
2984void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
2985void unregister_netdevice_many(struct list_head *head);
/* Unregister @dev immediately (NULL head = no deferred batching). */
static inline void unregister_netdevice(struct net_device *dev)
{
	unregister_netdevice_queue(dev, NULL);
}
2990
2991int netdev_refcnt_read(const struct net_device *dev);
2992void free_netdev(struct net_device *dev);
2993void netdev_freemem(struct net_device *dev);
2994int init_dummy_netdev(struct net_device *dev);
2995
2996struct net_device *netdev_get_xmit_slave(struct net_device *dev,
2997 struct sk_buff *skb,
2998 bool all_slaves);
2999struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev,
3000 struct sock *sk);
3001struct net_device *dev_get_by_index(struct net *net, int ifindex);
3002struct net_device *__dev_get_by_index(struct net *net, int ifindex);
3003struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
3004struct net_device *dev_get_by_napi_id(unsigned int napi_id);
3005int netdev_get_name(struct net *net, char *name, int ifindex);
3006int dev_restart(struct net_device *dev);
3007int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
3008int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb);
3009
/* Current GRO parse offset relative to skb->data. */
static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->data_offset;
}

/* Bytes remaining to be processed from the current GRO offset. */
static inline unsigned int skb_gro_len(const struct sk_buff *skb)
{
	return skb->len - NAPI_GRO_CB(skb)->data_offset;
}

/* Advance the GRO parse offset by @len bytes. */
static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
{
	NAPI_GRO_CB(skb)->data_offset += len;
}
3024
/*
 * Fast-path header access via the frag0 shortcut.  Only valid when
 * skb_gro_header_hard() says the requested length fits in frag0.
 */
static inline void *skb_gro_header_fast(struct sk_buff *skb,
					unsigned int offset)
{
	return NAPI_GRO_CB(skb)->frag0 + offset;
}

/* True if @hlen bytes are NOT available via the frag0 fast path. */
static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
{
	return NAPI_GRO_CB(skb)->frag0_len < hlen;
}

/* Disable the frag0 fast path for this skb. */
static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
{
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;
}
3041
/*
 * Slow-path header access: pull @hlen bytes into the linear area.
 * Pulling may move skb->data, so the frag0 shortcut is invalidated.
 * Returns NULL if the skb is too short.
 */
static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
					unsigned int offset)
{
	if (!pskb_may_pull(skb, hlen))
		return NULL;

	skb_gro_frag0_invalidate(skb);
	return skb->data + offset;
}

/* Network header pointer, using frag0 when the fast path is active. */
static inline void *skb_gro_network_header(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
	       skb_network_offset(skb);
}
3057
/*
 * After pulling @len bytes starting at @start out of the GRO parse
 * window, subtract their checksum so the running CHECKSUM_COMPLETE
 * value stays correct.  No-op unless a valid checksum is being tracked.
 */
static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
					  const void *start, unsigned int len)
{
	if (NAPI_GRO_CB(skb)->csum_valid)
		NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
						  csum_partial(start, len, 0));
}
3065
3066
3067
3068
3069
3070
3071__sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
3072
/* True if the current GRO offset is the remote-checksum-offload start. */
static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
}
3077
/*
 * Decide whether software checksum validation is needed at this GRO
 * layer.  It is skipped when the device already vouched for the
 * checksum (csum_cnt), when we're at a remcsum start, when a
 * CHECKSUM_PARTIAL covers this header, or when a zero checksum is
 * acceptable and present.
 */
static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
						      bool zero_okay,
						      __sum16 check)
{
	return ((skb->ip_summed != CHECKSUM_PARTIAL ||
		skb_checksum_start_offset(skb) <
		 skb_gro_offset(skb)) &&
		!skb_at_gro_remcsum_start(skb) &&
		NAPI_GRO_CB(skb)->csum_cnt == 0 &&
		(!zero_okay || check));
}
3089
3090static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
3091 __wsum psum)
3092{
3093 if (NAPI_GRO_CB(skb)->csum_valid &&
3094 !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
3095 return 0;
3096
3097 NAPI_GRO_CB(skb)->csum = psum;
3098
3099 return __skb_gro_checksum_complete(skb);
3100}
3101
3102static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
3103{
3104 if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
3105
3106 NAPI_GRO_CB(skb)->csum_cnt--;
3107 } else {
3108
3109
3110
3111
3112 __skb_incr_checksum_unnecessary(skb);
3113 }
3114}
3115
/* Validate the checksum of the current GRO header if needed; on success
 * (zero result) account the consumed checksum.  Evaluates to a __sum16:
 * 0 when the checksum is valid or validation was unnecessary.
 */
#define __skb_gro_checksum_validate(skb, proto, zero_okay, check,	\
				    compute_pseudo)			\
({									\
	__sum16 __ret = 0;						\
	if (__skb_gro_checksum_validate_needed(skb, zero_okay, check))	\
		__ret = __skb_gro_checksum_validate_complete(skb,	\
				compute_pseudo(skb, proto));		\
	if (!__ret)							\
		skb_gro_incr_csum_unnecessary(skb);			\
	__ret;								\
})

/* Standard validation: a zero checksum is not acceptable. */
#define skb_gro_checksum_validate(skb, proto, compute_pseudo)		\
	__skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)

/* Validation for protocols (e.g. UDP) where checksum 0 means "none". */
#define skb_gro_checksum_validate_zero_check(skb, proto, check,		\
					     compute_pseudo)		\
	__skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)

/* Validation with no pseudo-header contribution. */
#define skb_gro_checksum_simple_validate(skb)				\
	__skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)

/* True when no checksum information is available at all, so a conversion
 * to CHECKSUM_COMPLETE semantics is worthwhile.
 */
static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
	    !NAPI_GRO_CB(skb)->csum_valid);
}

/* Seed the GRO checksum from the pseudo-header and mark it valid. */
static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
					      __wsum pseudo)
{
	NAPI_GRO_CB(skb)->csum = ~pseudo;
	NAPI_GRO_CB(skb)->csum_valid = 1;
}

/* Opportunistically convert to a valid GRO checksum when none exists. */
#define skb_gro_checksum_try_convert(skb, proto, compute_pseudo)	\
do {									\
	if (__skb_gro_checksum_convert_check(skb))			\
		__skb_gro_checksum_convert(skb,				\
					   compute_pseudo(skb, proto));	\
} while (0)
3157
/* State for remote checksum offload during GRO: where the adjusted
 * checksum field lives and the delta applied to it, so it can be undone
 * by skb_gro_remcsum_cleanup() if the packet is flushed.
 */
struct gro_remcsum {
	int offset;
	__wsum delta;
};

static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
{
	grc->offset = 0;
	grc->delta = 0;
}

/* Perform remote checksum offload adjustment on a GRO packet.
 *
 * @ptr:    pointer to the encapsulation header (fast-path pointer)
 * @off:    GRO offset of the encapsulation header
 * @hdrlen: length of the encapsulation header
 * @start, @offset: remcsum start/offset within the inner packet
 * @grc:    records the adjustment for possible later cleanup
 * @nopartial: when false, only note where remcsum starts and bail out
 *
 * Returns a (possibly refreshed) header pointer, or NULL if the needed
 * bytes could not be made available.  Requires a valid GRO checksum.
 */
static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
					    unsigned int off, size_t hdrlen,
					    int start, int offset,
					    struct gro_remcsum *grc,
					    bool nopartial)
{
	__wsum delta;
	/* Bytes needed beyond the header: up to the end of the checksum
	 * field or the remcsum start, whichever is further.
	 */
	size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);

	BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);

	if (!nopartial) {
		NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
		return ptr;
	}

	/* Re-derive the pointer: the fast path may not cover plen bytes. */
	ptr = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, off + plen)) {
		ptr = skb_gro_header_slow(skb, off + plen, off);
		if (!ptr)
			return NULL;
	}

	delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
			       start, offset);

	/* Keep the running GRO checksum consistent with the edit. */
	NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);

	grc->offset = off + hdrlen + offset;
	grc->delta = delta;

	return ptr;
}

/* Undo a previously applied remote checksum adjustment (no-op when no
 * delta was recorded).
 */
static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
					   struct gro_remcsum *grc)
{
	void *ptr;
	size_t plen = grc->offset + sizeof(u16);

	if (!grc->delta)
		return;

	ptr = skb_gro_header_fast(skb, grc->offset);
	if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
		ptr = skb_gro_header_slow(skb, plen, grc->offset);
		if (!ptr)
			return;
	}

	remcsum_unadjust((__sum16 *)ptr, grc->delta);
}
3222
#ifdef CONFIG_XFRM_OFFLOAD
/* Record the flush decision unless the packet was taken over by an
 * asynchronous offload path (pp == ERR_PTR(-EINPROGRESS)).
 */
static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
{
	if (PTR_ERR(pp) != -EINPROGRESS)
		NAPI_GRO_CB(skb)->flush |= flush;
}
/* As above, additionally undoing any remote-checksum adjustment. */
static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
					       struct sk_buff *pp,
					       int flush,
					       struct gro_remcsum *grc)
{
	if (PTR_ERR(pp) != -EINPROGRESS) {
		NAPI_GRO_CB(skb)->flush |= flush;
		skb_gro_remcsum_cleanup(skb, grc);
		skb->remcsum_offload = 0;
	}
}
#else
/* Without XFRM offload there is no async path; always apply the flush. */
static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
{
	NAPI_GRO_CB(skb)->flush |= flush;
}
static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
					       struct sk_buff *pp,
					       int flush,
					       struct gro_remcsum *grc)
{
	NAPI_GRO_CB(skb)->flush |= flush;
	skb_gro_remcsum_cleanup(skb, grc);
	skb->remcsum_offload = 0;
}
#endif
3255
/* Build the link-layer header via the device's header_ops->create hook.
 * Returns 0 when the device has no such hook.
 */
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
				  unsigned short type,
				  const void *daddr, const void *saddr,
				  unsigned int len)
{
	if (!dev->header_ops || !dev->header_ops->create)
		return 0;

	return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
}

/* Extract the source hardware address into @haddr via header_ops->parse.
 * Returns 0 when the device cannot parse headers.
 */
static inline int dev_parse_header(const struct sk_buff *skb,
				   unsigned char *haddr)
{
	const struct net_device *dev = skb->dev;

	if (!dev->header_ops || !dev->header_ops->parse)
		return 0;
	return dev->header_ops->parse(skb, haddr);
}

/* Derive the protocol from the link-layer header, or 0 if unsupported. */
static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb)
{
	const struct net_device *dev = skb->dev;

	if (!dev->header_ops || !dev->header_ops->parse_protocol)
		return 0;
	return dev->header_ops->parse_protocol(skb);
}

/* Validate a user-supplied link-layer header of @len bytes.  Short
 * headers are zero-padded for CAP_SYS_RAWIO callers, otherwise deferred
 * to the device's validate hook.  ll_header must have room for
 * hard_header_len bytes.
 */
static inline bool dev_validate_header(const struct net_device *dev,
				       char *ll_header, int len)
{
	if (likely(len >= dev->hard_header_len))
		return true;
	if (len < dev->min_header_len)
		return false;

	if (capable(CAP_SYS_RAWIO)) {
		memset(ll_header + len, 0, dev->hard_header_len - len);
		return true;
	}

	if (dev->header_ops && dev->header_ops->validate)
		return dev->header_ops->validate(ll_header, len);

	return false;
}

/* True when the device can construct a link-layer header. */
static inline bool dev_has_header(const struct net_device *dev)
{
	return dev->header_ops && dev->header_ops->create;
}
3310
#ifdef CONFIG_NET_FLOW_LIMIT
/* Per-softnet flow limit state: a sliding history of recent flow hash
 * buckets used to detect and throttle dominating flows under load.
 */
#define FLOW_LIMIT_HISTORY	(1 << 7)
struct sd_flow_limit {
	u64		count;
	unsigned int	num_buckets;
	unsigned int	history_head;
	u16		history[FLOW_LIMIT_HISTORY];
	u8		buckets[];	/* flexible array, num_buckets entries */
};

extern int netdev_flow_limit_table_len;
#endif
3323
3324
3325
3326
3327struct softnet_data {
3328 struct list_head poll_list;
3329 struct sk_buff_head process_queue;
3330
3331
3332 unsigned int processed;
3333 unsigned int time_squeeze;
3334 unsigned int received_rps;
3335#ifdef CONFIG_RPS
3336 struct softnet_data *rps_ipi_list;
3337#endif
3338#ifdef CONFIG_NET_FLOW_LIMIT
3339 struct sd_flow_limit __rcu *flow_limit;
3340#endif
3341 struct Qdisc *output_queue;
3342 struct Qdisc **output_queue_tailp;
3343 struct sk_buff *completion_queue;
3344#ifdef CONFIG_XFRM_OFFLOAD
3345 struct sk_buff_head xfrm_backlog;
3346#endif
3347
3348 struct {
3349 u16 recursion;
3350 u8 more;
3351 } xmit;
3352#ifdef CONFIG_RPS
3353
3354
3355
3356 unsigned int input_queue_head ____cacheline_aligned_in_smp;
3357
3358
3359 call_single_data_t csd ____cacheline_aligned_in_smp;
3360 struct softnet_data *rps_ipi_next;
3361 unsigned int cpu;
3362 unsigned int input_queue_tail;
3363#endif
3364 unsigned int dropped;
3365 struct sk_buff_head input_pkt_queue;
3366 struct napi_struct backlog;
3367
3368};
3369
/* Advance the RPS input queue head counter (no-op without CONFIG_RPS). */
static inline void input_queue_head_incr(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	sd->input_queue_head++;
#endif
}

/* Advance the RPS input queue tail counter and report the new value via
 * @qtail (untouched without CONFIG_RPS).
 */
static inline void input_queue_tail_incr_save(struct softnet_data *sd,
					      unsigned int *qtail)
{
#ifdef CONFIG_RPS
	*qtail = ++sd->input_queue_tail;
#endif
}

DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);

/* Current CPU's dev_queue_xmit() recursion depth. */
static inline int dev_recursion_level(void)
{
	return this_cpu_read(softnet_data.xmit.recursion);
}
3391
/* Maximum allowed nesting of dev_queue_xmit() on one CPU before we
 * assume a loop (e.g. misconfigured stacked devices) and drop.
 */
#define XMIT_RECURSION_LIMIT	8
static inline bool dev_xmit_recursion(void)
{
	return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
			XMIT_RECURSION_LIMIT);
}

static inline void dev_xmit_recursion_inc(void)
{
	__this_cpu_inc(softnet_data.xmit.recursion);
}

static inline void dev_xmit_recursion_dec(void)
{
	__this_cpu_dec(softnet_data.xmit.recursion);
}
3408
void __netif_schedule(struct Qdisc *q);
void netif_schedule_queue(struct netdev_queue *txq);

/* Schedule every TX queue of @dev for transmission. */
static inline void netif_tx_schedule_all(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		netif_schedule_queue(netdev_get_tx_queue(dev, i));
}
3419
/* Clear the driver XOFF bit: the queue may transmit again. */
static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
	clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 *	netif_start_queue - allow transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 *	Operates on TX queue 0 only.
 */
static inline void netif_start_queue(struct net_device *dev)
{
	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
}

/* Allow transmit on every TX queue of @dev. */
static inline void netif_tx_start_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_start_queue(txq);
	}
}
3445
void netif_tx_wake_queue(struct netdev_queue *dev_queue);

/**
 *	netif_wake_queue - restart transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are available.
 *	Operates on TX queue 0 only.
 */
static inline void netif_wake_queue(struct net_device *dev)
{
	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
}

/* Wake every TX queue of @dev. */
static inline void netif_tx_wake_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_wake_queue(txq);
	}
}
3469
/* Set the driver XOFF bit: stop upper layers transmitting on @dev_queue. */
static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
	set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 *	netif_stop_queue - stop transmitted packets
 *	@dev: network device
 *
 *	Stop upper layers calling the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are unavailable.
 *	Operates on TX queue 0 only.
 */
static inline void netif_stop_queue(struct net_device *dev)
{
	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}

void netif_tx_stop_all_queues(struct net_device *dev);

/* True when the driver has stopped this TX queue. */
static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
	return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 *	netif_queue_stopped - test if transmit queue is flowblocked
 *	@dev: network device
 *
 *	Test if transmit queue 0 on device is currently unable to send.
 */
static inline bool netif_queue_stopped(const struct net_device *dev)
{
	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}
3504
/* True when the queue is stopped by either the driver or the stack (BQL). */
static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_ANY_XOFF;
}

/* True when the queue is stopped (driver or stack) or frozen. */
static inline bool
netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
}

/* True when the queue is driver-stopped or frozen (ignores stack XOFF). */
static inline bool
netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
}
3521
3522
3523
3524
3525
3526
3527
3528
3529
3530
3531
3532static inline void netdev_queue_set_dql_min_limit(struct netdev_queue *dev_queue,
3533 unsigned int min_limit)
3534{
3535#ifdef CONFIG_BQL
3536 dev_queue->dql.min_limit = min_limit;
3537#endif
3538}
3539
3540
3541
3542
3543
3544
3545
3546
3547static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
3548{
3549#ifdef CONFIG_BQL
3550 prefetchw(&dev_queue->dql.num_queued);
3551#endif
3552}
3553
3554
3555
3556
3557
3558
3559
3560
3561static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
3562{
3563#ifdef CONFIG_BQL
3564 prefetchw(&dev_queue->dql.limit);
3565#endif
3566}
3567
/* Report @bytes queued to hardware for BQL accounting.  Stops the queue
 * (stack XOFF) when the dql limit is exhausted; a re-check after the
 * barrier avoids a lost wakeup against a concurrent completion.
 * No-op without CONFIG_BQL.
 */
static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
					unsigned int bytes)
{
#ifdef CONFIG_BQL
	dql_queued(&dev_queue->dql, bytes);

	if (likely(dql_avail(&dev_queue->dql) >= 0))
		return;

	set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);

	/*
	 * The XOFF flag must be set before checking the dql_avail below,
	 * because in netdev_tx_completed_queue we update the dql_completed
	 * before checking the XOFF flag.  Pairs with the smp_mb() there.
	 */
	smp_mb();

	/* check again in case another CPU has just made room avail */
	if (unlikely(dql_avail(&dev_queue->dql) >= 0))
		clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
#endif
}
3591
3592
3593
3594
3595
3596
3597
3598static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
3599 unsigned int bytes,
3600 bool xmit_more)
3601{
3602 if (xmit_more) {
3603#ifdef CONFIG_BQL
3604 dql_queued(&dev_queue->dql, bytes);
3605#endif
3606 return netif_tx_queue_stopped(dev_queue);
3607 }
3608 netdev_tx_sent_queue(dev_queue, bytes);
3609 return true;
3610}
3611
3612
3613
3614
3615
3616
3617
3618
3619
3620
3621static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
3622{
3623 netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
3624}
3625
3626static inline bool __netdev_sent_queue(struct net_device *dev,
3627 unsigned int bytes,
3628 bool xmit_more)
3629{
3630 return __netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes,
3631 xmit_more);
3632}
3633
/* Report @pkts/@bytes completed by hardware for BQL accounting and wake
 * the queue if the stack had stopped it.  No-op without CONFIG_BQL.
 */
static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
					     unsigned int pkts, unsigned int bytes)
{
#ifdef CONFIG_BQL
	if (unlikely(!bytes))
		return;

	dql_completed(&dev_queue->dql, bytes);

	/*
	 * Without the memory barrier there is a small possibility that
	 * netdev_tx_sent_queue will miss the update and cause the queue to
	 * be stopped forever.  Pairs with the smp_mb() there.
	 */
	smp_mb();

	if (unlikely(dql_avail(&dev_queue->dql) < 0))
		return;

	if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
		netif_schedule_queue(dev_queue);
#endif
}

/**
 *	netdev_completed_queue - report bytes and packets completed by device
 *	@dev: network device
 *	@pkts: actual number of packets sent over the medium
 *	@bytes: actual number of bytes sent over the medium
 *
 *	Report the number of bytes and packets transmitted by the network
 *	device hardware queue 0 over the physical medium.  @bytes must
 *	exactly match the @bytes amount passed to netdev_sent_queue().
 */
static inline void netdev_completed_queue(struct net_device *dev,
					  unsigned int pkts, unsigned int bytes)
{
	netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
}
3673
/* Reset BQL state for @q (e.g. after a device reset discards in-flight
 * packets) and clear any stack XOFF.  No-op without CONFIG_BQL.
 */
static inline void netdev_tx_reset_queue(struct netdev_queue *q)
{
#ifdef CONFIG_BQL
	clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
	dql_reset(&q->dql);
#endif
}

/**
 *	netdev_reset_queue - reset the packets and bytes count of a network device
 *	@dev_queue: network device
 *
 *	Reset the bytes and packet count of a network device and clear the
 *	software flow control OFF bit for this network device.  Queue 0 only.
 */
static inline void netdev_reset_queue(struct net_device *dev_queue)
{
	netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
}
3693
3694
3695
3696
3697
3698
3699
3700
3701
3702static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
3703{
3704 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
3705 net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
3706 dev->name, queue_index,
3707 dev->real_num_tx_queues);
3708 return 0;
3709 }
3710
3711 return queue_index;
3712}
3713
3714
3715
3716
3717
3718
3719
3720static inline bool netif_running(const struct net_device *dev)
3721{
3722 return test_bit(__LINK_STATE_START, &dev->state);
3723}
3724
3725
3726
3727
3728
3729
3730
3731
3732
3733
3734
3735
3736
3737
3738
3739static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
3740{
3741 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3742
3743 netif_tx_start_queue(txq);
3744}
3745
3746
3747
3748
3749
3750
3751
3752
3753static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
3754{
3755 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3756 netif_tx_stop_queue(txq);
3757}
3758
3759
3760
3761
3762
3763
3764
3765
3766static inline bool __netif_subqueue_stopped(const struct net_device *dev,
3767 u16 queue_index)
3768{
3769 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3770
3771 return netif_tx_queue_stopped(txq);
3772}
3773
3774
3775
3776
3777
3778
3779
3780
3781static inline bool netif_subqueue_stopped(const struct net_device *dev,
3782 struct sk_buff *skb)
3783{
3784 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
3785}
3786
3787
3788
3789
3790
3791
3792
3793
3794static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
3795{
3796 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3797
3798 netif_tx_wake_queue(txq);
3799}
3800
#ifdef CONFIG_XPS
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
			u16 index);
int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
			  u16 index, enum xps_map_type type);

/**
 *	netif_attr_test_mask - Test a CPU or Rx queue set in a mask
 *	@j: CPU/Rx queue index
 *	@mask: bitmask of all cpus/rx queues
 *	@nr_bits: number of bits in the bitmask
 *
 *	Test if a CPU or Rx queue index is set in a mask of all CPU/Rx queues.
 */
static inline bool netif_attr_test_mask(unsigned long j,
					const unsigned long *mask,
					unsigned int nr_bits)
{
	cpu_max_bits_warn(j, nr_bits);
	return test_bit(j, mask);
}

/**
 *	netif_attr_test_online - Test for online CPU/Rx queue
 *	@j: CPU/Rx queue index
 *	@online_mask: bitmask for CPUs/Rx queues that are online
 *	@nr_bits: number of bits in the bitmask
 *
 *	Returns true if a CPU/Rx queue is online.  With a NULL mask, any
 *	index within range counts as online.
 */
static inline bool netif_attr_test_online(unsigned long j,
					  const unsigned long *online_mask,
					  unsigned int nr_bits)
{
	cpu_max_bits_warn(j, nr_bits);

	if (online_mask)
		return test_bit(j, online_mask);

	return (j < nr_bits);
}

/**
 *	netif_attrmask_next - get the next CPU/Rx queue in a cpu/Rx queues mask
 *	@n: CPU/Rx queue index
 *	@srcp: the cpumask/Rx queue mask pointer
 *	@nr_bits: number of bits in the bitmask
 *
 *	Returns >= nr_bits if no further CPUs/Rx queues set.
 */
static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp,
					       unsigned int nr_bits)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpu_max_bits_warn(n, nr_bits);

	if (srcp)
		return find_next_bit(srcp, nr_bits, n + 1);

	return n + 1;
}

/**
 *	netif_attrmask_next_and - get the next CPU/Rx queue in \*src1p & \*src2p
 *	@n: CPU/Rx queue index
 *	@src1p: the first CPUs/Rx queues mask pointer
 *	@src2p: the second CPUs/Rx queues mask pointer
 *	@nr_bits: number of bits in the bitmask
 *
 *	Returns >= nr_bits if no further CPUs/Rx queues set in both.
 *	A NULL mask is treated as "all bits set".
 */
static inline int netif_attrmask_next_and(int n, const unsigned long *src1p,
					  const unsigned long *src2p,
					  unsigned int nr_bits)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpu_max_bits_warn(n, nr_bits);

	if (src1p && src2p)
		return find_next_and_bit(src1p, src2p, nr_bits, n + 1);
	else if (src1p)
		return find_next_bit(src1p, nr_bits, n + 1);
	else if (src2p)
		return find_next_bit(src2p, nr_bits, n + 1);

	return n + 1;
}
#else
/* Without CONFIG_XPS the setters succeed as no-ops. */
static inline int netif_set_xps_queue(struct net_device *dev,
				      const struct cpumask *mask,
				      u16 index)
{
	return 0;
}

static inline int __netif_set_xps_queue(struct net_device *dev,
					const unsigned long *mask,
					u16 index, enum xps_map_type type)
{
	return 0;
}
#endif
3905
3906
3907
3908
3909
3910
3911
3912static inline bool netif_is_multiqueue(const struct net_device *dev)
3913{
3914 return dev->num_tx_queues > 1;
3915}
3916
3917int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
3918
3919#ifdef CONFIG_SYSFS
3920int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
3921#else
3922static inline int netif_set_real_num_rx_queues(struct net_device *dev,
3923 unsigned int rxqs)
3924{
3925 dev->real_num_rx_queues = rxqs;
3926 return 0;
3927}
3928#endif
3929int netif_set_real_num_queues(struct net_device *dev,
3930 unsigned int txq, unsigned int rxq);
3931
/* Return the @rxq'th RX queue of @dev (no bounds check). */
static inline struct netdev_rx_queue *
__netif_get_rx_queue(struct net_device *dev, unsigned int rxq)
{
	return dev->_rx + rxq;
}

#ifdef CONFIG_SYSFS
/* Index of @queue within its device's RX queue array. */
static inline unsigned int get_netdev_rx_queue_index(
		struct netdev_rx_queue *queue)
{
	struct net_device *dev = queue->dev;
	int index = queue - dev->_rx;

	BUG_ON(index >= dev->num_rx_queues);
	return index;
}
#endif
3949
/* Upper bound used when sizing default RSS queue counts. */
#define DEFAULT_MAX_NUM_RSS_QUEUES	(8)
int netif_get_num_default_rss_queues(void);

/* Distinguishes normal completion from drops for skb freeing stats. */
enum skb_free_reason {
	SKB_REASON_CONSUMED,
	SKB_REASON_DROPPED,
};

void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
3960
3961
3962
3963
3964
3965
3966
3967
3968
3969
3970
3971
3972
3973
3974
3975
3976
3977
3978
3979
3980static inline void dev_kfree_skb_irq(struct sk_buff *skb)
3981{
3982 __dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
3983}
3984
3985static inline void dev_consume_skb_irq(struct sk_buff *skb)
3986{
3987 __dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
3988}
3989
3990static inline void dev_kfree_skb_any(struct sk_buff *skb)
3991{
3992 __dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
3993}
3994
3995static inline void dev_consume_skb_any(struct sk_buff *skb)
3996{
3997 __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
3998}
3999
/* Generic XDP and core RX/GRO entry points. */
u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
			     struct bpf_prog *xdp_prog);
void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog);
int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb);
int netif_rx(struct sk_buff *skb);
int netif_rx_ni(struct sk_buff *skb);
int netif_rx_any_context(struct sk_buff *skb);
int netif_receive_skb(struct sk_buff *skb);
int netif_receive_skb_core(struct sk_buff *skb);
void netif_receive_skb_list(struct list_head *head);
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
void napi_gro_flush(struct napi_struct *napi, bool flush_old);
struct sk_buff *napi_get_frags(struct napi_struct *napi);
gro_result_t napi_gro_frags(struct napi_struct *napi);
struct packet_offload *gro_find_receive_by_type(__be16 type);
struct packet_offload *gro_find_complete_by_type(__be16 type);
4016
/* Free the skb cached by napi_get_frags() and clear the reference. */
static inline void napi_free_frags(struct napi_struct *napi)
{
	kfree_skb(napi->skb);
	napi->skb = NULL;
}
4022
bool netdev_is_rx_handler_busy(struct net_device *dev);
int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data);
void netdev_rx_handler_unregister(struct net_device *dev);

bool dev_valid_name(const char *name);
/* True when @cmd belongs to the socket ioctl namespace. */
static inline bool is_socket_ioctl_cmd(unsigned int cmd)
{
	return _IOC_TYPE(cmd) == SOCK_IOC_TYPE;
}
int get_user_ifreq(struct ifreq *ifr, void __user **ifrdata, void __user *arg);
int put_user_ifreq(struct ifreq *ifr, void __user *arg);
int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
	      void __user *data, bool *need_copyout);
int dev_ifconf(struct net *net, struct ifconf __user *ifc);
int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *userdata);
unsigned int dev_get_flags(const struct net_device *);
int __dev_change_flags(struct net_device *dev, unsigned int flags,
		       struct netlink_ext_ack *extack);
int dev_change_flags(struct net_device *dev, unsigned int flags,
		     struct netlink_ext_ack *extack);
void __dev_notify_flags(struct net_device *, unsigned int old_flags,
			unsigned int gchanges);
int dev_change_name(struct net_device *, const char *);
int dev_set_alias(struct net_device *, const char *, size_t);
int dev_get_alias(const struct net_device *, char *, size_t);
int __dev_change_net_namespace(struct net_device *dev, struct net *net,
			       const char *pat, int new_ifindex);
/* Move @dev into namespace @net, keeping its ifindex when possible
 * (new_ifindex == 0 lets the core pick one).
 */
static inline
int dev_change_net_namespace(struct net_device *dev, struct net *net,
			     const char *pat)
{
	return __dev_change_net_namespace(dev, net, pat, 0);
}
int __dev_set_mtu(struct net_device *, int);
int dev_validate_mtu(struct net_device *dev, int mtu,
		     struct netlink_ext_ack *extack);
int dev_set_mtu_ext(struct net_device *dev, int mtu,
		    struct netlink_ext_ack *extack);
int dev_set_mtu(struct net_device *, int);
int dev_change_tx_queue_len(struct net_device *, unsigned long);
void dev_set_group(struct net_device *, int);
int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
			      struct netlink_ext_ack *extack);
int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
			struct netlink_ext_ack *extack);
int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
			     struct netlink_ext_ack *extack);
int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name);
int dev_change_carrier(struct net_device *, bool new_carrier);
int dev_get_phys_port_id(struct net_device *dev,
			 struct netdev_phys_item_id *ppid);
int dev_get_phys_port_name(struct net_device *dev,
			   char *name, size_t len);
int dev_get_port_parent_id(struct net_device *dev,
			   struct netdev_phys_item_id *ppid, bool recurse);
bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b);
int dev_change_proto_down(struct net_device *dev, bool proto_down);
int dev_change_proto_down_generic(struct net_device *dev, bool proto_down);
void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask,
				  u32 value);
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
				    struct netdev_queue *txq, int *ret);

typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
		      int fd, int expected_fd, u32 flags);
int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
u8 dev_xdp_prog_count(struct net_device *dev);
u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode);

int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb);
bool is_skb_forwardable(const struct net_device *dev,
			const struct sk_buff *skb);
/* Decide whether @skb may be forwarded out of @dev: the device must be
 * up, and (when @check_mtu) the packet must fit within MTU plus the
 * link header and one VLAN tag of slack, unless it is GSO.
 */
static __always_inline bool __is_skb_forwardable(const struct net_device *dev,
						 const struct sk_buff *skb,
						 const bool check_mtu)
{
	const u32 vlan_hdr_len = 4; /* VLAN_HLEN */
	unsigned int len;

	if (!(dev->flags & IFF_UP))
		return false;

	if (!check_mtu)
		return true;

	len = dev->mtu + dev->hard_header_len + vlan_hdr_len;
	if (skb->len <= len)
		return true;

	/* if TSO is enabled, we don't care about the length as the packet
	 * could be forwarded without being segmented before
	 */
	if (skb_is_gso(skb))
		return true;

	return false;
}
4127
/* Common sanity step for forwarding @skb to @dev: drop (and count) when
 * the packet is unforwardable or its frags cannot be orphaned; otherwise
 * scrub namespace-crossing state and reset the priority.
 * Returns 0 on success, NET_RX_DROP after freeing the skb otherwise.
 */
static __always_inline int ____dev_forward_skb(struct net_device *dev,
					       struct sk_buff *skb,
					       const bool check_mtu)
{
	if (skb_orphan_frags(skb, GFP_ATOMIC) ||
	    unlikely(!__is_skb_forwardable(dev, skb, check_mtu))) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	/* Full scrub only when crossing network namespaces. */
	skb_scrub_packet(skb, !net_eq(dev_net(dev), dev_net(skb->dev)));
	skb->priority = 0;
	return 0;
}
4143
bool dev_nit_active(struct net_device *dev);
void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);

/* Tunables bounding work done per NET_RX softirq invocation. */
extern int netdev_budget;
extern unsigned int netdev_budget_usecs;

/* Called by rtnetlink to finish pending netdevice teardown work. */
void netdev_run_todo(void);
4152
4153
4154
4155
4156
4157
4158
/**
 *	dev_put - release reference to device
 *	@dev: network device
 *
 *	Release reference to device to allow it to be freed.
 *	NULL is accepted as a no-op.
 */
static inline void dev_put(struct net_device *dev)
{
	if (dev) {
#ifdef CONFIG_PCPU_DEV_REFCNT
		this_cpu_dec(*dev->pcpu_refcnt);
#else
		refcount_dec(&dev->dev_refcnt);
#endif
	}
}
4169
4170
4171
4172
4173
4174
4175
4176static inline void dev_hold(struct net_device *dev)
4177{
4178 if (dev) {
4179#ifdef CONFIG_PCPU_DEV_REFCNT
4180 this_cpu_inc(*dev->pcpu_refcnt);
4181#else
4182 refcount_inc(&dev->dev_refcnt);
4183#endif
4184 }
4185}
4186
4187
4188
4189
4190
4191
4192
4193
4194
4195
4196void linkwatch_init_dev(struct net_device *dev);
4197void linkwatch_fire_event(struct net_device *dev);
4198void linkwatch_forget_dev(struct net_device *dev);
4199
4200
4201
4202
4203
4204
4205
4206static inline bool netif_carrier_ok(const struct net_device *dev)
4207{
4208 return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
4209}
4210
/* Timestamp (jiffies) of the most recent transmit start on @dev. */
unsigned long dev_trans_start(struct net_device *dev);

void __netdev_watchdog_up(struct net_device *dev);

void netif_carrier_on(struct net_device *dev);
void netif_carrier_off(struct net_device *dev);
void netif_carrier_event(struct net_device *dev);
4218
4219
4220
4221
4222
4223
4224
4225
4226
4227
4228
4229
4230
4231static inline void netif_dormant_on(struct net_device *dev)
4232{
4233 if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
4234 linkwatch_fire_event(dev);
4235}
4236
4237
4238
4239
4240
4241
4242
4243static inline void netif_dormant_off(struct net_device *dev)
4244{
4245 if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
4246 linkwatch_fire_event(dev);
4247}
4248
4249
4250
4251
4252
4253
4254
4255static inline bool netif_dormant(const struct net_device *dev)
4256{
4257 return test_bit(__LINK_STATE_DORMANT, &dev->state);
4258}
4259
4260
4261
4262
4263
4264
4265
4266
4267
4268
4269
4270
4271static inline void netif_testing_on(struct net_device *dev)
4272{
4273 if (!test_and_set_bit(__LINK_STATE_TESTING, &dev->state))
4274 linkwatch_fire_event(dev);
4275}
4276
4277
4278
4279
4280
4281
4282
4283static inline void netif_testing_off(struct net_device *dev)
4284{
4285 if (test_and_clear_bit(__LINK_STATE_TESTING, &dev->state))
4286 linkwatch_fire_event(dev);
4287}
4288
4289
4290
4291
4292
4293
4294
4295static inline bool netif_testing(const struct net_device *dev)
4296{
4297 return test_bit(__LINK_STATE_TESTING, &dev->state);
4298}
4299
4300
4301
4302
4303
4304
4305
4306
4307static inline bool netif_oper_up(const struct net_device *dev)
4308{
4309 return (dev->operstate == IF_OPER_UP ||
4310 dev->operstate == IF_OPER_UNKNOWN );
4311}
4312
4313
4314
4315
4316
4317
4318
4319static inline bool netif_device_present(const struct net_device *dev)
4320{
4321 return test_bit(__LINK_STATE_PRESENT, &dev->state);
4322}
4323
4324void netif_device_detach(struct net_device *dev);
4325
4326void netif_device_attach(struct net_device *dev);
4327
4328
4329
4330
4331
4332enum {
4333 NETIF_MSG_DRV_BIT,
4334 NETIF_MSG_PROBE_BIT,
4335 NETIF_MSG_LINK_BIT,
4336 NETIF_MSG_TIMER_BIT,
4337 NETIF_MSG_IFDOWN_BIT,
4338 NETIF_MSG_IFUP_BIT,
4339 NETIF_MSG_RX_ERR_BIT,
4340 NETIF_MSG_TX_ERR_BIT,
4341 NETIF_MSG_TX_QUEUED_BIT,
4342 NETIF_MSG_INTR_BIT,
4343 NETIF_MSG_TX_DONE_BIT,
4344 NETIF_MSG_RX_STATUS_BIT,
4345 NETIF_MSG_PKTDATA_BIT,
4346 NETIF_MSG_HW_BIT,
4347 NETIF_MSG_WOL_BIT,
4348
4349
4350
4351
4352 NETIF_MSG_CLASS_COUNT,
4353};
4354
4355static_assert(NETIF_MSG_CLASS_COUNT <= 32);
4356
/* Convert a NETIF_MSG_*_BIT index into its mask value. */
#define __NETIF_MSG_BIT(bit)	((u32)1 << (bit))
#define __NETIF_MSG(name)	__NETIF_MSG_BIT(NETIF_MSG_ ## name ## _BIT)

#define NETIF_MSG_DRV		__NETIF_MSG(DRV)
#define NETIF_MSG_PROBE		__NETIF_MSG(PROBE)
#define NETIF_MSG_LINK		__NETIF_MSG(LINK)
#define NETIF_MSG_TIMER		__NETIF_MSG(TIMER)
#define NETIF_MSG_IFDOWN	__NETIF_MSG(IFDOWN)
#define NETIF_MSG_IFUP		__NETIF_MSG(IFUP)
#define NETIF_MSG_RX_ERR	__NETIF_MSG(RX_ERR)
#define NETIF_MSG_TX_ERR	__NETIF_MSG(TX_ERR)
#define NETIF_MSG_TX_QUEUED	__NETIF_MSG(TX_QUEUED)
#define NETIF_MSG_INTR		__NETIF_MSG(INTR)
#define NETIF_MSG_TX_DONE	__NETIF_MSG(TX_DONE)
#define NETIF_MSG_RX_STATUS	__NETIF_MSG(RX_STATUS)
#define NETIF_MSG_PKTDATA	__NETIF_MSG(PKTDATA)
#define NETIF_MSG_HW		__NETIF_MSG(HW)
#define NETIF_MSG_WOL		__NETIF_MSG(WOL)

/* Per-class predicates over a driver-private struct with msg_enable. */
#define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)
4391
4392static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
4393{
4394
4395 if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
4396 return default_msg_enable_bits;
4397 if (debug_value == 0)
4398 return 0;
4399
4400 return (1U << debug_value) - 1;
4401}
4402
/* Take a queue's xmit lock and record which CPU owns it; the owner field
 * is later consulted by txq_trans_update() and cleared on unlock.
 */
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
 spin_lock(&txq->_xmit_lock);
 txq->xmit_lock_owner = cpu;
}

/* Lockless-TX path: no real lock is taken, the __acquire() only keeps
 * sparse's lock-balance checking happy. Always "succeeds".
 */
static inline bool __netif_tx_acquire(struct netdev_queue *txq)
{
 __acquire(&txq->_xmit_lock);
 return true;
}

/* Lockless-TX counterpart of __netif_tx_acquire(). */
static inline void __netif_tx_release(struct netdev_queue *txq)
{
 __release(&txq->_xmit_lock);
}

/* As __netif_tx_lock(), but also disables bottom halves. */
static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
 spin_lock_bh(&txq->_xmit_lock);
 txq->xmit_lock_owner = smp_processor_id();
}

/* Try to take the xmit lock; record ownership only on success. */
static inline bool __netif_tx_trylock(struct netdev_queue *txq)
{
 bool ok = spin_trylock(&txq->_xmit_lock);
 if (likely(ok))
  txq->xmit_lock_owner = smp_processor_id();
 return ok;
}

/* Clear ownership before dropping the lock, so a concurrent locker never
 * observes a stale owner.
 */
static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
 txq->xmit_lock_owner = -1;
 spin_unlock(&txq->_xmit_lock);
}

static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
 txq->xmit_lock_owner = -1;
 spin_unlock_bh(&txq->_xmit_lock);
}
4445
/* Refresh the queue's trans_start stamp, but only when the xmit lock is
 * held (xmit_lock_owner is -1 whenever it is not).
 */
static inline void txq_trans_update(struct netdev_queue *txq)
{
 if (txq->xmit_lock_owner != -1)
  txq->trans_start = jiffies;
}


/* Update trans_start of tx queue 0 only (single-queue/legacy helper).
 * The compare avoids a redundant store when the stamp is already current.
 */
static inline void netif_trans_update(struct net_device *dev)
{
 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

 if (txq->trans_start != jiffies)
  txq->trans_start = jiffies;
}
4460
4461
4462
4463
4464
4465
4466
/**
 * netif_tx_lock - grab the transmit path of an entire device
 * @dev: network device
 *
 * Takes dev->tx_global_lock, then freezes every tx queue. Each queue's
 * xmit lock is taken and released around setting the frozen bit, so any
 * transmit already running on that queue has finished before we return.
 */
static inline void netif_tx_lock(struct net_device *dev)
{
 unsigned int i;
 int cpu;

 spin_lock(&dev->tx_global_lock);
 cpu = smp_processor_id();
 for (i = 0; i < dev->num_tx_queues; i++) {
  struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

  /* The frozen bit stops the queue from being run once we
   * drop its xmit lock; taking the lock first guarantees no
   * transmit is in flight when the bit becomes visible.
   */
  __netif_tx_lock(txq, cpu);
  set_bit(__QUEUE_STATE_FROZEN, &txq->state);
  __netif_tx_unlock(txq);
 }
}

/* BH-disabling variant of netif_tx_lock(). */
static inline void netif_tx_lock_bh(struct net_device *dev)
{
 local_bh_disable();
 netif_tx_lock(dev);
}

/* Undo netif_tx_lock(): unfreeze and reschedule every queue, then drop
 * the global lock.
 */
static inline void netif_tx_unlock(struct net_device *dev)
{
 unsigned int i;

 for (i = 0; i < dev->num_tx_queues; i++) {
  struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

  /* No xmit lock needed: a frozen queue cannot run, so clearing
   * the bit and rescheduling the queue is sufficient.
   */
  clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
  netif_schedule_queue(txq);
 }
 spin_unlock(&dev->tx_global_lock);
}

/* Counterpart of netif_tx_lock_bh(). */
static inline void netif_tx_unlock_bh(struct net_device *dev)
{
 netif_tx_unlock(dev);
 local_bh_enable();
}
4517
/* Take a queue's xmit lock unless the driver advertises lockless TX
 * (NETIF_F_LLTX); for LLTX only the sparse annotation is "acquired".
 */
#define HARD_TX_LOCK(dev, txq, cpu) { \
 if ((dev->features & NETIF_F_LLTX) == 0) { \
  __netif_tx_lock(txq, cpu); \
 } else { \
  __netif_tx_acquire(txq); \
 } \
}

/* Trylock variant: always "succeeds" for LLTX drivers. */
#define HARD_TX_TRYLOCK(dev, txq) \
 (((dev->features & NETIF_F_LLTX) == 0) ? \
 __netif_tx_trylock(txq) : \
 __netif_tx_acquire(txq))

/* Release counterpart of HARD_TX_LOCK. */
#define HARD_TX_UNLOCK(dev, txq) { \
 if ((dev->features & NETIF_F_LLTX) == 0) { \
  __netif_tx_unlock(txq); \
 } else { \
  __netif_tx_release(txq); \
 } \
}
4538
/* Stop every tx queue of the device. Each queue's xmit lock is taken so
 * that an in-flight transmit on that queue completes before the queue is
 * marked stopped; BHs are disabled around the whole walk.
 */
static inline void netif_tx_disable(struct net_device *dev)
{
 unsigned int i;
 int cpu;

 local_bh_disable();
 cpu = smp_processor_id();
 spin_lock(&dev->tx_global_lock);
 for (i = 0; i < dev->num_tx_queues; i++) {
  struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

  __netif_tx_lock(txq, cpu);
  netif_tx_stop_queue(txq);
  __netif_tx_unlock(txq);
 }
 spin_unlock(&dev->tx_global_lock);
 local_bh_enable();
}
4557
/* Lock the device's address lists. Under lockdep the per-device nesting
 * depth is fed to spin_lock_nested() — presumably so that stacked devices
 * taking each other's addr_list_lock don't trigger false deadlock reports.
 */
static inline void netif_addr_lock(struct net_device *dev)
{
 unsigned char nest_level = 0;

#ifdef CONFIG_LOCKDEP
 nest_level = dev->nested_level;
#endif
 spin_lock_nested(&dev->addr_list_lock, nest_level);
}

/* As netif_addr_lock(), with bottom halves disabled first. */
static inline void netif_addr_lock_bh(struct net_device *dev)
{
 unsigned char nest_level = 0;

#ifdef CONFIG_LOCKDEP
 nest_level = dev->nested_level;
#endif
 local_bh_disable();
 spin_lock_nested(&dev->addr_list_lock, nest_level);
}

static inline void netif_addr_unlock(struct net_device *dev)
{
 spin_unlock(&dev->addr_list_lock);
}

static inline void netif_addr_unlock_bh(struct net_device *dev)
{
 spin_unlock_bh(&dev->addr_list_lock);
}
4588
4589
4590
4591
4592
/* Iterate over dev->dev_addrs via the RCU list walker; @ha is the cursor.
 * NOTE(review): the _rcu walker implies callers hold the RCU read lock —
 * confirm against call sites.
 */
#define for_each_dev_addr(dev, ha) \
 list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
4595
4596
4597
4598void ether_setup(struct net_device *dev);
4599
4600
4601struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
4602 unsigned char name_assign_type,
4603 void (*setup)(struct net_device *),
4604 unsigned int txqs, unsigned int rxqs);
4605#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
4606 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)
4607
4608#define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \
4609 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \
4610 count)
4611
4612int register_netdev(struct net_device *dev);
4613void unregister_netdev(struct net_device *dev);
4614
4615int devm_register_netdev(struct device *dev, struct net_device *ndev);
4616
4617
4618int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
4619 struct netdev_hw_addr_list *from_list, int addr_len);
4620void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
4621 struct netdev_hw_addr_list *from_list, int addr_len);
4622int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
4623 struct net_device *dev,
4624 int (*sync)(struct net_device *, const unsigned char *),
4625 int (*unsync)(struct net_device *,
4626 const unsigned char *));
4627int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list,
4628 struct net_device *dev,
4629 int (*sync)(struct net_device *,
4630 const unsigned char *, int),
4631 int (*unsync)(struct net_device *,
4632 const unsigned char *, int));
4633void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list,
4634 struct net_device *dev,
4635 int (*unsync)(struct net_device *,
4636 const unsigned char *, int));
4637void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
4638 struct net_device *dev,
4639 int (*unsync)(struct net_device *,
4640 const unsigned char *));
4641void __hw_addr_init(struct netdev_hw_addr_list *list);
4642
4643
/* Copy @len bytes of @addr into dev->dev_addr; no validation of @len is
 * performed here.
 */
static inline void
__dev_addr_set(struct net_device *dev, const u8 *addr, size_t len)
{
 memcpy(dev->dev_addr, addr, len);
}

/* Set the device's hardware address, using dev->addr_len as the length. */
static inline void dev_addr_set(struct net_device *dev, const u8 *addr)
{
 __dev_addr_set(dev, addr, dev->addr_len);
}

/* Overwrite @len bytes of the hardware address starting at @offset. */
static inline void
dev_addr_mod(struct net_device *dev, unsigned int offset,
 const u8 *addr, size_t len)
{
 memcpy(&dev->dev_addr[offset], addr, len);
}
4661
4662int dev_addr_add(struct net_device *dev, const unsigned char *addr,
4663 unsigned char addr_type);
4664int dev_addr_del(struct net_device *dev, const unsigned char *addr,
4665 unsigned char addr_type);
4666void dev_addr_flush(struct net_device *dev);
4667int dev_addr_init(struct net_device *dev);
4668
4669
4670int dev_uc_add(struct net_device *dev, const unsigned char *addr);
4671int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
4672int dev_uc_del(struct net_device *dev, const unsigned char *addr);
4673int dev_uc_sync(struct net_device *to, struct net_device *from);
4674int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
4675void dev_uc_unsync(struct net_device *to, struct net_device *from);
4676void dev_uc_flush(struct net_device *dev);
4677void dev_uc_init(struct net_device *dev);
4678
4679
4680
4681
4682
4683
4684
4685
4686
4687
/**
 * __dev_uc_sync - Synchronize the device's unicast address list
 * @dev: device whose list to sync
 * @sync: function called for each address that should be added
 * @unsync: function called for each address that should be removed
 *
 * Thin wrapper over __hw_addr_sync_dev() for dev->uc.
 */
static inline int __dev_uc_sync(struct net_device *dev,
 int (*sync)(struct net_device *,
 const unsigned char *),
 int (*unsync)(struct net_device *,
 const unsigned char *))
{
 return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
}

/**
 * __dev_uc_unsync - Remove synchronized unicast addresses
 * @dev: device whose list to unsync
 * @unsync: function called for each address being removed
 */
static inline void __dev_uc_unsync(struct net_device *dev,
 int (*unsync)(struct net_device *,
 const unsigned char *))
{
 __hw_addr_unsync_dev(&dev->uc, dev, unsync);
}
4710
4711
4712int dev_mc_add(struct net_device *dev, const unsigned char *addr);
4713int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
4714int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
4715int dev_mc_del(struct net_device *dev, const unsigned char *addr);
4716int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
4717int dev_mc_sync(struct net_device *to, struct net_device *from);
4718int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
4719void dev_mc_unsync(struct net_device *to, struct net_device *from);
4720void dev_mc_flush(struct net_device *dev);
4721void dev_mc_init(struct net_device *dev);
4722
4723
4724
4725
4726
4727
4728
4729
4730
4731
/**
 * __dev_mc_sync - Synchronize the device's multicast address list
 * @dev: device whose list to sync
 * @sync: function called for each address that should be added
 * @unsync: function called for each address that should be removed
 *
 * Multicast counterpart of __dev_uc_sync(), operating on dev->mc.
 */
static inline int __dev_mc_sync(struct net_device *dev,
 int (*sync)(struct net_device *,
 const unsigned char *),
 int (*unsync)(struct net_device *,
 const unsigned char *))
{
 return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
}

/**
 * __dev_mc_unsync - Remove synchronized multicast addresses
 * @dev: device whose list to unsync
 * @unsync: function called for each address being removed
 */
static inline void __dev_mc_unsync(struct net_device *dev,
 int (*unsync)(struct net_device *,
 const unsigned char *))
{
 __hw_addr_unsync_dev(&dev->mc, dev, unsync);
}
4754
4755
4756void dev_set_rx_mode(struct net_device *dev);
4757void __dev_set_rx_mode(struct net_device *dev);
4758int dev_set_promiscuity(struct net_device *dev, int inc);
4759int dev_set_allmulti(struct net_device *dev, int inc);
4760void netdev_state_change(struct net_device *dev);
4761void __netdev_notify_peers(struct net_device *dev);
4762void netdev_notify_peers(struct net_device *dev);
4763void netdev_features_change(struct net_device *dev);
4764
4765void dev_load(struct net *net, const char *name);
4766struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
4767 struct rtnl_link_stats64 *storage);
4768void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
4769 const struct net_device_stats *netdev_stats);
4770void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
4771 const struct pcpu_sw_netstats __percpu *netstats);
4772void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s);
4773
4774extern int netdev_max_backlog;
4775extern int netdev_tstamp_prequeue;
4776extern int netdev_unregister_timeout_secs;
4777extern int weight_p;
4778extern int dev_weight_rx_bias;
4779extern int dev_weight_tx_bias;
4780extern int dev_rx_weight;
4781extern int dev_tx_weight;
4782extern int gro_normal_batch;
4783
/* Flag bits carried through nested-device walks (see NESTED_SYNC_* below). */
enum {
 NESTED_SYNC_IMM_BIT,
 NESTED_SYNC_TODO_BIT,
};

#define __NESTED_SYNC_BIT(bit) ((u32)1 << (bit))
#define __NESTED_SYNC(name) __NESTED_SYNC_BIT(NESTED_SYNC_ ## name ## _BIT)

#define NESTED_SYNC_IMM __NESTED_SYNC(IMM)
#define NESTED_SYNC_TODO __NESTED_SYNC(TODO)

/* Carries per-walk flags plus opaque caller data through the
 * netdev_walk_all_{upper,lower}_dev*() callback helpers.
 */
struct netdev_nested_priv {
 unsigned char flags;
 void *data;
};
4799
4800bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
4801struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
4802 struct list_head **iter);
4803struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
4804 struct list_head **iter);
4805
#ifdef CONFIG_LOCKDEP
static LIST_HEAD(net_unlink_list);

/* Queue @dev on net_unlink_list unless it is already linked there
 * (an empty unlink_list node means "not queued yet").
 */
static inline void net_unlink_todo(struct net_device *dev)
{
 if (list_empty(&dev->unlink_list))
  list_add_tail(&dev->unlink_list, &net_unlink_list);
}
#endif
4815
4816
/* Iterate the directly adjacent upper devices of @dev; the _rcu walker
 * implies RCU read-side protection is required — confirm at call sites.
 */
#define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
 for (iter = &(dev)->adj_list.upper, \
 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
 updev; \
 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
4822
4823int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
4824 int (*fn)(struct net_device *upper_dev,
4825 struct netdev_nested_priv *priv),
4826 struct netdev_nested_priv *priv);
4827
4828bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
4829 struct net_device *upper_dev);
4830
4831bool netdev_has_any_upper_dev(struct net_device *dev);
4832
4833void *netdev_lower_get_next_private(struct net_device *dev,
4834 struct list_head **iter);
4835void *netdev_lower_get_next_private_rcu(struct net_device *dev,
4836 struct list_head **iter);
4837
/* Iterate the private data of each directly adjacent lower device. */
#define netdev_for_each_lower_private(dev, priv, iter) \
 for (iter = (dev)->adj_list.lower.next, \
 priv = netdev_lower_get_next_private(dev, &(iter)); \
 priv; \
 priv = netdev_lower_get_next_private(dev, &(iter)))

/* RCU variant of netdev_for_each_lower_private(). */
#define netdev_for_each_lower_private_rcu(dev, priv, iter) \
 for (iter = &(dev)->adj_list.lower, \
 priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
 priv; \
 priv = netdev_lower_get_next_private_rcu(dev, &(iter)))

void *netdev_lower_get_next(struct net_device *dev,
 struct list_head **iter);

/* Iterate the directly adjacent lower devices themselves. */
#define netdev_for_each_lower_dev(dev, ldev, iter) \
 for (iter = (dev)->adj_list.lower.next, \
 ldev = netdev_lower_get_next(dev, &(iter)); \
 ldev; \
 ldev = netdev_lower_get_next(dev, &(iter)))
4858
4859struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
4860 struct list_head **iter);
4861int netdev_walk_all_lower_dev(struct net_device *dev,
4862 int (*fn)(struct net_device *lower_dev,
4863 struct netdev_nested_priv *priv),
4864 struct netdev_nested_priv *priv);
4865int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
4866 int (*fn)(struct net_device *lower_dev,
4867 struct netdev_nested_priv *priv),
4868 struct netdev_nested_priv *priv);
4869
4870void *netdev_adjacent_get_private(struct list_head *adj_list);
4871void *netdev_lower_get_first_private_rcu(struct net_device *dev);
4872struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
4873struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
4874int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev,
4875 struct netlink_ext_ack *extack);
4876int netdev_master_upper_dev_link(struct net_device *dev,
4877 struct net_device *upper_dev,
4878 void *upper_priv, void *upper_info,
4879 struct netlink_ext_ack *extack);
4880void netdev_upper_dev_unlink(struct net_device *dev,
4881 struct net_device *upper_dev);
4882int netdev_adjacent_change_prepare(struct net_device *old_dev,
4883 struct net_device *new_dev,
4884 struct net_device *dev,
4885 struct netlink_ext_ack *extack);
4886void netdev_adjacent_change_commit(struct net_device *old_dev,
4887 struct net_device *new_dev,
4888 struct net_device *dev);
4889void netdev_adjacent_change_abort(struct net_device *old_dev,
4890 struct net_device *new_dev,
4891 struct net_device *dev);
4892void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
4893void *netdev_lower_dev_get_private(struct net_device *dev,
4894 struct net_device *lower_dev);
4895void netdev_lower_state_changed(struct net_device *lower_dev,
4896 void *lower_state_info);
4897
4898
4899#define NETDEV_RSS_KEY_LEN 52
4900extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
4901void netdev_rss_key_fill(void *buffer, size_t len);
4902
4903int skb_checksum_help(struct sk_buff *skb);
4904int skb_crc32c_csum_help(struct sk_buff *skb);
4905int skb_csum_hwoffload_help(struct sk_buff *skb,
4906 const netdev_features_t features);
4907
4908struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
4909 netdev_features_t features, bool tx_path);
4910struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
4911 netdev_features_t features);
4912
/* Snapshot of bonding state (slave view + master view) that accompanies
 * bonding notifier events.
 */
struct netdev_bonding_info {
 ifslave slave;
 ifbond master;
};

/* Notifier payload wrapping the generic info header plus bonding state. */
struct netdev_notifier_bonding_info {
 struct netdev_notifier_info info;
 struct netdev_bonding_info bonding_info;
};
4922
4923void netdev_bonding_info_change(struct net_device *dev,
4924 struct netdev_bonding_info *bonding_info);
4925
#if IS_ENABLED(CONFIG_ETHTOOL_NETLINK)
void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data);
#else
/* No-op stub when ethtool netlink support is compiled out. */
static inline void ethtool_notify(struct net_device *dev, unsigned int cmd,
 const void *data)
{
}
#endif
4934
/* Segment @skb for the transmit path (tx_path=true in __skb_gso_segment). */
static inline
struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
 return __skb_gso_segment(skb, features, true);
}
4940__be16 skb_network_protocol(struct sk_buff *skb, int *depth);
4941
/* Can a device with @features checksum packets of @protocol?
 * FCoE is special-cased before the generic NETIF_F_HW_CSUM check: it
 * requires NETIF_F_FCOE_CRC regardless of the generic flag.
 */
static inline bool can_checksum_protocol(netdev_features_t features,
 __be16 protocol)
{
 if (protocol == htons(ETH_P_FCOE))
  return !!(features & NETIF_F_FCOE_CRC);

 /* From here on we assume an IP-style checksum. */

 if (features & NETIF_F_HW_CSUM) {
  /* Generic hardware checksumming covers any such protocol. */
  return true;
 }

 switch (protocol) {
 case htons(ETH_P_IP):
  return !!(features & NETIF_F_IP_CSUM);
 case htons(ETH_P_IPV6):
  return !!(features & NETIF_F_IPV6_CSUM);
 default:
  return false;
 }
}
4964
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb);
#else
/* Stub when CONFIG_BUG is off: rx checksum faults are silently ignored. */
static inline void netdev_rx_csum_fault(struct net_device *dev,
 struct sk_buff *skb)
{
}
#endif
4973
4974void net_enable_timestamp(void);
4975void net_disable_timestamp(void);
4976
4977#ifdef CONFIG_PROC_FS
4978int __init dev_proc_init(void);
4979#else
4980#define dev_proc_init() 0
4981#endif
4982
/* Record the xmit_more hint in per-CPU softnet state, then hand @skb to
 * the driver's ndo_start_xmit().
 */
static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
 struct sk_buff *skb, struct net_device *dev,
 bool more)
{
 __this_cpu_write(softnet_data.xmit.more, more);
 return ops->ndo_start_xmit(skb, dev);
}

/* True when the current transmit will be followed by another — drivers may
 * use this to batch doorbell writes.
 */
static inline bool netdev_xmit_more(void)
{
 return __this_cpu_read(softnet_data.xmit.more);
}

/* Transmit @skb on @txq and, on NETDEV_TX_OK, refresh the queue's
 * trans_start stamp via txq_trans_update().
 */
static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
 struct netdev_queue *txq, bool more)
{
 const struct net_device_ops *ops = dev->netdev_ops;
 netdev_tx_t rc;

 rc = __netdev_start_xmit(ops, skb, dev, more);
 if (rc == NETDEV_TX_OK)
  txq_trans_update(txq);

 return rc;
}
5008
5009int netdev_class_create_file_ns(const struct class_attribute *class_attr,
5010 const void *ns);
5011void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
5012 const void *ns);
5013
5014extern const struct kobj_ns_type_operations net_ns_type_operations;
5015
5016const char *netdev_drivername(const struct net_device *dev);
5017
5018void linkwatch_run_queue(void);
5019
/* Intersect two feature sets for stacked devices. When exactly one side
 * has NETIF_F_HW_CSUM, that side is widened with IP/IPv6 checksum bits
 * first, so the generic flag isn't lost in the AND against a device that
 * only advertises the protocol-specific bits.
 */
static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
 netdev_features_t f2)
{
 if ((f1 ^ f2) & NETIF_F_HW_CSUM) {
  if (f1 & NETIF_F_HW_CSUM)
   f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
  else
   f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
 }

 return f1 & f2;
}
5032
/* Features the device should end up with: currently-active bits that are
 * not user-toggleable (outside hw_features) plus everything requested in
 * wanted_features.
 */
static inline netdev_features_t netdev_get_wanted_features(
 struct net_device *dev)
{
 return (dev->features & ~dev->hw_features) | dev->wanted_features;
}
5038netdev_features_t netdev_increment_features(netdev_features_t all,
5039 netdev_features_t one, netdev_features_t mask);
5040
5041
5042
5043
5044
/* Add all TSO feature bits (limited by @mask) on top of @features. */
static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
 netdev_features_t mask)
{
 return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
}
5050
5051int __netdev_update_features(struct net_device *dev);
5052void netdev_update_features(struct net_device *dev);
5053void netdev_change_features(struct net_device *dev);
5054
5055void netif_stacked_transfer_operstate(const struct net_device *rootdev,
5056 struct net_device *dev);
5057
5058netdev_features_t passthru_features_check(struct sk_buff *skb,
5059 struct net_device *dev,
5060 netdev_features_t features);
5061netdev_features_t netif_skb_features(struct sk_buff *skb);
5062
/* True when @features contain the NETIF_F_* bit corresponding to
 * @gso_type. The BUILD_BUG_ON()s pin the compile-time 1:1 mapping between
 * the SKB_GSO_* values and the NETIF_F_GSO_* feature bits (which sit
 * NETIF_F_GSO_SHIFT positions higher).
 */
static inline bool net_gso_ok(netdev_features_t features, int gso_type)
{
 netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT;

 BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
 BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
 BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
 BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
 BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
 BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
 BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
 BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
 BUILD_BUG_ON(SKB_GSO_IPXIP4 != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT));
 BUILD_BUG_ON(SKB_GSO_IPXIP6 != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT));
 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
 BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT));
 BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
 BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
 BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT));
 BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT));
 BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT));
 BUILD_BUG_ON(SKB_GSO_FRAGLIST != (NETIF_F_GSO_FRAGLIST >> NETIF_F_GSO_SHIFT));

 return (features & feature) == feature;
}
5090
/* GSO works for @skb iff the device supports its gso_type and, when the
 * skb carries a frag_list, NETIF_F_FRAGLIST as well.
 */
static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
{
 return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
}

/* Software GSO is needed when the skb is GSO but the device cannot
 * segment it, or the checksum state is neither PARTIAL nor UNNECESSARY.
 */
static inline bool netif_needs_gso(struct sk_buff *skb,
 netdev_features_t features)
{
 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
}
5104
/* Set the device's maximum GSO aggregate size.
 * NOTE(review): later kernels wrap this store in WRITE_ONCE() because
 * readers are lockless — confirm whether that applies to this tree.
 */
static inline void netif_set_gso_max_size(struct net_device *dev,
 unsigned int size)
{
 dev->gso_max_size = size;
}

/* Undo header adjustments after a failed GSO attempt: push back the
 * pulled bytes and restore protocol, mac/network header offsets and
 * mac_len so the skb is back in its pre-segmentation layout.
 */
static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
 int pulled_hlen, u16 mac_offset,
 int mac_len)
{
 skb->protocol = protocol;
 skb->encapsulation = 1;
 skb_push(skb, pulled_hlen);
 skb_reset_transport_header(skb);
 skb->mac_header = mac_offset;
 skb->network_header = skb->mac_header + mac_len;
 skb->mac_len = mac_len;
}
5123
/* Device-type predicates keyed off dev->priv_flags (and dev->flags). */
static inline bool netif_is_macsec(const struct net_device *dev)
{
 return dev->priv_flags & IFF_MACSEC;
}

static inline bool netif_is_macvlan(const struct net_device *dev)
{
 return dev->priv_flags & IFF_MACVLAN;
}

static inline bool netif_is_macvlan_port(const struct net_device *dev)
{
 return dev->priv_flags & IFF_MACVLAN_PORT;
}

/* Bonding master requires both IFF_MASTER (flags) and IFF_BONDING. */
static inline bool netif_is_bond_master(const struct net_device *dev)
{
 return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
}

/* Bonding slave requires both IFF_SLAVE (flags) and IFF_BONDING. */
static inline bool netif_is_bond_slave(const struct net_device *dev)
{
 return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
}
5148
/* Device can transmit frames without appending the Ethernet FCS. */
static inline bool netif_supports_nofcs(struct net_device *dev)
{
 return dev->priv_flags & IFF_SUPP_NOFCS;
}

/* L3 master-device (l3mdev) role predicates. */
static inline bool netif_has_l3_rx_handler(const struct net_device *dev)
{
 return dev->priv_flags & IFF_L3MDEV_RX_HANDLER;
}

static inline bool netif_is_l3_master(const struct net_device *dev)
{
 return dev->priv_flags & IFF_L3MDEV_MASTER;
}

static inline bool netif_is_l3_slave(const struct net_device *dev)
{
 return dev->priv_flags & IFF_L3MDEV_SLAVE;
}
5168
/* Bridge and Open vSwitch role predicates. */
static inline bool netif_is_bridge_master(const struct net_device *dev)
{
 return dev->priv_flags & IFF_EBRIDGE;
}

static inline bool netif_is_bridge_port(const struct net_device *dev)
{
 return dev->priv_flags & IFF_BRIDGE_PORT;
}

static inline bool netif_is_ovs_master(const struct net_device *dev)
{
 return dev->priv_flags & IFF_OPENVSWITCH;
}

static inline bool netif_is_ovs_port(const struct net_device *dev)
{
 return dev->priv_flags & IFF_OVS_DATAPATH;
}

/* Port of either a Linux bridge or an OVS datapath. */
static inline bool netif_is_any_bridge_port(const struct net_device *dev)
{
 return netif_is_bridge_port(dev) || netif_is_ovs_port(dev);
}
5193
/* Team driver role predicates. */
static inline bool netif_is_team_master(const struct net_device *dev)
{
 return dev->priv_flags & IFF_TEAM;
}

static inline bool netif_is_team_port(const struct net_device *dev)
{
 return dev->priv_flags & IFF_TEAM_PORT;
}

/* Link aggregation master: either a bond or a team master. */
static inline bool netif_is_lag_master(const struct net_device *dev)
{
 return netif_is_bond_master(dev) || netif_is_team_master(dev);
}

/* Link aggregation port: either a bond slave or a team port. */
static inline bool netif_is_lag_port(const struct net_device *dev)
{
 return netif_is_bond_slave(dev) || netif_is_team_port(dev);
}
5213
/* User has explicitly configured RX flow hashing (RSS) on this device. */
static inline bool netif_is_rxfh_configured(const struct net_device *dev)
{
 return dev->priv_flags & IFF_RXFH_CONFIGURED;
}

/* net_failover master/slave role predicates. */
static inline bool netif_is_failover(const struct net_device *dev)
{
 return dev->priv_flags & IFF_FAILOVER;
}

static inline bool netif_is_failover_slave(const struct net_device *dev)
{
 return dev->priv_flags & IFF_FAILOVER_SLAVE;
}
5228
5229
/* Keep dst entries attached to skbs this device transmits: clears both
 * the regular and the permanent XMIT_DST_RELEASE flags.
 */
static inline void netif_keep_dst(struct net_device *dev)
{
 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
}


/* True if stacking a VLAN on @dev reduces the usable MTU. The IFF_MACSEC
 * bit doubles as the indicator here (MACsec devices are currently the
 * only such case) rather than spending a separate IFF bit.
 */
static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
{

 return dev->priv_flags & IFF_MACSEC;
}
5241
5242extern struct pernet_operations __net_initdata loopback_net_ops;
5243
5244
5245
5246
5247
5248static inline const char *netdev_name(const struct net_device *dev)
5249{
5250 if (!dev->name[0] || strchr(dev->name, '%'))
5251 return "(unnamed net_device)";
5252 return dev->name;
5253}
5254
/* True while the device is mid-unregister (NETREG_UNREGISTERING). */
static inline bool netdev_unregistering(const struct net_device *dev)
{
 return dev->reg_state == NETREG_UNREGISTERING;
}

/* Human-readable suffix for the registration state, used by the
 * netdev_WARN macros below. No default case on purpose: anything outside
 * the known NETREG_* values falls through to the WARN_ONCE.
 */
static inline const char *netdev_reg_state(const struct net_device *dev)
{
 switch (dev->reg_state) {
 case NETREG_UNINITIALIZED: return " (uninitialized)";
 case NETREG_REGISTERED: return "";
 case NETREG_UNREGISTERING: return " (unregistering)";
 case NETREG_UNREGISTERED: return " (unregistered)";
 case NETREG_RELEASED: return " (released)";
 case NETREG_DUMMY: return " (dummy)";
 }

 WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state);
 return " (unknown)";
}
5274
5275__printf(3, 4) __cold
5276void netdev_printk(const char *level, const struct net_device *dev,
5277 const char *format, ...);
5278__printf(2, 3) __cold
5279void netdev_emerg(const struct net_device *dev, const char *format, ...);
5280__printf(2, 3) __cold
5281void netdev_alert(const struct net_device *dev, const char *format, ...);
5282__printf(2, 3) __cold
5283void netdev_crit(const struct net_device *dev, const char *format, ...);
5284__printf(2, 3) __cold
5285void netdev_err(const struct net_device *dev, const char *format, ...);
5286__printf(2, 3) __cold
5287void netdev_warn(const struct net_device *dev, const char *format, ...);
5288__printf(2, 3) __cold
5289void netdev_notice(const struct net_device *dev, const char *format, ...);
5290__printf(2, 3) __cold
5291void netdev_info(const struct net_device *dev, const char *format, ...);
5292
/* Print a message at most once per boot, tracked per call site via a
 * static flag. NOTE(review): the flag is not atomic, so two concurrent
 * first calls could both print — apparently acceptable here.
 */
#define netdev_level_once(level, dev, fmt, ...) \
do { \
 static bool __print_once __read_mostly; \
 \
 if (!__print_once) { \
  __print_once = true; \
  netdev_printk(level, dev, fmt, ##__VA_ARGS__); \
 } \
} while (0)

/* Per-level convenience wrappers around netdev_level_once(). */
#define netdev_emerg_once(dev, fmt, ...) \
 netdev_level_once(KERN_EMERG, dev, fmt, ##__VA_ARGS__)
#define netdev_alert_once(dev, fmt, ...) \
 netdev_level_once(KERN_ALERT, dev, fmt, ##__VA_ARGS__)
#define netdev_crit_once(dev, fmt, ...) \
 netdev_level_once(KERN_CRIT, dev, fmt, ##__VA_ARGS__)
#define netdev_err_once(dev, fmt, ...) \
 netdev_level_once(KERN_ERR, dev, fmt, ##__VA_ARGS__)
#define netdev_warn_once(dev, fmt, ...) \
 netdev_level_once(KERN_WARNING, dev, fmt, ##__VA_ARGS__)
#define netdev_notice_once(dev, fmt, ...) \
 netdev_level_once(KERN_NOTICE, dev, fmt, ##__VA_ARGS__)
#define netdev_info_once(dev, fmt, ...) \
 netdev_level_once(KERN_INFO, dev, fmt, ##__VA_ARGS__)
5317
/* Module alias so "netdev-<name>" requests load the right driver. */
#define MODULE_ALIAS_NETDEV(device) \
 MODULE_ALIAS("netdev-" device)

/* netdev_dbg(): routed through dynamic debug when available, plain printk
 * under DEBUG, otherwise compiled out while still type-checking its
 * arguments via the dead if (0) branch.
 */
#if defined(CONFIG_DYNAMIC_DEBUG) || \
 (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#define netdev_dbg(__dev, format, args...) \
do { \
 dynamic_netdev_dbg(__dev, format, ##args); \
} while (0)
#elif defined(DEBUG)
#define netdev_dbg(__dev, format, args...) \
 netdev_printk(KERN_DEBUG, __dev, format, ##args)
#else
#define netdev_dbg(__dev, format, args...) \
({ \
 if (0) \
  netdev_printk(KERN_DEBUG, __dev, format, ##args); \
})
#endif

/* netdev_vdbg(): only active under VERBOSE_DEBUG; otherwise a no-op that
 * still type-checks its arguments.
 */
#if defined(VERBOSE_DEBUG)
#define netdev_vdbg netdev_dbg
#else

#define netdev_vdbg(dev, format, args...) \
({ \
 if (0) \
  netdev_printk(KERN_DEBUG, dev, format, ##args); \
 0; \
})
#endif
5349
5350
5351
5352
5353
5354
/* WARN()/WARN_ONCE() variants that prefix the splat with the device name
 * and its registration state.
 */
#define netdev_WARN(dev, format, args...) \
 WARN(1, "netdevice: %s%s: " format, netdev_name(dev), \
 netdev_reg_state(dev), ##args)

#define netdev_WARN_ONCE(dev, format, args...) \
 WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev), \
 netdev_reg_state(dev), ##args)
5362
5363
5364
/* Emit a message only if the NETIF_MSG_<type> category is enabled in
 * priv->msg_enable.
 */
#define netif_printk(priv, type, level, dev, fmt, args...) \
do { \
 if (netif_msg_##type(priv)) \
  netdev_printk(level, (dev), fmt, ##args); \
} while (0)

/* Same gating, but routed through the level-specific netdev_<level>()
 * helpers declared above.
 */
#define netif_level(level, priv, type, dev, fmt, args...) \
do { \
 if (netif_msg_##type(priv)) \
  netdev_##level(dev, fmt, ##args); \
} while (0)

#define netif_emerg(priv, type, dev, fmt, args...) \
 netif_level(emerg, priv, type, dev, fmt, ##args)
#define netif_alert(priv, type, dev, fmt, args...) \
 netif_level(alert, priv, type, dev, fmt, ##args)
#define netif_crit(priv, type, dev, fmt, args...) \
 netif_level(crit, priv, type, dev, fmt, ##args)
#define netif_err(priv, type, dev, fmt, args...) \
 netif_level(err, priv, type, dev, fmt, ##args)
#define netif_warn(priv, type, dev, fmt, args...) \
 netif_level(warn, priv, type, dev, fmt, ##args)
#define netif_notice(priv, type, dev, fmt, args...) \
 netif_level(notice, priv, type, dev, fmt, ##args)
#define netif_info(priv, type, dev, fmt, args...) \
 netif_level(info, priv, type, dev, fmt, ##args)
5391
/* netif_dbg(): msg_enable-gated debug output; same three-way fallback as
 * netdev_dbg() above (dynamic debug / DEBUG / compiled out but
 * type-checked).
 */
#if defined(CONFIG_DYNAMIC_DEBUG) || \
 (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#define netif_dbg(priv, type, netdev, format, args...) \
do { \
 if (netif_msg_##type(priv)) \
  dynamic_netdev_dbg(netdev, format, ##args); \
} while (0)
#elif defined(DEBUG)
#define netif_dbg(priv, type, dev, format, args...) \
 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
#else
#define netif_dbg(priv, type, dev, format, args...) \
({ \
 if (0) \
  netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
 0; \
})
#endif

/* Emit at debug level when @cond holds, otherwise at the given level. */
#define netif_cond_dbg(priv, type, netdev, cond, level, fmt, args...) \
 do { \
  if (cond) \
   netif_dbg(priv, type, netdev, fmt, ##args); \
  else \
   netif_ ## level(priv, type, netdev, fmt, ##args); \
 } while (0)

/* netif_vdbg(): only active under VERBOSE_DEBUG. */
#if defined(VERBOSE_DEBUG)
#define netif_vdbg netif_dbg
#else
#define netif_vdbg(priv, type, dev, format, args...) \
({ \
 if (0) \
  netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
 0; \
})
#endif
5430
5431
5432
5433
5434
5435
5436
5437
5438
5439
5440
5441
5442
5443
5444
5445
5446
5447
5448
5449
/* Packet-type handler tables: ptype_all receives every packet, ptype_base
 * is hashed by protocol into PTYPE_HASH_SIZE buckets (power of two, so
 * PTYPE_HASH_MASK works as a modulus).
 */
#define PTYPE_HASH_SIZE (16)
#define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)

extern struct list_head ptype_all __read_mostly;
extern struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;

/* Always-present device that drops everything handed to it. */
extern struct net_device *blackhole_netdev;
5457
5458#endif
5459