#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/timer.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/prefetch.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/workqueue.h>
#include <linux/dynamic_queue_limits.h>

#include <net/net_namespace.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif
#include <net/netprio_cgroup.h>
#include <net/xdp.h>

#include <linux/netdev_features.h>
#include <linux/neighbour.h>
#include <uapi/linux/netdevice.h>
#include <uapi/linux/if_bonding.h>
#include <uapi/linux/pkt_cls.h>
#include <linux/hashtable.h>
#include <linux/rbtree.h>

struct netpoll_info;
struct device;
struct ethtool_ops;
struct phy_device;
struct dsa_port;
struct ip_tunnel_parm;
struct macsec_context;
struct macsec_ops;

struct sfp_bus;

struct wireless_dev;

struct wpan_dev;
struct mpls_dev;

struct udp_tunnel_info;
struct udp_tunnel_nic_info;
struct udp_tunnel_nic;
struct bpf_prog;
struct xdp_buff;

void synchronize_net(void);
void netdev_set_default_ethtool_ops(struct net_device *dev,
				    const struct ethtool_ops *ops);

#define NET_RX_SUCCESS		0
#define NET_RX_DROP		1

#define MAX_NEST_DEV 8

#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_DROP		0x01
#define NET_XMIT_CN		0x02
#define NET_XMIT_MASK		0x0f

#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)

#define NETDEV_TX_MASK		0xf0

enum netdev_tx {
	__NETDEV_TX_MIN	= INT_MIN,
	NETDEV_TX_OK	= 0x00,
	NETDEV_TX_BUSY	= 0x10,
};
typedef enum netdev_tx netdev_tx_t;

static inline bool dev_xmit_complete(int rc)
{
	if (likely(rc < NET_XMIT_MASK))
		return true;

	return false;
}
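
/*
 * Example (illustrative sketch, not part of this header's API surface):
 * dev_xmit_complete() folds the NET_XMIT_* and NETDEV_TX_* spaces into a
 * single "was the skb consumed?" answer.  In upstream kernels the inline
 * dev_direct_xmit() at the end of this header uses it exactly this way:
 *
 *	ret = __dev_direct_xmit(skb, queue_id);
 *	if (!dev_xmit_complete(ret))
 *		kfree_skb(skb);	// not consumed by the driver; caller frees
 */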

#if defined(CONFIG_HYPERV_NET)
# define LL_MAX_HEADER 128
#elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#else
# define LL_MAX_HEADER 32
#endif

#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
    !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

struct net_device_stats {
	unsigned long	rx_packets;
	unsigned long	tx_packets;
	unsigned long	rx_bytes;
	unsigned long	tx_bytes;
	unsigned long	rx_errors;
	unsigned long	tx_errors;
	unsigned long	rx_dropped;
	unsigned long	tx_dropped;
	unsigned long	multicast;
	unsigned long	collisions;
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;
	unsigned long	rx_crc_errors;
	unsigned long	rx_frame_errors;
	unsigned long	rx_fifo_errors;
	unsigned long	rx_missed_errors;
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};

#include <linux/cache.h>
#include <linux/skbuff.h>

#ifdef CONFIG_RPS
#include <linux/static_key.h>
extern struct static_key_false rps_needed;
extern struct static_key_false rfs_needed;
#endif

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netdev_hw_addr {
	struct list_head	list;
	struct rb_node		node;
	unsigned char		addr[MAX_ADDR_LEN];
	unsigned char		type;
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_UNICAST	3
#define NETDEV_HW_ADDR_T_MULTICAST	4
	bool			global_use;
	int			sync_cnt;
	int			refcount;
	int			synced;
	struct rcu_head		rcu_head;
};

struct netdev_hw_addr_list {
	struct list_head list;
	int count;

	struct rb_root tree;
};

#define netdev_hw_addr_list_count(l) ((l)->count)
#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
#define netdev_hw_addr_list_for_each(ha, l) \
	list_for_each_entry(ha, &(l)->list, list)

#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->uc)

#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->mc)

struct hh_cache {
	unsigned int	hh_len;
	seqlock_t	hh_lock;

#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
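
/*
 * Example (sketch): a protocol allocating an output skb typically reserves
 * LL_RESERVED_SPACE() bytes of headroom so the device's link-layer header
 * fits without reallocation; "hlen" and "len" below are illustrative only:
 *
 *	int hlen = LL_RESERVED_SPACE(dev);
 *	struct sk_buff *skb = alloc_skb(len + hlen, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hlen);
 */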

struct header_ops {
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned int len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
	bool	(*validate)(const char *ll_header, unsigned int len);
	__be16	(*parse_protocol)(const struct sk_buff *skb);
};

enum netdev_state_t {
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
	__LINK_STATE_TESTING,
};

struct gro_list {
	struct list_head	list;
	int			count;
};

#define GRO_HASH_BUCKETS	8

struct napi_struct {
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	int			defer_hard_irqs_count;
	unsigned long		gro_bitmask;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	int			poll_owner;
#endif
	struct net_device	*dev;
	struct gro_list		gro_hash[GRO_HASH_BUCKETS];
	struct sk_buff		*skb;
	struct list_head	rx_list;
	int			rx_count;
	struct hrtimer		timer;
	struct list_head	dev_list;
	struct hlist_node	napi_hash_node;
	unsigned int		napi_id;
	struct task_struct	*thread;
};

enum {
	NAPI_STATE_SCHED,
	NAPI_STATE_MISSED,
	NAPI_STATE_DISABLE,
	NAPI_STATE_NPSVC,
	NAPI_STATE_LISTED,
	NAPI_STATE_NO_BUSY_POLL,
	NAPI_STATE_IN_BUSY_POLL,
	NAPI_STATE_PREFER_BUSY_POLL,
	NAPI_STATE_THREADED,
	NAPI_STATE_SCHED_THREADED,
};

enum {
	NAPIF_STATE_SCHED		= BIT(NAPI_STATE_SCHED),
	NAPIF_STATE_MISSED		= BIT(NAPI_STATE_MISSED),
	NAPIF_STATE_DISABLE		= BIT(NAPI_STATE_DISABLE),
	NAPIF_STATE_NPSVC		= BIT(NAPI_STATE_NPSVC),
	NAPIF_STATE_LISTED		= BIT(NAPI_STATE_LISTED),
	NAPIF_STATE_NO_BUSY_POLL	= BIT(NAPI_STATE_NO_BUSY_POLL),
	NAPIF_STATE_IN_BUSY_POLL	= BIT(NAPI_STATE_IN_BUSY_POLL),
	NAPIF_STATE_PREFER_BUSY_POLL	= BIT(NAPI_STATE_PREFER_BUSY_POLL),
	NAPIF_STATE_THREADED		= BIT(NAPI_STATE_THREADED),
	NAPIF_STATE_SCHED_THREADED	= BIT(NAPI_STATE_SCHED_THREADED),
};

enum gro_result {
	GRO_MERGED,
	GRO_MERGED_FREE,
	GRO_HELD,
	GRO_NORMAL,
	GRO_CONSUMED,
};
typedef enum gro_result gro_result_t;

enum rx_handler_result {
	RX_HANDLER_CONSUMED,
	RX_HANDLER_ANOTHER,
	RX_HANDLER_EXACT,
	RX_HANDLER_PASS,
};
typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);

void __napi_schedule(struct napi_struct *n);
void __napi_schedule_irqoff(struct napi_struct *n);

static inline bool napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

static inline bool napi_prefer_busy_poll(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state);
}

bool napi_schedule_prep(struct napi_struct *n);

static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}
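
/*
 * Example (sketch): a typical driver hard-IRQ handler masks its RX
 * interrupt and defers the real work to NAPI context; "struct my_priv"
 * and my_disable_rx_irq() are hypothetical names:
 *
 *	static irqreturn_t my_isr(int irq, void *data)
 *	{
 *		struct my_priv *priv = data;
 *
 *		my_disable_rx_irq(priv);
 *		napi_schedule(&priv->napi);
 *		return IRQ_HANDLED;
 *	}
 */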

static inline void napi_schedule_irqoff(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule_irqoff(n);
}

static inline bool napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return true;
	}
	return false;
}

bool napi_complete_done(struct napi_struct *n, int work_done);

static inline bool napi_complete(struct napi_struct *n)
{
	return napi_complete_done(n, 0);
}
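
/*
 * Example (sketch): the matching poll() method consumes up to @budget
 * packets and only re-arms the device interrupt once NAPI is truly done;
 * my_clean_rx() and my_enable_rx_irq() are hypothetical helpers:
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct my_priv *priv = container_of(napi, struct my_priv, napi);
 *		int work_done = my_clean_rx(priv, budget);
 *
 *		if (work_done < budget &&
 *		    napi_complete_done(napi, work_done))
 *			my_enable_rx_irq(priv);
 *		return work_done;
 *	}
 */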

int dev_set_threaded(struct net_device *dev, bool threaded);

void napi_disable(struct napi_struct *n);

void napi_enable(struct napi_struct *n);

static inline void napi_synchronize(const struct napi_struct *n)
{
	if (IS_ENABLED(CONFIG_SMP))
		while (test_bit(NAPI_STATE_SCHED, &n->state))
			msleep(1);
	else
		barrier();
}

static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n)
{
	unsigned long val, new;

	do {
		val = READ_ONCE(n->state);
		if (val & NAPIF_STATE_DISABLE)
			return true;

		if (!(val & NAPIF_STATE_SCHED))
			return false;

		new = val | NAPIF_STATE_MISSED;
	} while (cmpxchg(&n->state, val, new) != val);

	return true;
}

enum netdev_queue_state_t {
	__QUEUE_STATE_DRV_XOFF,
	__QUEUE_STATE_STACK_XOFF,
	__QUEUE_STATE_FROZEN,
};

#define QUEUE_STATE_DRV_XOFF	(1 << __QUEUE_STATE_DRV_XOFF)
#define QUEUE_STATE_STACK_XOFF	(1 << __QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_FROZEN	(1 << __QUEUE_STATE_FROZEN)

#define QUEUE_STATE_ANY_XOFF	(QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
					QUEUE_STATE_FROZEN)
#define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \
					QUEUE_STATE_FROZEN)

struct netdev_queue {
	struct net_device	*dev;
	struct Qdisc __rcu	*qdisc;
	struct Qdisc		*qdisc_sleeping;
#ifdef CONFIG_SYSFS
	struct kobject		kobj;
#endif
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	int			numa_node;
#endif
	unsigned long		tx_maxrate;

	unsigned long		trans_timeout;

	struct net_device	*sb_dev;
#ifdef CONFIG_XDP_SOCKETS
	struct xsk_buff_pool	*pool;
#endif

	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	int			xmit_lock_owner;

	unsigned long		trans_start;

	unsigned long		state;

#ifdef CONFIG_BQL
	struct dql		dql;
#endif
} ____cacheline_aligned_in_smp;

extern int sysctl_fb_tunnels_only_for_init_net;
extern int sysctl_devconf_inherit_init_net;

static inline bool net_has_fallback_tunnels(const struct net *net)
{
	return !IS_ENABLED(CONFIG_SYSCTL) ||
	       !sysctl_fb_tunnels_only_for_init_net ||
	       (net == &init_net && sysctl_fb_tunnels_only_for_init_net == 1);
}

static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	return q->numa_node;
#else
	return NUMA_NO_NODE;
#endif
}

static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	q->numa_node = node;
#endif
}

#ifdef CONFIG_RPS
struct rps_map {
	unsigned int len;
	struct rcu_head rcu;
	u16 cpus[];
};
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))

struct rps_dev_flow {
	u16 cpu;
	u16 filter;
	unsigned int last_qtail;
};
#define RPS_NO_FILTER 0xffff

struct rps_dev_flow_table {
	unsigned int mask;
	struct rcu_head rcu;
	struct rps_dev_flow flows[];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
	((_num) * sizeof(struct rps_dev_flow)))

struct rps_sock_flow_table {
	u32	mask;

	u32	ents[] ____cacheline_aligned_in_smp;
};
#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))

#define RPS_NO_CPU 0xffff

extern u32 rps_cpu_mask;
extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;

static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
					u32 hash)
{
	if (table && hash) {
		unsigned int index = hash & table->mask;
		u32 val = hash & ~rps_cpu_mask;

		val |= raw_smp_processor_id();

		if (table->ents[index] != val)
			table->ents[index] = val;
	}
}

#ifdef CONFIG_RFS_ACCEL
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
			 u16 filter_id);
#endif
#endif

struct netdev_rx_queue {
	struct xdp_rxq_info		xdp_rxq;
#ifdef CONFIG_RPS
	struct rps_map __rcu		*rps_map;
	struct rps_dev_flow_table __rcu	*rps_flow_table;
#endif
	struct kobject			kobj;
	struct net_device		*dev;
#ifdef CONFIG_XDP_SOCKETS
	struct xsk_buff_pool		*pool;
#endif
} ____cacheline_aligned_in_smp;

struct rx_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_rx_queue *queue, char *buf);
	ssize_t (*store)(struct netdev_rx_queue *queue,
			 const char *buf, size_t len);
};

enum xps_map_type {
	XPS_CPUS = 0,
	XPS_RXQS,
	XPS_MAPS_MAX,
};

#ifdef CONFIG_XPS
struct xps_map {
	unsigned int len;
	unsigned int alloc_len;
	struct rcu_head rcu;
	u16 queues[];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
	- sizeof(struct xps_map)) / sizeof(u16))

struct xps_dev_maps {
	struct rcu_head rcu;
	unsigned int nr_ids;
	s16 num_tc;
	struct xps_map __rcu *attr_map[];
};

#define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) +	\
	(nr_cpu_ids * (_tcs) * sizeof(struct xps_map *)))

#define XPS_RXQ_DEV_MAPS_SIZE(_tcs, _rxqs) (sizeof(struct xps_dev_maps) +\
	(_rxqs * (_tcs) * sizeof(struct xps_map *)))

#endif

#define TC_MAX_QUEUE	16
#define TC_BITMASK	15

struct netdev_tc_txq {
	u16 count;
	u16 offset;
};

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
struct netdev_fcoe_hbainfo {
	char	manufacturer[64];
	char	serial_number[64];
	char	hardware_version[64];
	char	driver_version[64];
	char	optionrom_version[64];
	char	firmware_version[64];
	char	model[256];
	char	model_description[256];
};
#endif

#define MAX_PHYS_ITEM_ID_LEN 32

struct netdev_phys_item_id {
	unsigned char id[MAX_PHYS_ITEM_ID_LEN];
	unsigned char id_len;
};

static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
					    struct netdev_phys_item_id *b)
{
	return a->id_len == b->id_len &&
	       memcmp(a->id, b->id, a->id_len) == 0;
}

typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
				       struct sk_buff *skb,
				       struct net_device *sb_dev);

enum net_device_path_type {
	DEV_PATH_ETHERNET = 0,
	DEV_PATH_VLAN,
	DEV_PATH_BRIDGE,
	DEV_PATH_PPPOE,
	DEV_PATH_DSA,
};

struct net_device_path {
	enum net_device_path_type	type;
	const struct net_device		*dev;
	union {
		struct {
			u16		id;
			__be16		proto;
			u8		h_dest[ETH_ALEN];
		} encap;
		struct {
			enum {
				DEV_PATH_BR_VLAN_KEEP,
				DEV_PATH_BR_VLAN_TAG,
				DEV_PATH_BR_VLAN_UNTAG,
				DEV_PATH_BR_VLAN_UNTAG_HW,
			}		vlan_mode;
			u16		vlan_id;
			__be16		vlan_proto;
		} bridge;
		struct {
			int port;
			u16 proto;
		} dsa;
	};
};

#define NET_DEVICE_PATH_STACK_MAX	5
#define NET_DEVICE_PATH_VLAN_MAX	2

struct net_device_path_stack {
	int			num_paths;
	struct net_device_path	path[NET_DEVICE_PATH_STACK_MAX];
};

struct net_device_path_ctx {
	const struct net_device *dev;
	const u8		*daddr;

	int			num_vlans;
	struct {
		u16		id;
		__be16		proto;
	} vlan[NET_DEVICE_PATH_VLAN_MAX];
};

enum tc_setup_type {
	TC_SETUP_QDISC_MQPRIO,
	TC_SETUP_CLSU32,
	TC_SETUP_CLSFLOWER,
	TC_SETUP_CLSMATCHALL,
	TC_SETUP_CLSBPF,
	TC_SETUP_BLOCK,
	TC_SETUP_QDISC_CBS,
	TC_SETUP_QDISC_RED,
	TC_SETUP_QDISC_PRIO,
	TC_SETUP_QDISC_MQ,
	TC_SETUP_QDISC_ETF,
	TC_SETUP_ROOT_QDISC,
	TC_SETUP_QDISC_GRED,
	TC_SETUP_QDISC_TAPRIO,
	TC_SETUP_FT,
	TC_SETUP_QDISC_ETS,
	TC_SETUP_QDISC_TBF,
	TC_SETUP_QDISC_FIFO,
	TC_SETUP_QDISC_HTB,
};

enum bpf_netdev_command {
	XDP_SETUP_PROG,
	XDP_SETUP_PROG_HW,
	BPF_OFFLOAD_MAP_ALLOC,
	BPF_OFFLOAD_MAP_FREE,
	XDP_SETUP_XSK_POOL,
};

struct bpf_prog_offload_ops;
struct netlink_ext_ack;
struct xdp_umem;
struct xdp_dev_bulk_queue;
struct bpf_xdp_link;

enum bpf_xdp_mode {
	XDP_MODE_SKB = 0,
	XDP_MODE_DRV = 1,
	XDP_MODE_HW = 2,
	__MAX_XDP_MODE
};

struct bpf_xdp_entity {
	struct bpf_prog *prog;
	struct bpf_xdp_link *link;
};

struct netdev_bpf {
	enum bpf_netdev_command command;
	union {
		struct {
			u32 flags;
			struct bpf_prog *prog;
			struct netlink_ext_ack *extack;
		};
		struct {
			struct bpf_offloaded_map *offmap;
		};
		struct {
			struct xsk_buff_pool *pool;
			u16 queue_id;
		} xsk;
	};
};

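/*
 * Example (sketch): a driver's ndo_bpf() implementation usually just
 * dispatches on the command; my_xdp_setup() is a hypothetical helper:
 *
 *	static int my_ndo_bpf(struct net_device *dev, struct netdev_bpf *bpf)
 *	{
 *		switch (bpf->command) {
 *		case XDP_SETUP_PROG:
 *			return my_xdp_setup(dev, bpf->prog, bpf->extack);
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */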

#define XDP_WAKEUP_RX (1 << 0)
#define XDP_WAKEUP_TX (1 << 1)

#ifdef CONFIG_XFRM_OFFLOAD
struct xfrmdev_ops {
	int	(*xdo_dev_state_add) (struct xfrm_state *x);
	void	(*xdo_dev_state_delete) (struct xfrm_state *x);
	void	(*xdo_dev_state_free) (struct xfrm_state *x);
	bool	(*xdo_dev_offload_ok) (struct sk_buff *skb,
				       struct xfrm_state *x);
	void	(*xdo_dev_state_advance_esn) (struct xfrm_state *x);
};
#endif

struct dev_ifalias {
	struct rcu_head rcuhead;
	char ifalias[];
};

struct devlink;
struct tlsdev_ops;

struct netdev_name_node {
	struct hlist_node hlist;
	struct list_head list;
	struct net_device *dev;
	const char *name;
};

int netdev_name_node_alt_create(struct net_device *dev, const char *name);
int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);

struct netdev_net_notifier {
	struct list_head list;
	struct notifier_block *nb;
};

struct net_device_ops {
	int			(*ndo_init)(struct net_device *dev);
	void			(*ndo_uninit)(struct net_device *dev);
	int			(*ndo_open)(struct net_device *dev);
	int			(*ndo_stop)(struct net_device *dev);
	netdev_tx_t		(*ndo_start_xmit)(struct sk_buff *skb,
						  struct net_device *dev);
	netdev_features_t	(*ndo_features_check)(struct sk_buff *skb,
						      struct net_device *dev,
						      netdev_features_t features);
	u16			(*ndo_select_queue)(struct net_device *dev,
						    struct sk_buff *skb,
						    struct net_device *sb_dev);
	void			(*ndo_change_rx_flags)(struct net_device *dev,
						       int flags);
	void			(*ndo_set_rx_mode)(struct net_device *dev);
	int			(*ndo_set_mac_address)(struct net_device *dev,
						       void *addr);
	int			(*ndo_validate_addr)(struct net_device *dev);
	int			(*ndo_do_ioctl)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
	int			(*ndo_eth_ioctl)(struct net_device *dev,
						 struct ifreq *ifr, int cmd);
	int			(*ndo_siocbond)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
	int			(*ndo_siocwandev)(struct net_device *dev,
						  struct if_settings *ifs);
	int			(*ndo_siocdevprivate)(struct net_device *dev,
						      struct ifreq *ifr,
						      void __user *data, int cmd);
	int			(*ndo_set_config)(struct net_device *dev,
						  struct ifmap *map);
	int			(*ndo_change_mtu)(struct net_device *dev,
						  int new_mtu);
	int			(*ndo_neigh_setup)(struct net_device *dev,
						   struct neigh_parms *);
	void			(*ndo_tx_timeout) (struct net_device *dev,
						   unsigned int txqueue);

	void			(*ndo_get_stats64)(struct net_device *dev,
						   struct rtnl_link_stats64 *storage);
	bool			(*ndo_has_offload_stats)(const struct net_device *dev, int attr_id);
	int			(*ndo_get_offload_stats)(int attr_id,
							 const struct net_device *dev,
							 void *attr_data);
	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);

	int			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
						       __be16 proto, u16 vid);
	int			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
							__be16 proto, u16 vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*ndo_poll_controller)(struct net_device *dev);
	int			(*ndo_netpoll_setup)(struct net_device *dev,
						     struct netpoll_info *info);
	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
	int			(*ndo_set_vf_mac)(struct net_device *dev,
						  int queue, u8 *mac);
	int			(*ndo_set_vf_vlan)(struct net_device *dev,
						   int queue, u16 vlan,
						   u8 qos, __be16 proto);
	int			(*ndo_set_vf_rate)(struct net_device *dev,
						   int vf, int min_tx_rate,
						   int max_tx_rate);
	int			(*ndo_set_vf_spoofchk)(struct net_device *dev,
						       int vf, bool setting);
	int			(*ndo_set_vf_trust)(struct net_device *dev,
						    int vf, bool setting);
	int			(*ndo_get_vf_config)(struct net_device *dev,
						     int vf,
						     struct ifla_vf_info *ivf);
	int			(*ndo_set_vf_link_state)(struct net_device *dev,
							 int vf, int link_state);
	int			(*ndo_get_vf_stats)(struct net_device *dev,
						    int vf,
						    struct ifla_vf_stats
						    *vf_stats);
	int			(*ndo_set_vf_port)(struct net_device *dev,
						   int vf,
						   struct nlattr *port[]);
	int			(*ndo_get_vf_port)(struct net_device *dev,
						   int vf, struct sk_buff *skb);
	int			(*ndo_get_vf_guid)(struct net_device *dev,
						   int vf,
						   struct ifla_vf_guid *node_guid,
						   struct ifla_vf_guid *port_guid);
	int			(*ndo_set_vf_guid)(struct net_device *dev,
						   int vf, u64 guid,
						   int guid_type);
	int			(*ndo_set_vf_rss_query_en)(
						   struct net_device *dev,
						   int vf, bool setting);
	int			(*ndo_setup_tc)(struct net_device *dev,
						enum tc_setup_type type,
						void *type_data);
#if IS_ENABLED(CONFIG_FCOE)
	int			(*ndo_fcoe_enable)(struct net_device *dev);
	int			(*ndo_fcoe_disable)(struct net_device *dev);
	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
						      u16 xid,
						      struct scatterlist *sgl,
						      unsigned int sgc);
	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
						     u16 xid);
	int			(*ndo_fcoe_ddp_target)(struct net_device *dev,
						       u16 xid,
						       struct scatterlist *sgl,
						       unsigned int sgc);
	int			(*ndo_fcoe_get_hbainfo)(struct net_device *dev,
							struct netdev_fcoe_hbainfo *hbainfo);
#endif

#if IS_ENABLED(CONFIG_LIBFCOE)
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
						    u64 *wwn, int type);
#endif

#ifdef CONFIG_RFS_ACCEL
	int			(*ndo_rx_flow_steer)(struct net_device *dev,
						     const struct sk_buff *skb,
						     u16 rxq_index,
						     u32 flow_id);
#endif
	int			(*ndo_add_slave)(struct net_device *dev,
						 struct net_device *slave_dev,
						 struct netlink_ext_ack *extack);
	int			(*ndo_del_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	struct net_device*	(*ndo_get_xmit_slave)(struct net_device *dev,
						      struct sk_buff *skb,
						      bool all_slaves);
	struct net_device*	(*ndo_sk_get_lower_dev)(struct net_device *dev,
							struct sock *sk);
	netdev_features_t	(*ndo_fix_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_set_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_neigh_construct)(struct net_device *dev,
						       struct neighbour *n);
	void			(*ndo_neigh_destroy)(struct net_device *dev,
						     struct neighbour *n);

	int			(*ndo_fdb_add)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid,
					       u16 flags,
					       struct netlink_ext_ack *extack);
	int			(*ndo_fdb_del)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid);
	int			(*ndo_fdb_dump)(struct sk_buff *skb,
						struct netlink_callback *cb,
						struct net_device *dev,
						struct net_device *filter_dev,
						int *idx);
	int			(*ndo_fdb_get)(struct sk_buff *skb,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid, u32 portid, u32 seq,
					       struct netlink_ext_ack *extack);
	int			(*ndo_bridge_setlink)(struct net_device *dev,
						      struct nlmsghdr *nlh,
						      u16 flags,
						      struct netlink_ext_ack *extack);
	int			(*ndo_bridge_getlink)(struct sk_buff *skb,
						      u32 pid, u32 seq,
						      struct net_device *dev,
						      u32 filter_mask,
						      int nlflags);
	int			(*ndo_bridge_dellink)(struct net_device *dev,
						      struct nlmsghdr *nlh,
						      u16 flags);
	int			(*ndo_change_carrier)(struct net_device *dev,
						      bool new_carrier);
	int			(*ndo_get_phys_port_id)(struct net_device *dev,
							struct netdev_phys_item_id *ppid);
	int			(*ndo_get_port_parent_id)(struct net_device *dev,
							  struct netdev_phys_item_id *ppid);
	int			(*ndo_get_phys_port_name)(struct net_device *dev,
							  char *name, size_t len);
	void*			(*ndo_dfwd_add_station)(struct net_device *pdev,
							struct net_device *dev);
	void			(*ndo_dfwd_del_station)(struct net_device *pdev,
							void *priv);

	int			(*ndo_set_tx_maxrate)(struct net_device *dev,
						      int queue_index,
						      u32 maxrate);
	int			(*ndo_get_iflink)(const struct net_device *dev);
	int			(*ndo_change_proto_down)(struct net_device *dev,
							 bool proto_down);
	int			(*ndo_fill_metadata_dst)(struct net_device *dev,
							 struct sk_buff *skb);
	void			(*ndo_set_rx_headroom)(struct net_device *dev,
						       int needed_headroom);
	int			(*ndo_bpf)(struct net_device *dev,
					   struct netdev_bpf *bpf);
	int			(*ndo_xdp_xmit)(struct net_device *dev, int n,
						struct xdp_frame **xdp,
						u32 flags);
	struct net_device *	(*ndo_xdp_get_xmit_slave)(struct net_device *dev,
							  struct xdp_buff *xdp);
	int			(*ndo_xsk_wakeup)(struct net_device *dev,
						  u32 queue_id, u32 flags);
	struct devlink_port *	(*ndo_get_devlink_port)(struct net_device *dev);
	int			(*ndo_tunnel_ctl)(struct net_device *dev,
						  struct ip_tunnel_parm *p, int cmd);
	struct net_device *	(*ndo_get_peer_dev)(struct net_device *dev);
	int			(*ndo_fill_forward_path)(struct net_device_path_ctx *ctx,
							 struct net_device_path *path);
};

enum netdev_priv_flags {
	IFF_802_1Q_VLAN			= 1<<0,
	IFF_EBRIDGE			= 1<<1,
	IFF_BONDING			= 1<<2,
	IFF_ISATAP			= 1<<3,
	IFF_WAN_HDLC			= 1<<4,
	IFF_XMIT_DST_RELEASE		= 1<<5,
	IFF_DONT_BRIDGE			= 1<<6,
	IFF_DISABLE_NETPOLL		= 1<<7,
	IFF_MACVLAN_PORT		= 1<<8,
	IFF_BRIDGE_PORT			= 1<<9,
	IFF_OVS_DATAPATH		= 1<<10,
	IFF_TX_SKB_SHARING		= 1<<11,
	IFF_UNICAST_FLT			= 1<<12,
	IFF_TEAM_PORT			= 1<<13,
	IFF_SUPP_NOFCS			= 1<<14,
	IFF_LIVE_ADDR_CHANGE		= 1<<15,
	IFF_MACVLAN			= 1<<16,
	IFF_XMIT_DST_RELEASE_PERM	= 1<<17,
	IFF_L3MDEV_MASTER		= 1<<18,
	IFF_NO_QUEUE			= 1<<19,
	IFF_OPENVSWITCH			= 1<<20,
	IFF_L3MDEV_SLAVE		= 1<<21,
	IFF_TEAM			= 1<<22,
	IFF_RXFH_CONFIGURED		= 1<<23,
	IFF_PHONY_HEADROOM		= 1<<24,
	IFF_MACSEC			= 1<<25,
	IFF_NO_RX_HANDLER		= 1<<26,
	IFF_FAILOVER			= 1<<27,
	IFF_FAILOVER_SLAVE		= 1<<28,
	IFF_L3MDEV_RX_HANDLER		= 1<<29,
	IFF_LIVE_RENAME_OK		= 1<<30,
	IFF_TX_SKB_NO_LINEAR		= 1<<31,
};

#define IFF_802_1Q_VLAN			IFF_802_1Q_VLAN
#define IFF_EBRIDGE			IFF_EBRIDGE
#define IFF_BONDING			IFF_BONDING
#define IFF_ISATAP			IFF_ISATAP
#define IFF_WAN_HDLC			IFF_WAN_HDLC
#define IFF_XMIT_DST_RELEASE		IFF_XMIT_DST_RELEASE
#define IFF_DONT_BRIDGE			IFF_DONT_BRIDGE
#define IFF_DISABLE_NETPOLL		IFF_DISABLE_NETPOLL
#define IFF_MACVLAN_PORT		IFF_MACVLAN_PORT
#define IFF_BRIDGE_PORT			IFF_BRIDGE_PORT
#define IFF_OVS_DATAPATH		IFF_OVS_DATAPATH
#define IFF_TX_SKB_SHARING		IFF_TX_SKB_SHARING
#define IFF_UNICAST_FLT			IFF_UNICAST_FLT
#define IFF_TEAM_PORT			IFF_TEAM_PORT
#define IFF_SUPP_NOFCS			IFF_SUPP_NOFCS
#define IFF_LIVE_ADDR_CHANGE		IFF_LIVE_ADDR_CHANGE
#define IFF_MACVLAN			IFF_MACVLAN
#define IFF_XMIT_DST_RELEASE_PERM	IFF_XMIT_DST_RELEASE_PERM
#define IFF_L3MDEV_MASTER		IFF_L3MDEV_MASTER
#define IFF_NO_QUEUE			IFF_NO_QUEUE
#define IFF_OPENVSWITCH			IFF_OPENVSWITCH
#define IFF_L3MDEV_SLAVE		IFF_L3MDEV_SLAVE
#define IFF_TEAM			IFF_TEAM
#define IFF_RXFH_CONFIGURED		IFF_RXFH_CONFIGURED
#define IFF_PHONY_HEADROOM		IFF_PHONY_HEADROOM
#define IFF_MACSEC			IFF_MACSEC
#define IFF_NO_RX_HANDLER		IFF_NO_RX_HANDLER
#define IFF_FAILOVER			IFF_FAILOVER
#define IFF_FAILOVER_SLAVE		IFF_FAILOVER_SLAVE
#define IFF_L3MDEV_RX_HANDLER		IFF_L3MDEV_RX_HANDLER
#define IFF_LIVE_RENAME_OK		IFF_LIVE_RENAME_OK
#define IFF_TX_SKB_NO_LINEAR		IFF_TX_SKB_NO_LINEAR

enum netdev_ml_priv_type {
	ML_PRIV_NONE,
	ML_PRIV_CAN,
};

struct net_device {
	char			name[IFNAMSIZ];
	struct netdev_name_node	*name_node;
	struct dev_ifalias	__rcu *ifalias;

	unsigned long		mem_end;
	unsigned long		mem_start;
	unsigned long		base_addr;

	unsigned long		state;

	struct list_head	dev_list;
	struct list_head	napi_list;
	struct list_head	unreg_list;
	struct list_head	close_list;
	struct list_head	ptype_all;
	struct list_head	ptype_specific;

	struct {
		struct list_head upper;
		struct list_head lower;
	} adj_list;

	unsigned int		flags;
	unsigned int		priv_flags;
	const struct net_device_ops *netdev_ops;
	int			ifindex;
	unsigned short		gflags;
	unsigned short		hard_header_len;

	unsigned int		mtu;
	unsigned short		needed_headroom;
	unsigned short		needed_tailroom;

	netdev_features_t	features;
	netdev_features_t	hw_features;
	netdev_features_t	wanted_features;
	netdev_features_t	vlan_features;
	netdev_features_t	hw_enc_features;
	netdev_features_t	mpls_features;
	netdev_features_t	gso_partial_features;

	unsigned int		min_mtu;
	unsigned int		max_mtu;
	unsigned short		type;
	unsigned char		min_header_len;
	unsigned char		name_assign_type;

	int			group;

	struct net_device_stats	stats;

	atomic_long_t		rx_dropped;
	atomic_long_t		tx_dropped;
	atomic_long_t		rx_nohandler;

	atomic_t		carrier_up_count;
	atomic_t		carrier_down_count;

#ifdef CONFIG_WIRELESS_EXT
	const struct iw_handler_def *wireless_handlers;
	struct iw_public_data	*wireless_data;
#endif
	const struct ethtool_ops *ethtool_ops;
#ifdef CONFIG_NET_L3_MASTER_DEV
	const struct l3mdev_ops	*l3mdev_ops;
#endif
#if IS_ENABLED(CONFIG_IPV6)
	const struct ndisc_ops	*ndisc_ops;
#endif

#ifdef CONFIG_XFRM_OFFLOAD
	const struct xfrmdev_ops *xfrmdev_ops;
#endif

#if IS_ENABLED(CONFIG_TLS_DEVICE)
	const struct tlsdev_ops *tlsdev_ops;
#endif

	const struct header_ops *header_ops;

	unsigned char		operstate;
	unsigned char		link_mode;

	unsigned char		if_port;
	unsigned char		dma;

	unsigned char		perm_addr[MAX_ADDR_LEN];
	unsigned char		addr_assign_type;
	unsigned char		addr_len;
	unsigned char		upper_level;
	unsigned char		lower_level;

	unsigned short		neigh_priv_len;
	unsigned short		dev_id;
	unsigned short		dev_port;
	unsigned short		padded;

	spinlock_t		addr_list_lock;
	int			irq;

	struct netdev_hw_addr_list	uc;
	struct netdev_hw_addr_list	mc;
	struct netdev_hw_addr_list	dev_addrs;

#ifdef CONFIG_SYSFS
	struct kset		*queues_kset;
#endif
#ifdef CONFIG_LOCKDEP
	struct list_head	unlink_list;
#endif
	unsigned int		promiscuity;
	unsigned int		allmulti;
	bool			uc_promisc;
#ifdef CONFIG_LOCKDEP
	unsigned char		nested_level;
#endif

#if IS_ENABLED(CONFIG_VLAN_8021Q)
	struct vlan_info __rcu	*vlan_info;
#endif
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_port		*dsa_ptr;
#endif
#if IS_ENABLED(CONFIG_TIPC)
	struct tipc_bearer __rcu *tipc_ptr;
#endif
#if IS_ENABLED(CONFIG_IRDA) || IS_ENABLED(CONFIG_ATALK)
	void			*atalk_ptr;
#endif
	struct in_device __rcu	*ip_ptr;
#if IS_ENABLED(CONFIG_DECNET)
	struct dn_dev __rcu	*dn_ptr;
#endif
	struct inet6_dev __rcu	*ip6_ptr;
#if IS_ENABLED(CONFIG_AX25)
	void			*ax25_ptr;
#endif
	struct wireless_dev	*ieee80211_ptr;
	struct wpan_dev		*ieee802154_ptr;
#if IS_ENABLED(CONFIG_MPLS_ROUTING)
	struct mpls_dev __rcu	*mpls_ptr;
#endif
#if IS_ENABLED(CONFIG_MCTP)
	struct mctp_dev __rcu	*mctp_ptr;
#endif

	unsigned char		*dev_addr;

	struct netdev_rx_queue	*_rx;
	unsigned int		num_rx_queues;
	unsigned int		real_num_rx_queues;

	struct bpf_prog __rcu	*xdp_prog;
	unsigned long		gro_flush_timeout;
	int			napi_defer_hard_irqs;
	rx_handler_func_t __rcu	*rx_handler;
	void __rcu		*rx_handler_data;

#ifdef CONFIG_NET_CLS_ACT
	struct mini_Qdisc __rcu	*miniq_ingress;
#endif
	struct netdev_queue __rcu *ingress_queue;
#ifdef CONFIG_NETFILTER_INGRESS
	struct nf_hook_entries __rcu *nf_hooks_ingress;
#endif

	unsigned char		broadcast[MAX_ADDR_LEN];
#ifdef CONFIG_RFS_ACCEL
	struct cpu_rmap		*rx_cpu_rmap;
#endif
	struct hlist_node	index_hlist;

	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;
	unsigned int		num_tx_queues;
	unsigned int		real_num_tx_queues;
	struct Qdisc		*qdisc;
	unsigned int		tx_queue_len;
	spinlock_t		tx_global_lock;

	struct xdp_dev_bulk_queue __percpu *xdp_bulkq;

#ifdef CONFIG_XPS
	struct xps_dev_maps __rcu *xps_maps[XPS_MAPS_MAX];
#endif
#ifdef CONFIG_NET_CLS_ACT
	struct mini_Qdisc __rcu	*miniq_egress;
#endif
#ifdef CONFIG_NETFILTER_EGRESS
	struct nf_hook_entries __rcu *nf_hooks_egress;
#endif

#ifdef CONFIG_NET_SCHED
	DECLARE_HASHTABLE	(qdisc_hash, 4);
#endif

	struct timer_list	watchdog_timer;
	int			watchdog_timeo;

	u32			proto_down_reason;

	struct list_head	todo_list;

#ifdef CONFIG_PCPU_DEV_REFCNT
	int __percpu		*pcpu_refcnt;
#else
	refcount_t		dev_refcnt;
#endif

	struct list_head	link_watch_list;

	enum { NETREG_UNINITIALIZED=0,
	       NETREG_REGISTERED,
	       NETREG_UNREGISTERING,
	       NETREG_UNREGISTERED,
	       NETREG_RELEASED,
	       NETREG_DUMMY,
	} reg_state:8;

	bool dismantle;

	enum {
		RTNL_LINK_INITIALIZED,
		RTNL_LINK_INITIALIZING,
	} rtnl_link_state:16;

	bool needs_free_netdev;
	void (*priv_destructor)(struct net_device *dev);

#ifdef CONFIG_NETPOLL
	struct netpoll_info __rcu	*npinfo;
#endif

	possible_net_t			nd_net;

	void				*ml_priv;
	enum netdev_ml_priv_type	ml_priv_type;

	union {
		struct pcpu_lstats __percpu		*lstats;
		struct pcpu_sw_netstats __percpu	*tstats;
		struct pcpu_dstats __percpu		*dstats;
	};

#if IS_ENABLED(CONFIG_GARP)
	struct garp_port __rcu	*garp_port;
#endif
#if IS_ENABLED(CONFIG_MRP)
	struct mrp_port __rcu	*mrp_port;
#endif

	struct device		dev;
	const struct attribute_group *sysfs_groups[4];
	const struct attribute_group *sysfs_rx_queue_group;

	const struct rtnl_link_ops *rtnl_link_ops;

#define GSO_MAX_SIZE		65536
	unsigned int		gso_max_size;
#define GSO_MAX_SEGS		65535
	u16			gso_max_segs;

#ifdef CONFIG_DCB
	const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
	s16			num_tc;
	struct netdev_tc_txq	tc_to_txq[TC_MAX_QUEUE];
	u8			prio_tc_map[TC_BITMASK + 1];

#if IS_ENABLED(CONFIG_FCOE)
	unsigned int		fcoe_ddp_xid;
#endif
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
	struct netprio_map __rcu *priomap;
#endif
	struct phy_device	*phydev;
	struct sfp_bus		*sfp_bus;
	struct lock_class_key	*qdisc_tx_busylock;
	bool			proto_down;
	unsigned		wol_enabled:1;
	unsigned		threaded:1;

	struct list_head	net_notifier_list;

#if IS_ENABLED(CONFIG_MACSEC)
	const struct macsec_ops *macsec_ops;
#endif
	const struct udp_tunnel_nic_info	*udp_tunnel_nic_info;
	struct udp_tunnel_nic	*udp_tunnel_nic;

	struct bpf_xdp_entity	xdp_state[__MAX_XDP_MODE];
};
#define to_net_dev(d) container_of(d, struct net_device, dev)

static inline bool netif_elide_gro(const struct net_device *dev)
{
	if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog)
		return true;
	return false;
}

#define NETDEV_ALIGN		32

static inline
int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
{
	return dev->prio_tc_map[prio & TC_BITMASK];
}

static inline
int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

	dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
	return 0;
}

int netdev_txq_to_tc(struct net_device *dev, unsigned int txq);
void netdev_reset_tc(struct net_device *dev);
int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset);
int netdev_set_num_tc(struct net_device *dev, u8 num_tc);

static inline
int netdev_get_num_tc(struct net_device *dev)
{
	return dev->num_tc;
}
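
/*
 * Example (sketch): a driver handling TC_SETUP_QDISC_MQPRIO typically
 * mirrors the requested configuration into the netdev with these helpers;
 * the two-queues-per-TC split below is purely illustrative:
 *
 *	netdev_set_num_tc(dev, num_tc);
 *	for (i = 0; i < num_tc; i++)
 *		netdev_set_tc_queue(dev, i, 2, i * 2);	// count, offset
 *	for (prio = 0; prio <= TC_BITMASK; prio++)
 *		netdev_set_prio_tc_map(dev, prio, prio % num_tc);
 */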

static inline void net_prefetch(void *p)
{
	prefetch(p);
#if L1_CACHE_BYTES < 128
	prefetch((u8 *)p + L1_CACHE_BYTES);
#endif
}

static inline void net_prefetchw(void *p)
{
	prefetchw(p);
#if L1_CACHE_BYTES < 128
	prefetchw((u8 *)p + L1_CACHE_BYTES);
#endif
}

void netdev_unbind_sb_channel(struct net_device *dev,
			      struct net_device *sb_dev);
int netdev_bind_sb_channel_queue(struct net_device *dev,
				 struct net_device *sb_dev,
				 u8 tc, u16 count, u16 offset);
int netdev_set_sb_channel(struct net_device *dev, u16 channel);
static inline int netdev_get_sb_channel(struct net_device *dev)
{
	return max_t(int, -dev->num_tc, 0);
}

static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
					 unsigned int index)
{
	return &dev->_tx[index];
}

static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
						    const struct sk_buff *skb)
{
	return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
}

static inline void netdev_for_each_tx_queue(struct net_device *dev,
					    void (*f)(struct net_device *,
						      struct netdev_queue *,
						      void *),
					    void *arg)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		f(dev, &dev->_tx[i], arg);
}

#define netdev_lockdep_set_classes(dev)				\
{								\
	static struct lock_class_key qdisc_tx_busylock_key;	\
	static struct lock_class_key qdisc_xmit_lock_key;	\
	static struct lock_class_key dev_addr_list_lock_key;	\
	unsigned int i;						\
								\
	(dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key;	\
	lockdep_set_class(&(dev)->addr_list_lock,		\
			  &dev_addr_list_lock_key);		\
	for (i = 0; i < (dev)->num_tx_queues; i++)		\
		lockdep_set_class(&(dev)->_tx[i]._xmit_lock,	\
				  &qdisc_xmit_lock_key);	\
}

u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
		   struct net_device *sb_dev);
struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
					 struct sk_buff *skb,
					 struct net_device *sb_dev);

static inline unsigned netdev_get_fwd_headroom(struct net_device *dev)
{
	return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom;
}

static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr)
{
	if (dev->netdev_ops->ndo_set_rx_headroom)
		dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr);
}

static inline void netdev_reset_rx_headroom(struct net_device *dev)
{
	netdev_set_rx_headroom(dev, -1);
}

static inline void *netdev_get_ml_priv(struct net_device *dev,
				       enum netdev_ml_priv_type type)
{
	if (dev->ml_priv_type != type)
		return NULL;

	return dev->ml_priv;
}

static inline void netdev_set_ml_priv(struct net_device *dev,
				      void *ml_priv,
				      enum netdev_ml_priv_type type)
{
	WARN(dev->ml_priv_type && dev->ml_priv_type != type,
	     "Overwriting already set ml_priv_type (%u) with different ml_priv_type (%u)!\n",
	     dev->ml_priv_type, type);
	WARN(!dev->ml_priv_type && dev->ml_priv,
	     "Overwriting already set ml_priv and ml_priv_type is ML_PRIV_NONE!\n");

	dev->ml_priv = ml_priv;
	dev->ml_priv_type = type;
}

static inline
struct net *dev_net(const struct net_device *dev)
{
	return read_pnet(&dev->nd_net);
}

static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
	write_pnet(&dev->nd_net, net);
}

static inline void *netdev_priv(const struct net_device *dev)
{
	return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
}
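
/*
 * Example (sketch): driver private state lives directly behind the
 * net_device, so it is allocated in one go and retrieved for free;
 * "struct my_priv" is a hypothetical name:
 *
 *	struct net_device *dev = alloc_etherdev(sizeof(struct my_priv));
 *	struct my_priv *priv;
 *
 *	if (!dev)
 *		return -ENOMEM;
 *	priv = netdev_priv(dev);
 */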

#define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))

#define SET_NETDEV_DEVTYPE(net, devtype)	((net)->dev.type = (devtype))

#define NAPI_POLL_WEIGHT 64

void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight);

static inline void netif_tx_napi_add(struct net_device *dev,
				     struct napi_struct *napi,
				     int (*poll)(struct napi_struct *, int),
				     int weight)
{
	set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
	netif_napi_add(dev, napi, poll, weight);
}

void __netif_napi_del(struct napi_struct *napi);

static inline void netif_napi_del(struct napi_struct *napi)
{
	__netif_napi_del(napi);
	synchronize_net();
}
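
/*
 * Example (sketch): the usual NAPI lifecycle across a driver's callbacks;
 * "priv->napi" and my_poll() are hypothetical:
 *
 *	probe:	netif_napi_add(dev, &priv->napi, my_poll, NAPI_POLL_WEIGHT);
 *	open:	napi_enable(&priv->napi);
 *	stop:	napi_disable(&priv->napi);
 *	remove:	netif_napi_del(&priv->napi);
 */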

struct napi_gro_cb {
	void		*frag0;
	unsigned int	frag0_len;
	int		data_offset;
	u16		flush;
	u16		flush_id;
	u16		count;
	u16		gro_remcsum_start;
	unsigned long	age;
	u16		proto;
	u8		same_flow:1;
	u8		encap_mark:1;
	u8		csum_valid:1;
	u8		csum_cnt:3;
	u8		free:2;
#define NAPI_GRO_FREE		  1
#define NAPI_GRO_FREE_STOLEN_HEAD 2
	u8		is_ipv6:1;
	u8		is_fou:1;
	u8		is_atomic:1;
	u8		recursion_counter:4;
	u8		is_flist:1;
	__wsum		csum;
	struct sk_buff	*last;
};

#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)

#define GRO_RECURSION_LIMIT 15
static inline int gro_recursion_inc_test(struct sk_buff *skb)
{
	return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
}

typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *);
static inline struct sk_buff *call_gro_receive(gro_receive_t cb,
					       struct list_head *head,
					       struct sk_buff *skb)
{
	if (unlikely(gro_recursion_inc_test(skb))) {
		NAPI_GRO_CB(skb)->flush |= 1;
		return NULL;
	}

	return cb(head, skb);
}

typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *,
					    struct sk_buff *);
static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
						  struct sock *sk,
						  struct list_head *head,
						  struct sk_buff *skb)
{
	if (unlikely(gro_recursion_inc_test(skb))) {
		NAPI_GRO_CB(skb)->flush |= 1;
		return NULL;
	}

	return cb(sk, head, skb);
}

struct packet_type {
	__be16			type;
	bool			ignore_outgoing;
	struct net_device	*dev;
	int			(*func) (struct sk_buff *,
					 struct net_device *,
					 struct packet_type *,
					 struct net_device *);
	void			(*list_func) (struct list_head *,
					      struct packet_type *,
					      struct net_device *);
	bool			(*id_match)(struct packet_type *ptype,
					    struct sock *sk);
	void			*af_packet_priv;
	struct list_head	list;
};

struct offload_callbacks {
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						netdev_features_t features);
	struct sk_buff		*(*gro_receive)(struct list_head *head,
						struct sk_buff *skb);
	int			(*gro_complete)(struct sk_buff *skb, int nhoff);
};

struct packet_offload {
	__be16			 type;
	u16			 priority;
	struct offload_callbacks callbacks;
	struct list_head	 list;
};

struct pcpu_sw_netstats {
	u64	rx_packets;
	u64	rx_bytes;
	u64	tx_packets;
	u64	tx_bytes;
	struct u64_stats_sync	syncp;
} __aligned(4 * sizeof(u64));

struct pcpu_lstats {
	u64_stats_t packets;
	u64_stats_t bytes;
	struct u64_stats_sync syncp;
} __aligned(2 * sizeof(u64));

void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes);

static inline void dev_sw_netstats_rx_add(struct net_device *dev, unsigned int len)
{
	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_bytes += len;
	tstats->rx_packets++;
	u64_stats_update_end(&tstats->syncp);
}

static inline void dev_sw_netstats_tx_add(struct net_device *dev,
					  unsigned int packets,
					  unsigned int len)
{
	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	tstats->tx_bytes += len;
	tstats->tx_packets += packets;
	u64_stats_update_end(&tstats->syncp);
}

static inline void dev_lstats_add(struct net_device *dev, unsigned int len)
{
	struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats);

	u64_stats_update_begin(&lstats->syncp);
	u64_stats_add(&lstats->bytes, len);
	u64_stats_inc(&lstats->packets);
	u64_stats_update_end(&lstats->syncp);
}

#define __netdev_alloc_pcpu_stats(type, gfp)				\
({									\
	typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
	if (pcpu_stats)	{						\
		int __cpu;						\
		for_each_possible_cpu(__cpu) {				\
			typeof(type) *stat;				\
			stat = per_cpu_ptr(pcpu_stats, __cpu);		\
			u64_stats_init(&stat->syncp);			\
		}							\
	}								\
	pcpu_stats;							\
})

#define netdev_alloc_pcpu_stats(type)					\
	__netdev_alloc_pcpu_stats(type, GFP_KERNEL)

#define devm_netdev_alloc_pcpu_stats(dev, type)				\
({									\
	typeof(type) __percpu *pcpu_stats = devm_alloc_percpu(dev, type);\
	if (pcpu_stats) {						\
		int __cpu;						\
		for_each_possible_cpu(__cpu) {				\
			typeof(type) *stat;				\
			stat = per_cpu_ptr(pcpu_stats, __cpu);		\
			u64_stats_init(&stat->syncp);			\
		}							\
	}								\
	pcpu_stats;							\
})
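
/*
 * Example (sketch): a virtual driver usually allocates per-CPU counters in
 * its setup path and bumps them from the datapath; error handling trimmed:
 *
 *	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
 *	...
 *	dev_sw_netstats_rx_add(dev, skb->len);	// per-packet RX accounting
 */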

enum netdev_lag_tx_type {
	NETDEV_LAG_TX_TYPE_UNKNOWN,
	NETDEV_LAG_TX_TYPE_RANDOM,
	NETDEV_LAG_TX_TYPE_BROADCAST,
	NETDEV_LAG_TX_TYPE_ROUNDROBIN,
	NETDEV_LAG_TX_TYPE_ACTIVEBACKUP,
	NETDEV_LAG_TX_TYPE_HASH,
};

enum netdev_lag_hash {
	NETDEV_LAG_HASH_NONE,
	NETDEV_LAG_HASH_L2,
	NETDEV_LAG_HASH_L34,
	NETDEV_LAG_HASH_L23,
	NETDEV_LAG_HASH_E23,
	NETDEV_LAG_HASH_E34,
	NETDEV_LAG_HASH_VLAN_SRCMAC,
	NETDEV_LAG_HASH_UNKNOWN,
};

struct netdev_lag_upper_info {
	enum netdev_lag_tx_type tx_type;
	enum netdev_lag_hash hash_type;
};

struct netdev_lag_lower_state_info {
	u8 link_up : 1,
	   tx_enabled : 1;
};

#include <linux/notifier.h>

enum netdev_cmd {
	NETDEV_UP	= 1,
	NETDEV_DOWN,
	NETDEV_REBOOT,
	NETDEV_CHANGE,
	NETDEV_REGISTER,
	NETDEV_UNREGISTER,
	NETDEV_CHANGEMTU,
	NETDEV_CHANGEADDR,
	NETDEV_PRE_CHANGEADDR,
	NETDEV_GOING_DOWN,
	NETDEV_CHANGENAME,
	NETDEV_FEAT_CHANGE,
	NETDEV_BONDING_FAILOVER,
	NETDEV_PRE_UP,
	NETDEV_PRE_TYPE_CHANGE,
	NETDEV_POST_TYPE_CHANGE,
	NETDEV_POST_INIT,
	NETDEV_RELEASE,
	NETDEV_NOTIFY_PEERS,
	NETDEV_JOIN,
	NETDEV_CHANGEUPPER,
	NETDEV_RESEND_IGMP,
	NETDEV_PRECHANGEMTU,
	NETDEV_CHANGEINFODATA,
	NETDEV_BONDING_INFO,
	NETDEV_PRECHANGEUPPER,
	NETDEV_CHANGELOWERSTATE,
	NETDEV_UDP_TUNNEL_PUSH_INFO,
	NETDEV_UDP_TUNNEL_DROP_INFO,
	NETDEV_CHANGE_TX_QUEUE_LEN,
	NETDEV_CVLAN_FILTER_PUSH_INFO,
	NETDEV_CVLAN_FILTER_DROP_INFO,
	NETDEV_SVLAN_FILTER_PUSH_INFO,
	NETDEV_SVLAN_FILTER_DROP_INFO,
};
const char *netdev_cmd_to_name(enum netdev_cmd cmd);

int register_netdevice_notifier(struct notifier_block *nb);
int unregister_netdevice_notifier(struct notifier_block *nb);
int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb);
int unregister_netdevice_notifier_net(struct net *net,
				      struct notifier_block *nb);
int register_netdevice_notifier_dev_net(struct net_device *dev,
					struct notifier_block *nb,
					struct netdev_net_notifier *nn);
int unregister_netdevice_notifier_dev_net(struct net_device *dev,
					  struct notifier_block *nb,
					  struct netdev_net_notifier *nn);

struct netdev_notifier_info {
	struct net_device	*dev;
	struct netlink_ext_ack	*extack;
};

struct netdev_notifier_info_ext {
	struct netdev_notifier_info info;
	union {
		u32 mtu;
	} ext;
};

struct netdev_notifier_change_info {
	struct netdev_notifier_info info;
	unsigned int flags_changed;
};

struct netdev_notifier_changeupper_info {
	struct netdev_notifier_info info;
	struct net_device *upper_dev;
	bool master;
	bool linking;
	void *upper_info;
};

struct netdev_notifier_changelowerstate_info {
	struct netdev_notifier_info info;
	void *lower_state_info;
};

struct netdev_notifier_pre_changeaddr_info {
	struct netdev_notifier_info info;
	const unsigned char *dev_addr;
};

static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
					     struct net_device *dev)
{
	info->dev = dev;
	info->extack = NULL;
}

static inline struct net_device *
netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
{
	return info->dev;
}

static inline struct netlink_ext_ack *
netdev_notifier_info_to_extack(const struct netdev_notifier_info *info)
{
	return info->extack;
}

int call_netdevice_notifiers(unsigned long val, struct net_device *dev);

extern rwlock_t dev_base_lock;

#define for_each_netdev(net, d)		\
		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_reverse(net, d)	\
		list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_rcu(net, d)		\
		list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n)	\
		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d)		\
		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue_reverse(net, d)		\
		list_for_each_entry_continue_reverse(d,		\
						     &(net)->dev_base_head, \
						     dev_list)
#define for_each_netdev_continue_rcu(net, d)		\
	list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_in_bond_rcu(bond, slave)	\
		for_each_netdev_rcu(&init_net, slave)	\
			if (netdev_master_upper_dev_get_rcu(slave) == (bond))
#define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)
2905
2906static inline struct net_device *next_net_device(struct net_device *dev)
2907{
2908 struct list_head *lh;
2909 struct net *net;
2910
2911 net = dev_net(dev);
2912 lh = dev->dev_list.next;
2913 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2914}
2915
2916static inline struct net_device *next_net_device_rcu(struct net_device *dev)
2917{
2918 struct list_head *lh;
2919 struct net *net;
2920
2921 net = dev_net(dev);
2922 lh = rcu_dereference(list_next_rcu(&dev->dev_list));
2923 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2924}
2925
2926static inline struct net_device *first_net_device(struct net *net)
2927{
2928 return list_empty(&net->dev_base_head) ? NULL :
2929 net_device_entry(net->dev_base_head.next);
2930}
2931
2932static inline struct net_device *first_net_device_rcu(struct net *net)
2933{
2934 struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
2935
2936 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2937}
2938
2939int netdev_boot_setup_check(struct net_device *dev);
2940struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
2941 const char *hwaddr);
2942struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
2943void dev_add_pack(struct packet_type *pt);
2944void dev_remove_pack(struct packet_type *pt);
2945void __dev_remove_pack(struct packet_type *pt);
2946void dev_add_offload(struct packet_offload *po);
2947void dev_remove_offload(struct packet_offload *po);
2948
2949int dev_get_iflink(const struct net_device *dev);
2950int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
2951int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
2952 struct net_device_path_stack *stack);
2953struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
2954 unsigned short mask);
2955struct net_device *dev_get_by_name(struct net *net, const char *name);
2956struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
2957struct net_device *__dev_get_by_name(struct net *net, const char *name);
2958bool netdev_name_in_use(struct net *net, const char *name);
2959int dev_alloc_name(struct net_device *dev, const char *name);
2960int dev_open(struct net_device *dev, struct netlink_ext_ack *extack);
2961void dev_close(struct net_device *dev);
2962void dev_close_many(struct list_head *head, bool unlink);
2963void dev_disable_lro(struct net_device *dev);
2964int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
2965u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
2966 struct net_device *sb_dev);
2967u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
2968 struct net_device *sb_dev);
2969
2970int dev_queue_xmit(struct sk_buff *skb);
2971int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
2972int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
2973
2974static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
2975{
2976 int ret;
2977
2978 ret = __dev_direct_xmit(skb, queue_id);
2979 if (!dev_xmit_complete(ret))
2980 kfree_skb(skb);
2981 return ret;
2982}
2983
2984int register_netdevice(struct net_device *dev);
2985void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
2986void unregister_netdevice_many(struct list_head *head);
2987static inline void unregister_netdevice(struct net_device *dev)
2988{
2989 unregister_netdevice_queue(dev, NULL);
2990}
2991
2992int netdev_refcnt_read(const struct net_device *dev);
2993void free_netdev(struct net_device *dev);
2994void netdev_freemem(struct net_device *dev);
2995int init_dummy_netdev(struct net_device *dev);
2996
2997struct net_device *netdev_get_xmit_slave(struct net_device *dev,
2998 struct sk_buff *skb,
2999 bool all_slaves);
3000struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev,
3001 struct sock *sk);
3002struct net_device *dev_get_by_index(struct net *net, int ifindex);
3003struct net_device *__dev_get_by_index(struct net *net, int ifindex);
3004struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
3005struct net_device *dev_get_by_napi_id(unsigned int napi_id);
3006int netdev_get_name(struct net *net, char *name, int ifindex);
3007int dev_restart(struct net_device *dev);
3008int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
3009int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb);
3010
3011static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
3012{
3013 return NAPI_GRO_CB(skb)->data_offset;
3014}
3015
3016static inline unsigned int skb_gro_len(const struct sk_buff *skb)
3017{
3018 return skb->len - NAPI_GRO_CB(skb)->data_offset;
3019}
3020
3021static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
3022{
3023 NAPI_GRO_CB(skb)->data_offset += len;
3024}
3025
3026static inline void *skb_gro_header_fast(struct sk_buff *skb,
3027 unsigned int offset)
3028{
3029 return NAPI_GRO_CB(skb)->frag0 + offset;
3030}
3031
3032static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
3033{
3034 return NAPI_GRO_CB(skb)->frag0_len < hlen;
3035}
3036
3037static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
3038{
3039 NAPI_GRO_CB(skb)->frag0 = NULL;
3040 NAPI_GRO_CB(skb)->frag0_len = 0;
3041}
3042
3043static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
3044 unsigned int offset)
3045{
3046 if (!pskb_may_pull(skb, hlen))
3047 return NULL;
3048
3049 skb_gro_frag0_invalidate(skb);
3050 return skb->data + offset;
3051}
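
/* Typical (illustrative) header access pattern in a gro_receive handler,
 * modelled on in-tree users such as tcp_gro_receive(): try the frag0
 * fast path first, then fall back to pulling the header into the linear
 * area.  "off", "hlen" and "th" are hypothetical locals.
 *
 *	off = skb_gro_offset(skb);
 *	hlen = off + sizeof(*th);
 *	th = skb_gro_header_fast(skb, off);
 *	if (skb_gro_header_hard(skb, hlen)) {
 *		th = skb_gro_header_slow(skb, hlen, off);
 *		if (unlikely(!th))
 *			goto out;
 *	}
 */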

static inline void *skb_gro_network_header(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
	       skb_network_offset(skb);
}

static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
					  const void *start, unsigned int len)
{
	if (NAPI_GRO_CB(skb)->csum_valid)
		NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
						  csum_partial(start, len, 0));
}

/* GRO checksum functions. These are logical equivalents of the normal
 * checksum functions (in skbuff.h) except that they operate on the GRO
 * offsets and fields in sk_buff.
 */

__sum16 __skb_gro_checksum_complete(struct sk_buff *skb);

static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
}

static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
						      bool zero_okay,
						      __sum16 check)
{
	return ((skb->ip_summed != CHECKSUM_PARTIAL ||
		skb_checksum_start_offset(skb) <
		 skb_gro_offset(skb)) &&
		!skb_at_gro_remcsum_start(skb) &&
		NAPI_GRO_CB(skb)->csum_cnt == 0 &&
		(!zero_okay || check));
}

static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
							   __wsum psum)
{
	if (NAPI_GRO_CB(skb)->csum_valid &&
	    !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
		return 0;

	NAPI_GRO_CB(skb)->csum = psum;

	return __skb_gro_checksum_complete(skb);
}

static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
{
	if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
		/* Consume a checksum from CHECKSUM_UNNECESSARY */
		NAPI_GRO_CB(skb)->csum_cnt--;
	} else {
		/* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
		 * verified a new top level checksum or an encapsulated one
		 * during GRO. This saves work if we fallback to normal path.
		 */
		__skb_incr_checksum_unnecessary(skb);
	}
}

#define __skb_gro_checksum_validate(skb, proto, zero_okay, check,	\
				    compute_pseudo)			\
({									\
	__sum16 __ret = 0;						\
	if (__skb_gro_checksum_validate_needed(skb, zero_okay, check))	\
		__ret = __skb_gro_checksum_validate_complete(skb,	\
				compute_pseudo(skb, proto));		\
	if (!__ret)							\
		skb_gro_incr_csum_unnecessary(skb);			\
	__ret;								\
})

#define skb_gro_checksum_validate(skb, proto, compute_pseudo)		\
	__skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)

#define skb_gro_checksum_validate_zero_check(skb, proto, check,	\
					     compute_pseudo)		\
	__skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)

#define skb_gro_checksum_simple_validate(skb)				\
	__skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)

static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
		!NAPI_GRO_CB(skb)->csum_valid);
}

static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
					      __wsum pseudo)
{
	NAPI_GRO_CB(skb)->csum = ~pseudo;
	NAPI_GRO_CB(skb)->csum_valid = 1;
}

#define skb_gro_checksum_try_convert(skb, proto, compute_pseudo)	\
do {									\
	if (__skb_gro_checksum_convert_check(skb))			\
		__skb_gro_checksum_convert(skb,				\
					   compute_pseudo(skb, proto));	\
} while (0)

struct gro_remcsum {
	int offset;
	__wsum delta;
};

static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
{
	grc->offset = 0;
	grc->delta = 0;
}

static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
					    unsigned int off, size_t hdrlen,
					    int start, int offset,
					    struct gro_remcsum *grc,
					    bool nopartial)
{
	__wsum delta;
	size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);

	BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);

	if (!nopartial) {
		NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
		return ptr;
	}

	ptr = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, off + plen)) {
		ptr = skb_gro_header_slow(skb, off + plen, off);
		if (!ptr)
			return NULL;
	}

	delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
			       start, offset);

	/* Adjust skb->csum since we changed the packet */
	NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);

	grc->offset = off + hdrlen + offset;
	grc->delta = delta;

	return ptr;
}

static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
					   struct gro_remcsum *grc)
{
	void *ptr;
	size_t plen = grc->offset + sizeof(u16);

	if (!grc->delta)
		return;

	ptr = skb_gro_header_fast(skb, grc->offset);
	if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
		ptr = skb_gro_header_slow(skb, plen, grc->offset);
		if (!ptr)
			return;
	}

	remcsum_unadjust((__sum16 *)ptr, grc->delta);
}

#ifdef CONFIG_XFRM_OFFLOAD
static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
{
	if (PTR_ERR(pp) != -EINPROGRESS)
		NAPI_GRO_CB(skb)->flush |= flush;
}
static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
					       struct sk_buff *pp,
					       int flush,
					       struct gro_remcsum *grc)
{
	if (PTR_ERR(pp) != -EINPROGRESS) {
		NAPI_GRO_CB(skb)->flush |= flush;
		skb_gro_remcsum_cleanup(skb, grc);
		skb->remcsum_offload = 0;
	}
}
#else
static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
{
	NAPI_GRO_CB(skb)->flush |= flush;
}
static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
					       struct sk_buff *pp,
					       int flush,
					       struct gro_remcsum *grc)
{
	NAPI_GRO_CB(skb)->flush |= flush;
	skb_gro_remcsum_cleanup(skb, grc);
	skb->remcsum_offload = 0;
}
#endif

static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
				  unsigned short type,
				  const void *daddr, const void *saddr,
				  unsigned int len)
{
	if (!dev->header_ops || !dev->header_ops->create)
		return 0;

	return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
}

static inline int dev_parse_header(const struct sk_buff *skb,
				   unsigned char *haddr)
{
	const struct net_device *dev = skb->dev;

	if (!dev->header_ops || !dev->header_ops->parse)
		return 0;
	return dev->header_ops->parse(skb, haddr);
}

static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb)
{
	const struct net_device *dev = skb->dev;

	if (!dev->header_ops || !dev->header_ops->parse_protocol)
		return 0;
	return dev->header_ops->parse_protocol(skb);
}

/* ll_header must have at least hard_header_len allocated */
static inline bool dev_validate_header(const struct net_device *dev,
				       char *ll_header, int len)
{
	if (likely(len >= dev->hard_header_len))
		return true;
	if (len < dev->min_header_len)
		return false;

	if (capable(CAP_SYS_RAWIO)) {
		memset(ll_header + len, 0, dev->hard_header_len - len);
		return true;
	}

	if (dev->header_ops && dev->header_ops->validate)
		return dev->header_ops->validate(ll_header, len);

	return false;
}

static inline bool dev_has_header(const struct net_device *dev)
{
	return dev->header_ops && dev->header_ops->create;
}

#ifdef CONFIG_NET_FLOW_LIMIT
#define FLOW_LIMIT_HISTORY	(1 << 7)
struct sd_flow_limit {
	u64			count;
	unsigned int		num_buckets;
	unsigned int		history_head;
	u16			history[FLOW_LIMIT_HISTORY];
	u8			buckets[];
};

extern int netdev_flow_limit_table_len;
#endif

/*
 * Incoming packets are placed on per-CPU queues
 */
struct softnet_data {
	struct list_head	poll_list;
	struct sk_buff_head	process_queue;

	/* stats */
	unsigned int		processed;
	unsigned int		time_squeeze;
	unsigned int		received_rps;
#ifdef CONFIG_RPS
	struct softnet_data	*rps_ipi_list;
#endif
#ifdef CONFIG_NET_FLOW_LIMIT
	struct sd_flow_limit __rcu *flow_limit;
#endif
	struct Qdisc		*output_queue;
	struct Qdisc		**output_queue_tailp;
	struct sk_buff		*completion_queue;
#ifdef CONFIG_XFRM_OFFLOAD
	struct sk_buff_head	xfrm_backlog;
#endif
	/* written and read only by owning cpu: */
	struct {
		u16 recursion;
		u8  more;
	} xmit;
#ifdef CONFIG_RPS
	/* input_queue_head should be written by cpu owning this struct,
	 * and only read by other cpus. Worth using a cache line for it.
	 */
	unsigned int		input_queue_head ____cacheline_aligned_in_smp;

	/* Elements below can be accessed between CPUs for RPS/RFS */
	call_single_data_t	csd ____cacheline_aligned_in_smp;
	struct softnet_data	*rps_ipi_next;
	unsigned int		cpu;
	unsigned int		input_queue_tail;
#endif
	unsigned int		dropped;
	struct sk_buff_head	input_pkt_queue;
	struct napi_struct	backlog;

};

static inline void input_queue_head_incr(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	sd->input_queue_head++;
#endif
}

static inline void input_queue_tail_incr_save(struct softnet_data *sd,
					      unsigned int *qtail)
{
#ifdef CONFIG_RPS
	*qtail = ++sd->input_queue_tail;
#endif
}

DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);

static inline int dev_recursion_level(void)
{
	return this_cpu_read(softnet_data.xmit.recursion);
}

#define XMIT_RECURSION_LIMIT	8
static inline bool dev_xmit_recursion(void)
{
	return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
			XMIT_RECURSION_LIMIT);
}

static inline void dev_xmit_recursion_inc(void)
{
	__this_cpu_inc(softnet_data.xmit.recursion);
}

static inline void dev_xmit_recursion_dec(void)
{
	__this_cpu_dec(softnet_data.xmit.recursion);
}

void __netif_schedule(struct Qdisc *q);
void netif_schedule_queue(struct netdev_queue *txq);

static inline void netif_tx_schedule_all(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		netif_schedule_queue(netdev_get_tx_queue(dev, i));
}

static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
	clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 *	netif_start_queue - allow transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 */
static inline void netif_start_queue(struct net_device *dev)
{
	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_start_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_start_queue(txq);
	}
}

void netif_tx_wake_queue(struct netdev_queue *dev_queue);

/**
 *	netif_wake_queue - restart transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are available.
 */
static inline void netif_wake_queue(struct net_device *dev)
{
	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_wake_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_wake_queue(txq);
	}
}

static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
	set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 *	netif_stop_queue - stop transmitted packets
 *	@dev: network device
 *
 *	Stop upper layers calling the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are unavailable.
 */
static inline void netif_stop_queue(struct net_device *dev)
{
	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}
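
/* Illustrative single-queue flow control (a sketch, not part of this
 * header): stop the queue from ndo_start_xmit() when the TX ring fills
 * and wake it from the TX completion handler once descriptors are
 * reclaimed.  "my_tx_ring_full", "my_tx_free_count" and
 * "MY_WAKE_THRESHOLD" are hypothetical driver internals.
 *
 *	In ndo_start_xmit(), after queueing the skb:
 *		if (my_tx_ring_full(priv))
 *			netif_stop_queue(dev);
 *
 *	In the TX completion handler:
 *		if (netif_queue_stopped(dev) &&
 *		    my_tx_free_count(priv) >= MY_WAKE_THRESHOLD)
 *			netif_wake_queue(dev);
 */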

void netif_tx_stop_all_queues(struct net_device *dev);

static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
	return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 *	netif_queue_stopped - test if transmit queue is flowblocked
 *	@dev: network device
 *
 *	Test if transmit queue on device is currently unable to send.
 */
static inline bool netif_queue_stopped(const struct net_device *dev)
{
	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}

static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_ANY_XOFF;
}

static inline bool
netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
}

static inline bool
netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
}

/**
 *	netdev_queue_set_dql_min_limit - set dql minimum limit
 *	@dev_queue: pointer to transmit queue
 *	@min_limit: dql minimum limit
 *
 * Forces xmit_more() to return true until the minimum threshold
 * defined by @min_limit is reached (or until the tx queue is
 * empty). Warning: to be used with care, misuse will impact the
 * latency.
 */
static inline void netdev_queue_set_dql_min_limit(struct netdev_queue *dev_queue,
						  unsigned int min_limit)
{
#ifdef CONFIG_BQL
	dev_queue->dql.min_limit = min_limit;
#endif
}

/**
 *	netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
 *	@dev_queue: pointer to transmit queue
 *
 * BQL enabled drivers might use this helper in their ndo_start_xmit(),
 * to give appropriate hint to the CPU.
 */
static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_BQL
	prefetchw(&dev_queue->dql.num_queued);
#endif
}

/**
 *	netdev_txq_bql_complete_prefetchw - prefetch bql data for write
 *	@dev_queue: pointer to transmit queue
 *
 * BQL enabled drivers might use this helper in their TX completion path,
 * to give appropriate hint to the CPU.
 */
static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_BQL
	prefetchw(&dev_queue->dql.limit);
#endif
}

static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
					unsigned int bytes)
{
#ifdef CONFIG_BQL
	dql_queued(&dev_queue->dql, bytes);

	if (likely(dql_avail(&dev_queue->dql) >= 0))
		return;

	set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);

	/*
	 * The XOFF flag must be set before checking the dql_avail below,
	 * because in netdev_tx_completed_queue we update the dql_completed
	 * before checking the XOFF flag.
	 */
	smp_mb();

	/* check again in case another CPU has just made room avail */
	if (unlikely(dql_avail(&dev_queue->dql) >= 0))
		clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
#endif
}

/* Variant of netdev_tx_sent_queue() for drivers that are aware
 * that they should not test BQL status themselves.
 * We do want to change __QUEUE_STATE_STACK_XOFF only for the last
 * skb of a batch.
 * Returns true if the doorbell must be used to kick the NIC.
 */
static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
					  unsigned int bytes,
					  bool xmit_more)
{
	if (xmit_more) {
#ifdef CONFIG_BQL
		dql_queued(&dev_queue->dql, bytes);
#endif
		return netif_tx_queue_stopped(dev_queue);
	}
	netdev_tx_sent_queue(dev_queue, bytes);
	return true;
}

/**
 *	netdev_sent_queue - report the number of bytes queued to hardware
 *	@dev: network device
 *	@bytes: number of bytes queued to the hardware device queue
 *
 *	Report the number of bytes queued for sending/completion to the network
 *	device hardware queue. @bytes should be a good approximation and should
 *	exactly match netdev_completed_queue() @bytes.
 */
static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
{
	netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
}

static inline bool __netdev_sent_queue(struct net_device *dev,
				       unsigned int bytes,
				       bool xmit_more)
{
	return __netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes,
				      xmit_more);
}

static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
					     unsigned int pkts, unsigned int bytes)
{
#ifdef CONFIG_BQL
	if (unlikely(!bytes))
		return;

	dql_completed(&dev_queue->dql, bytes);

	/*
	 * Without the memory barrier there is a small possibility that
	 * netdev_tx_sent_queue will miss the update and cause the queue to
	 * be stopped forever
	 */
	smp_mb();

	if (unlikely(dql_avail(&dev_queue->dql) < 0))
		return;

	if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
		netif_schedule_queue(dev_queue);
#endif
}

/**
 *	netdev_completed_queue - report bytes and packets completed by device
 *	@dev: network device
 *	@pkts: actual number of packets sent over the medium
 *	@bytes: actual number of bytes sent over the medium
 *
 *	Report the number of bytes and packets transmitted by the network device
 *	hardware queue over the physical medium, @bytes must exactly match the
 *	@bytes amount passed to netdev_sent_queue()
 */
static inline void netdev_completed_queue(struct net_device *dev,
					  unsigned int pkts, unsigned int bytes)
{
	netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
}
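
/* Illustrative BQL pairing (a sketch, not part of this header): report
 * bytes when descriptors are posted and again when the hardware
 * completes them; the two byte counts must match.  "txq", "pkts_done"
 * and "bytes_done" are hypothetical driver locals.
 *
 *	In ndo_start_xmit(), after posting the descriptor:
 *		netdev_tx_sent_queue(txq, skb->len);
 *
 *	In the TX completion handler, after reclaiming descriptors:
 *		netdev_tx_completed_queue(txq, pkts_done, bytes_done);
 *
 *	And when the ring is flushed (e.g. on down/reset):
 *		netdev_tx_reset_queue(txq);
 */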

static inline void netdev_tx_reset_queue(struct netdev_queue *q)
{
#ifdef CONFIG_BQL
	clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
	dql_reset(&q->dql);
#endif
}

/**
 *	netdev_reset_queue - reset the packets and bytes count of a network device
 *	@dev_queue: network device
 *
 *	Reset the bytes and packet count of a network device and clear the
 *	software flow control OFF bit for this network device
 */
static inline void netdev_reset_queue(struct net_device *dev_queue)
{
	netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
}

/**
 *	netdev_cap_txqueue - check if selected tx queue exceeds device queues
 *	@dev: network device
 *	@queue_index: given tx queue index
 *
 *	Returns 0 if given tx queue index >= number of device tx queues,
 *	otherwise returns the originally passed tx queue index.
 */
static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
		net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
				     dev->name, queue_index,
				     dev->real_num_tx_queues);
		return 0;
	}

	return queue_index;
}

/**
 *	netif_running - test if up
 *	@dev: network device
 *
 *	Test if the device has been brought up.
 */
static inline bool netif_running(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}

/*
 * Routines to manage the subqueues on a device.  We only need start,
 * stop, and a check if it's stopped.  All other device management is
 * done at the overall netdevice level.
 * Also test the device if we're multiqueue.
 */

/**
 *	netif_start_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Start individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	netif_tx_start_queue(txq);
}

/**
 *	netif_stop_subqueue - stop sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Stop individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
	netif_tx_stop_queue(txq);
}

/**
 *	__netif_subqueue_stopped - test status of subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Check individual transmit queue of a device with multiple transmit queues.
 */
static inline bool __netif_subqueue_stopped(const struct net_device *dev,
					    u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	return netif_tx_queue_stopped(txq);
}

/**
 *	netif_subqueue_stopped - test status of subqueue
 *	@dev: network device
 *	@skb: sub queue buffer pointer
 *
 * Check individual transmit queue of a device with multiple transmit queues.
 */
static inline bool netif_subqueue_stopped(const struct net_device *dev,
					  struct sk_buff *skb)
{
	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}

/**
 *	netif_wake_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Resume individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	netif_tx_wake_queue(txq);
}

#ifdef CONFIG_XPS
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
			u16 index);
int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
			  u16 index, enum xps_map_type type);

/**
 *	netif_attr_test_mask - Test a CPU or Rx queue set in a mask
 *	@j: CPU/Rx queue index
 *	@mask: bitmask of all cpus/rx queues
 *	@nr_bits: number of bits in the bitmask
 *
 * Test if a CPU or Rx queue index is set in a mask of all CPU/Rx queues.
 */
static inline bool netif_attr_test_mask(unsigned long j,
					const unsigned long *mask,
					unsigned int nr_bits)
{
	cpu_max_bits_warn(j, nr_bits);
	return test_bit(j, mask);
}

/**
 *	netif_attr_test_online - Test for online CPU/Rx queue
 *	@j: CPU/Rx queue index
 *	@online_mask: bitmask for CPUs/Rx queues that are online
 *	@nr_bits: number of bits in the bitmask
 *
 * Returns true if a CPU/Rx queue is online.
 */
static inline bool netif_attr_test_online(unsigned long j,
					  const unsigned long *online_mask,
					  unsigned int nr_bits)
{
	cpu_max_bits_warn(j, nr_bits);

	if (online_mask)
		return test_bit(j, online_mask);

	return (j < nr_bits);
}

/**
 *	netif_attrmask_next - get the next CPU/Rx queue in a cpu/Rx queues mask
 *	@n: CPU/Rx queue index
 *	@srcp: the cpumask/Rx queue mask pointer
 *	@nr_bits: number of bits in the bitmask
 *
 * Returns >= nr_bits if no further CPUs/Rx queues set.
 */
static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp,
					       unsigned int nr_bits)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpu_max_bits_warn(n, nr_bits);

	if (srcp)
		return find_next_bit(srcp, nr_bits, n + 1);

	return n + 1;
}

/**
 *	netif_attrmask_next_and - get the next CPU/Rx queue in *src1p & *src2p
 *	@n: CPU/Rx queue index
 *	@src1p: the first CPUs/Rx queues mask pointer
 *	@src2p: the second CPUs/Rx queues mask pointer
 *	@nr_bits: number of bits in the bitmask
 *
 * Returns >= nr_bits if no further CPUs/Rx queues set in both.
 */
static inline int netif_attrmask_next_and(int n, const unsigned long *src1p,
					  const unsigned long *src2p,
					  unsigned int nr_bits)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpu_max_bits_warn(n, nr_bits);

	if (src1p && src2p)
		return find_next_and_bit(src1p, src2p, nr_bits, n + 1);
	else if (src1p)
		return find_next_bit(src1p, nr_bits, n + 1);
	else if (src2p)
		return find_next_bit(src2p, nr_bits, n + 1);

	return n + 1;
}
#else
static inline int netif_set_xps_queue(struct net_device *dev,
				      const struct cpumask *mask,
				      u16 index)
{
	return 0;
}

static inline int __netif_set_xps_queue(struct net_device *dev,
					const unsigned long *mask,
					u16 index, enum xps_map_type type)
{
	return 0;
}
#endif

/**
 *	netif_is_multiqueue - test if device has multiple transmit queues
 *	@dev: network device
 *
 * Check if device has multiple transmit queues
 */
static inline bool netif_is_multiqueue(const struct net_device *dev)
{
	return dev->num_tx_queues > 1;
}

int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);

#ifdef CONFIG_SYSFS
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
#else
static inline int netif_set_real_num_rx_queues(struct net_device *dev,
					       unsigned int rxqs)
{
	dev->real_num_rx_queues = rxqs;
	return 0;
}
#endif
int netif_set_real_num_queues(struct net_device *dev,
			      unsigned int txq, unsigned int rxq);

static inline struct netdev_rx_queue *
__netif_get_rx_queue(struct net_device *dev, unsigned int rxq)
{
	return dev->_rx + rxq;
}

#ifdef CONFIG_SYSFS
static inline unsigned int get_netdev_rx_queue_index(
		struct netdev_rx_queue *queue)
{
	struct net_device *dev = queue->dev;
	int index = queue - dev->_rx;

	BUG_ON(index >= dev->num_rx_queues);
	return index;
}
#endif

#define DEFAULT_MAX_NUM_RSS_QUEUES	(8)
int netif_get_num_default_rss_queues(void);

enum skb_free_reason {
	SKB_REASON_CONSUMED,
	SKB_REASON_DROPPED,
};

void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);

/*
 * It is not allowed to call kfree_skb() or consume_skb() from hardware
 * interrupt context or with hardware interrupts being disabled.
 * (in_hardirq() || irqs_disabled())
 *
 * We provide four helpers that can be used in following contexts :
 *
 * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
 *  replacing kfree_skb(skb)
 *
 * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
 *  Typically used in place of consume_skb(skb) in TX completion path
 *
 * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
 *  replacing kfree_skb(skb)
 *
 * dev_consume_skb_any(skb) when caller doesn't know its current irq context,
 *  replacing consume_skb(skb)
 */
static inline void dev_kfree_skb_irq(struct sk_buff *skb)
{
	__dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
}

static inline void dev_consume_skb_irq(struct sk_buff *skb)
{
	__dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
}

static inline void dev_kfree_skb_any(struct sk_buff *skb)
{
	__dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
}

static inline void dev_consume_skb_any(struct sk_buff *skb)
{
	__dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
}

u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
			     struct bpf_prog *xdp_prog);
void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog);
int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb);
int netif_rx(struct sk_buff *skb);
int netif_rx_ni(struct sk_buff *skb);
int netif_rx_any_context(struct sk_buff *skb);
int netif_receive_skb(struct sk_buff *skb);
int netif_receive_skb_core(struct sk_buff *skb);
void netif_receive_skb_list(struct list_head *head);
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
void napi_gro_flush(struct napi_struct *napi, bool flush_old);
struct sk_buff *napi_get_frags(struct napi_struct *napi);
gro_result_t napi_gro_frags(struct napi_struct *napi);
struct packet_offload *gro_find_receive_by_type(__be16 type);
struct packet_offload *gro_find_complete_by_type(__be16 type);

static inline void napi_free_frags(struct napi_struct *napi)
{
	kfree_skb(napi->skb);
	napi->skb = NULL;
}

bool netdev_is_rx_handler_busy(struct net_device *dev);
int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data);
void netdev_rx_handler_unregister(struct net_device *dev);

bool dev_valid_name(const char *name);
static inline bool is_socket_ioctl_cmd(unsigned int cmd)
{
	return _IOC_TYPE(cmd) == SOCK_IOC_TYPE;
}
int get_user_ifreq(struct ifreq *ifr, void __user **ifrdata, void __user *arg);
int put_user_ifreq(struct ifreq *ifr, void __user *arg);
int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
	      void __user *data, bool *need_copyout);
int dev_ifconf(struct net *net, struct ifconf __user *ifc);
int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *userdata);
unsigned int dev_get_flags(const struct net_device *);
int __dev_change_flags(struct net_device *dev, unsigned int flags,
		       struct netlink_ext_ack *extack);
int dev_change_flags(struct net_device *dev, unsigned int flags,
		     struct netlink_ext_ack *extack);
void __dev_notify_flags(struct net_device *, unsigned int old_flags,
			unsigned int gchanges);
int dev_change_name(struct net_device *, const char *);
int dev_set_alias(struct net_device *, const char *, size_t);
int dev_get_alias(const struct net_device *, char *, size_t);
int __dev_change_net_namespace(struct net_device *dev, struct net *net,
			       const char *pat, int new_ifindex);
static inline
int dev_change_net_namespace(struct net_device *dev, struct net *net,
			     const char *pat)
{
	return __dev_change_net_namespace(dev, net, pat, 0);
}
int __dev_set_mtu(struct net_device *, int);
int dev_validate_mtu(struct net_device *dev, int mtu,
		     struct netlink_ext_ack *extack);
int dev_set_mtu_ext(struct net_device *dev, int mtu,
		    struct netlink_ext_ack *extack);
int dev_set_mtu(struct net_device *, int);
int dev_change_tx_queue_len(struct net_device *, unsigned long);
void dev_set_group(struct net_device *, int);
int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
			      struct netlink_ext_ack *extack);
int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
			struct netlink_ext_ack *extack);
int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
			     struct netlink_ext_ack *extack);
int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name);
int dev_change_carrier(struct net_device *, bool new_carrier);
int dev_get_phys_port_id(struct net_device *dev,
			 struct netdev_phys_item_id *ppid);
int dev_get_phys_port_name(struct net_device *dev,
			   char *name, size_t len);
int dev_get_port_parent_id(struct net_device *dev,
			   struct netdev_phys_item_id *ppid, bool recurse);
bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b);
int dev_change_proto_down(struct net_device *dev, bool proto_down);
int dev_change_proto_down_generic(struct net_device *dev, bool proto_down);
void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask,
				  u32 value);
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
				    struct netdev_queue *txq, int *ret);

typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
		      int fd, int expected_fd, u32 flags);
int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
u8 dev_xdp_prog_count(struct net_device *dev);
u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode);

int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb);
bool is_skb_forwardable(const struct net_device *dev,
			const struct sk_buff *skb);

static __always_inline bool __is_skb_forwardable(const struct net_device *dev,
						 const struct sk_buff *skb,
						 const bool check_mtu)
{
	const u32 vlan_hdr_len = 4; /* VLAN_HLEN */
	unsigned int len;

	if (!(dev->flags & IFF_UP))
		return false;

	if (!check_mtu)
		return true;

	len = dev->mtu + dev->hard_header_len + vlan_hdr_len;
	if (skb->len <= len)
		return true;

	/* if TSO is enabled, we don't care about the length as the packet
	 * could be forwarded without being segmented before
	 */
	if (skb_is_gso(skb))
		return true;

	return false;
}

static __always_inline int ____dev_forward_skb(struct net_device *dev,
					       struct sk_buff *skb,
					       const bool check_mtu)
{
	if (skb_orphan_frags(skb, GFP_ATOMIC) ||
	    unlikely(!__is_skb_forwardable(dev, skb, check_mtu))) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	skb_scrub_packet(skb, !net_eq(dev_net(dev), dev_net(skb->dev)));
	skb->priority = 0;
	return 0;
}

bool dev_nit_active(struct net_device *dev);
void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);

extern int		netdev_budget;
extern unsigned int	netdev_budget_usecs;

/* Called by rtnetlink.c:rtnl_unlock() */
void netdev_run_todo(void);

/**
 *	dev_put - release reference to device
 *	@dev: network device
 *
 * Release reference to device to allow it to be freed.
 */
static inline void dev_put(struct net_device *dev)
{
	if (dev) {
#ifdef CONFIG_PCPU_DEV_REFCNT
		this_cpu_dec(*dev->pcpu_refcnt);
#else
		refcount_dec(&dev->dev_refcnt);
#endif
	}
}

/**
 *	dev_hold - get reference to device
 *	@dev: network device
 *
 * Hold reference to device to keep it from being freed.
 */
static inline void dev_hold(struct net_device *dev)
{
	if (dev) {
#ifdef CONFIG_PCPU_DEV_REFCNT
		this_cpu_inc(*dev->pcpu_refcnt);
#else
		refcount_inc(&dev->dev_refcnt);
#endif
	}
}
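
/* Illustrative pairing (a sketch, not part of this header): take a
 * reference while stashing a device pointer and drop it when done; an
 * unbalanced dev_hold() keeps unregister_netdevice() waiting forever.
 * "my_ctx" is a hypothetical private structure.
 *
 *	dev_hold(dev);
 *	my_ctx->dev = dev;
 *	...
 *	dev_put(my_ctx->dev);
 *	my_ctx->dev = NULL;
 */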

/* Carrier loss detection, dial on demand. The functions netif_carrier_on
 * and _off may be called from IRQ context, but it is caller
 * who is responsible for serialization of these calls.
 *
 * The name carrier is inappropriate, these functions should really be
 * called netif_lowerlayer_*() because they represent the state of any
 * layer below link layer -- device may be in some low-power state even
 * when carrier is up anyway.
 */
void linkwatch_init_dev(struct net_device *dev);
void linkwatch_fire_event(struct net_device *dev);
void linkwatch_forget_dev(struct net_device *dev);

/**
 *	netif_carrier_ok - test if carrier present
 *	@dev: network device
 *
 * Check if carrier is present on device
 */
static inline bool netif_carrier_ok(const struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}

unsigned long dev_trans_start(struct net_device *dev);

void __netdev_watchdog_up(struct net_device *dev);

void netif_carrier_on(struct net_device *dev);
void netif_carrier_off(struct net_device *dev);
void netif_carrier_event(struct net_device *dev);

/**
 *	netif_dormant_on - mark device as dormant.
 *	@dev: network device
 *
 * Mark device as dormant (as per RFC2863).
 *
 * The dormant state indicates that the relevant interface is not
 * actually in a condition to pass packets (i.e., it is not 'up') but is
 * in a "pending" state, waiting for some external event.  For "on-
 * demand" interfaces, this new state identifies the situation where the
 * interface is waiting for events to place it in the up state.
 */
static inline void netif_dormant_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant_off - set device as not dormant.
 *	@dev: network device
 *
 * Device is not in dormant state.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant - test if device is dormant
 *	@dev: network device
 *
 * Check if device is dormant.
 */
static inline bool netif_dormant(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_DORMANT, &dev->state);
}

/**
 *	netif_testing_on - mark device as under test.
 *	@dev: network device
 *
 * Mark device as under test (as per RFC2863).
 *
 * The testing state indicates that some test(s) must be performed on
 * the interface. After completion of the test, the interface state
 * will change to up, dormant, or down, as appropriate.
 */
static inline void netif_testing_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_TESTING, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_testing_off - set device as not under test.
 *	@dev: network device
 *
 * Device is not in testing state.
 */
static inline void netif_testing_off(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_TESTING, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_testing - test if device is under test
 *	@dev: network device
 *
 * Check if device is under test
 */
static inline bool netif_testing(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_TESTING, &dev->state);
}

/**
 *	netif_oper_up - test if device is operational
 *	@dev: network device
 *
 * Check if carrier is operational
 */
static inline bool netif_oper_up(const struct net_device *dev)
{
	return (dev->operstate == IF_OPER_UP ||
		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}

/**
 *	netif_device_present - is device available or removed
 *	@dev: network device
 *
 * Check if device has not been removed from system.
 */
static inline bool netif_device_present(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_PRESENT, &dev->state);
}

void netif_device_detach(struct net_device *dev);

void netif_device_attach(struct net_device *dev);

/*
 * Network interface message level settings
 */

enum {
	NETIF_MSG_DRV_BIT,
	NETIF_MSG_PROBE_BIT,
	NETIF_MSG_LINK_BIT,
	NETIF_MSG_TIMER_BIT,
	NETIF_MSG_IFDOWN_BIT,
	NETIF_MSG_IFUP_BIT,
	NETIF_MSG_RX_ERR_BIT,
	NETIF_MSG_TX_ERR_BIT,
	NETIF_MSG_TX_QUEUED_BIT,
	NETIF_MSG_INTR_BIT,
	NETIF_MSG_TX_DONE_BIT,
	NETIF_MSG_RX_STATUS_BIT,
	NETIF_MSG_PKTDATA_BIT,
	NETIF_MSG_HW_BIT,
	NETIF_MSG_WOL_BIT,

	/* When you add a new bit above, update netif_msg_class_names array
	 * in net/ethtool/common.c
	 */
	NETIF_MSG_CLASS_COUNT,
};
/* Both ethtool_ops interface and internal driver implementation use u32 */
static_assert(NETIF_MSG_CLASS_COUNT <= 32);

#define __NETIF_MSG_BIT(bit)	((u32)1 << (bit))
#define __NETIF_MSG(name)	__NETIF_MSG_BIT(NETIF_MSG_ ## name ## _BIT)

#define NETIF_MSG_DRV		__NETIF_MSG(DRV)
#define NETIF_MSG_PROBE		__NETIF_MSG(PROBE)
#define NETIF_MSG_LINK		__NETIF_MSG(LINK)
#define NETIF_MSG_TIMER		__NETIF_MSG(TIMER)
#define NETIF_MSG_IFDOWN	__NETIF_MSG(IFDOWN)
#define NETIF_MSG_IFUP		__NETIF_MSG(IFUP)
#define NETIF_MSG_RX_ERR	__NETIF_MSG(RX_ERR)
#define NETIF_MSG_TX_ERR	__NETIF_MSG(TX_ERR)
#define NETIF_MSG_TX_QUEUED	__NETIF_MSG(TX_QUEUED)
#define NETIF_MSG_INTR		__NETIF_MSG(INTR)
#define NETIF_MSG_TX_DONE	__NETIF_MSG(TX_DONE)
#define NETIF_MSG_RX_STATUS	__NETIF_MSG(RX_STATUS)
#define NETIF_MSG_PKTDATA	__NETIF_MSG(PKTDATA)
#define NETIF_MSG_HW		__NETIF_MSG(HW)
#define NETIF_MSG_WOL		__NETIF_MSG(WOL)

#define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)

static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
	/* use default */
	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
		return default_msg_enable_bits;
	if (debug_value == 0)	/* no output */
		return 0;
	/* set low N bits */
	return (1U << debug_value) - 1;
}
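
/* Typical (illustrative) use of the message level helpers: a driver
 * seeds msg_enable from a module parameter and then gates its logging.
 * "debug" and "priv" are hypothetical; -1 selects the default bits.
 *
 *	static int debug = -1;
 *	module_param(debug, int, 0);
 *
 *	priv->msg_enable = netif_msg_init(debug,
 *					  NETIF_MSG_DRV | NETIF_MSG_PROBE |
 *					  NETIF_MSG_LINK);
 *
 *	if (netif_msg_link(priv))
 *		netdev_info(dev, "link is up\n");
 */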

static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
	spin_lock(&txq->_xmit_lock);
	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
	WRITE_ONCE(txq->xmit_lock_owner, cpu);
}

static inline bool __netif_tx_acquire(struct netdev_queue *txq)
{
	__acquire(&txq->_xmit_lock);
	return true;
}

static inline void __netif_tx_release(struct netdev_queue *txq)
{
	__release(&txq->_xmit_lock);
}

static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
	spin_lock_bh(&txq->_xmit_lock);
	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
	WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
}

static inline bool __netif_tx_trylock(struct netdev_queue *txq)
{
	bool ok = spin_trylock(&txq->_xmit_lock);

	if (likely(ok)) {
		/* Pairs with READ_ONCE() in __dev_queue_xmit() */
		WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
	}
	return ok;
}

static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
	WRITE_ONCE(txq->xmit_lock_owner, -1);
	spin_unlock(&txq->_xmit_lock);
}

static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
	WRITE_ONCE(txq->xmit_lock_owner, -1);
	spin_unlock_bh(&txq->_xmit_lock);
}

static inline void txq_trans_update(struct netdev_queue *txq)
{
	if (txq->xmit_lock_owner != -1)
		txq->trans_start = jiffies;
}

/* legacy drivers only, netdev_start_xmit() sets txq->trans_start */
static inline void netif_trans_update(struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	if (txq->trans_start != jiffies)
		txq->trans_start = jiffies;
}

/**
 *	netif_tx_lock - grab network device transmit lock
 *	@dev: network device
 *
 * Get network device transmit lock
 */
static inline void netif_tx_lock(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	spin_lock(&dev->tx_global_lock);
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* We are the only thread of execution doing a
		 * freeze, but we have to grab the _xmit_lock in
		 * order to synchronize with threads which are in
		 * the ->hard_start_xmit() handler and already
		 * checked the frozen bit.
		 */
		__netif_tx_lock(txq, cpu);
		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
		__netif_tx_unlock(txq);
	}
}

static inline void netif_tx_lock_bh(struct net_device *dev)
{
	local_bh_disable();
	netif_tx_lock(dev);
}

static inline void netif_tx_unlock(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* No need to grab the _xmit_lock here.  If the
		 * queue is not stopped for another reason, we
		 * force a schedule.
		 */
		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
		netif_schedule_queue(txq);
	}
	spin_unlock(&dev->tx_global_lock);
}

static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	netif_tx_unlock(dev);
	local_bh_enable();
}

#define HARD_TX_LOCK(dev, txq, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_lock(txq, cpu);		\
	} else {					\
		__netif_tx_acquire(txq);		\
	}						\
}

#define HARD_TX_TRYLOCK(dev, txq)			\
	(((dev->features & NETIF_F_LLTX) == 0) ?	\
		__netif_tx_trylock(txq) :		\
		__netif_tx_acquire(txq))

#define HARD_TX_UNLOCK(dev, txq) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_unlock(txq);			\
	} else {					\
		__netif_tx_release(txq);		\
	}						\
}

static inline void netif_tx_disable(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	local_bh_disable();
	cpu = smp_processor_id();
	spin_lock(&dev->tx_global_lock);
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		__netif_tx_lock(txq, cpu);
		netif_tx_stop_queue(txq);
		__netif_tx_unlock(txq);
	}
	spin_unlock(&dev->tx_global_lock);
	local_bh_enable();
}

static inline void netif_addr_lock(struct net_device *dev)
{
	unsigned char nest_level = 0;

#ifdef CONFIG_LOCKDEP
	nest_level = dev->nested_level;
#endif
	spin_lock_nested(&dev->addr_list_lock, nest_level);
}

static inline void netif_addr_lock_bh(struct net_device *dev)
{
	unsigned char nest_level = 0;

#ifdef CONFIG_LOCKDEP
	nest_level = dev->nested_level;
#endif
	local_bh_disable();
	spin_lock_nested(&dev->addr_list_lock, nest_level);
}

static inline void netif_addr_unlock(struct net_device *dev)
{
	spin_unlock(&dev->addr_list_lock);
}

static inline void netif_addr_unlock_bh(struct net_device *dev)
{
	spin_unlock_bh(&dev->addr_list_lock);
}

/*
 * dev_addrs walker. Should be used only for read access. Call with
 * rcu_read_lock held.
 */
#define for_each_dev_addr(dev, ha) \
		list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)

/* These functions live elsewhere (drivers/net/net_init.c, but related) */

void ether_setup(struct net_device *dev);

/* Support for loadable net-drivers */
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
				    unsigned char name_assign_type,
				    void (*setup)(struct net_device *),
				    unsigned int txqs, unsigned int rxqs);
#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
	alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)

#define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \
	alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \
			 count)

int register_netdev(struct net_device *dev);
void unregister_netdev(struct net_device *dev);

int devm_register_netdev(struct device *dev, struct net_device *ndev);
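
/* Illustrative allocation/registration sequence (a sketch, not part of
 * this header): "struct my_priv" and "my_netdev_ops" are hypothetical;
 * ether_setup() above fills in Ethernet defaults.
 *
 *	dev = alloc_netdev(sizeof(struct my_priv), "my%d",
 *			   NET_NAME_UNKNOWN, ether_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	dev->netdev_ops = &my_netdev_ops;
 *
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 */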

/* General hardware address lists handling functions */
int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
		   struct netdev_hw_addr_list *from_list, int addr_len);
void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
		      struct netdev_hw_addr_list *from_list, int addr_len);
int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
		       struct net_device *dev,
		       int (*sync)(struct net_device *, const unsigned char *),
		       int (*unsync)(struct net_device *,
				     const unsigned char *));
int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list,
			   struct net_device *dev,
			   int (*sync)(struct net_device *,
				       const unsigned char *, int),
			   int (*unsync)(struct net_device *,
					 const unsigned char *, int));
void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list,
			      struct net_device *dev,
			      int (*unsync)(struct net_device *,
					    const unsigned char *, int));
void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
			  struct net_device *dev,
			  int (*unsync)(struct net_device *,
					const unsigned char *));
void __hw_addr_init(struct netdev_hw_addr_list *list);

/* Functions used for device addresses handling */
static inline void
__dev_addr_set(struct net_device *dev, const void *addr, size_t len)
{
	memcpy(dev->dev_addr, addr, len);
}

static inline void dev_addr_set(struct net_device *dev, const u8 *addr)
{
	__dev_addr_set(dev, addr, dev->addr_len);
}

static inline void
dev_addr_mod(struct net_device *dev, unsigned int offset,
	     const void *addr, size_t len)
{
	memcpy(&dev->dev_addr[offset], addr, len);
}

int dev_addr_add(struct net_device *dev, const unsigned char *addr,
		 unsigned char addr_type);
int dev_addr_del(struct net_device *dev, const unsigned char *addr,
		 unsigned char addr_type);
void dev_addr_flush(struct net_device *dev);
int dev_addr_init(struct net_device *dev);

/* Functions used for unicast addresses handling */
int dev_uc_add(struct net_device *dev, const unsigned char *addr);
int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
int dev_uc_del(struct net_device *dev, const unsigned char *addr);
int dev_uc_sync(struct net_device *to, struct net_device *from);
int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
void dev_uc_unsync(struct net_device *to, struct net_device *from);
void dev_uc_flush(struct net_device *dev);
void dev_uc_init(struct net_device *dev);

/**
 *  __dev_uc_sync - Synchronize device's unicast list
 *  @dev:  device to sync
 *  @sync: function to call if address should be added
 *  @unsync: function to call if address should be removed
 *
 *  Add newly added addresses to the interface, and release
 *  addresses that have been deleted.
 */
static inline int __dev_uc_sync(struct net_device *dev,
				int (*sync)(struct net_device *,
					    const unsigned char *),
				int (*unsync)(struct net_device *,
					      const unsigned char *))
{
	return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
}

/**
 *  __dev_uc_unsync - Remove synchronized addresses from device
 *  @dev:  device to sync
 *  @unsync: function to call if address should be removed
 *
 *  Remove all addresses that were added to the device by dev_uc_sync().
 */
static inline void __dev_uc_unsync(struct net_device *dev,
				   int (*unsync)(struct net_device *,
						 const unsigned char *))
{
	__hw_addr_unsync_dev(&dev->uc, dev, unsync);
}

/* Functions used for multicast addresses handling */
int dev_mc_add(struct net_device *dev, const unsigned char *addr);
int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
int dev_mc_del(struct net_device *dev, const unsigned char *addr);
int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
int dev_mc_sync(struct net_device *to, struct net_device *from);
int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
void dev_mc_unsync(struct net_device *to, struct net_device *from);
void dev_mc_flush(struct net_device *dev);
void dev_mc_init(struct net_device *dev);

/**
 *  __dev_mc_sync - Synchronize device's multicast list
 *  @dev:  device to sync
 *  @sync: function to call if address should be added
 *  @unsync: function to call if address should be removed
 *
 *  Add newly added addresses to the interface, and release
 *  addresses that have been deleted.
 */
static inline int __dev_mc_sync(struct net_device *dev,
				int (*sync)(struct net_device *,
					    const unsigned char *),
				int (*unsync)(struct net_device *,
					      const unsigned char *))
{
	return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
}

/**
 *  __dev_mc_unsync - Remove synchronized addresses from device
 *  @dev:  device to sync
 *  @unsync: function to call if address should be removed
 *
 *  Remove all addresses that were added to the device by dev_mc_sync().
 */
static inline void __dev_mc_unsync(struct net_device *dev,
				   int (*unsync)(struct net_device *,
						 const unsigned char *))
{
	__hw_addr_unsync_dev(&dev->mc, dev, unsync);
}
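
/* Illustrative ndo_set_rx_mode using the sync helpers above (a sketch,
 * not part of this header); the hypothetical my_uc_add()/my_uc_del()
 * and my_mc_add()/my_mc_del() callbacks each program or remove one
 * hardware filter entry and return 0 on success:
 *
 *	static void my_set_rx_mode(struct net_device *dev)
 *	{
 *		__dev_uc_sync(dev, my_uc_add, my_uc_del);
 *		__dev_mc_sync(dev, my_mc_add, my_mc_del);
 *	}
 */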
4762
4763
4764void dev_set_rx_mode(struct net_device *dev);
4765void __dev_set_rx_mode(struct net_device *dev);
4766int dev_set_promiscuity(struct net_device *dev, int inc);
4767int dev_set_allmulti(struct net_device *dev, int inc);
4768void netdev_state_change(struct net_device *dev);
4769void __netdev_notify_peers(struct net_device *dev);
4770void netdev_notify_peers(struct net_device *dev);
4771void netdev_features_change(struct net_device *dev);
4772
4773void dev_load(struct net *net, const char *name);
4774struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
4775 struct rtnl_link_stats64 *storage);
4776void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
4777 const struct net_device_stats *netdev_stats);
4778void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
4779 const struct pcpu_sw_netstats __percpu *netstats);
4780void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s);
4781
4782extern int netdev_max_backlog;
4783extern int netdev_tstamp_prequeue;
4784extern int netdev_unregister_timeout_secs;
4785extern int weight_p;
4786extern int dev_weight_rx_bias;
4787extern int dev_weight_tx_bias;
4788extern int dev_rx_weight;
4789extern int dev_tx_weight;
4790extern int gro_normal_batch;
4791
4792enum {
4793 NESTED_SYNC_IMM_BIT,
4794 NESTED_SYNC_TODO_BIT,
4795};
4796
4797#define __NESTED_SYNC_BIT(bit) ((u32)1 << (bit))
4798#define __NESTED_SYNC(name) __NESTED_SYNC_BIT(NESTED_SYNC_ ## name ## _BIT)
4799
4800#define NESTED_SYNC_IMM __NESTED_SYNC(IMM)
4801#define NESTED_SYNC_TODO __NESTED_SYNC(TODO)
4802
4803struct netdev_nested_priv {
4804 unsigned char flags;
4805 void *data;
4806};
4807
4808bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
4809struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
4810 struct list_head **iter);
4811
4812#ifdef CONFIG_LOCKDEP
4813static LIST_HEAD(net_unlink_list);
4814
4815static inline void net_unlink_todo(struct net_device *dev)
4816{
4817 if (list_empty(&dev->unlink_list))
4818 list_add_tail(&dev->unlink_list, &net_unlink_list);
4819}
4820#endif
4821
4822
4823#define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
4824 for (iter = &(dev)->adj_list.upper, \
4825 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
4826 updev; \
4827 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
4828
4829int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
4830 int (*fn)(struct net_device *upper_dev,
4831 struct netdev_nested_priv *priv),
4832 struct netdev_nested_priv *priv);
4833
4834bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
4835 struct net_device *upper_dev);
4836
4837bool netdev_has_any_upper_dev(struct net_device *dev);
4838
4839void *netdev_lower_get_next_private(struct net_device *dev,
4840 struct list_head **iter);
4841void *netdev_lower_get_next_private_rcu(struct net_device *dev,
4842 struct list_head **iter);
4843
4844#define netdev_for_each_lower_private(dev, priv, iter) \
4845 for (iter = (dev)->adj_list.lower.next, \
4846 priv = netdev_lower_get_next_private(dev, &(iter)); \
4847 priv; \
4848 priv = netdev_lower_get_next_private(dev, &(iter)))
4849
4850#define netdev_for_each_lower_private_rcu(dev, priv, iter) \
4851 for (iter = &(dev)->adj_list.lower, \
4852 priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
4853 priv; \
4854 priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
4855
4856void *netdev_lower_get_next(struct net_device *dev,
4857 struct list_head **iter);
4858
4859#define netdev_for_each_lower_dev(dev, ldev, iter) \
4860 for (iter = (dev)->adj_list.lower.next, \
4861 ldev = netdev_lower_get_next(dev, &(iter)); \
4862 ldev; \
4863 ldev = netdev_lower_get_next(dev, &(iter)))
4864
4865struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
4866 struct list_head **iter);
4867int netdev_walk_all_lower_dev(struct net_device *dev,
4868 int (*fn)(struct net_device *lower_dev,
4869 struct netdev_nested_priv *priv),
4870 struct netdev_nested_priv *priv);
4871int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
4872 int (*fn)(struct net_device *lower_dev,
4873 struct netdev_nested_priv *priv),
4874 struct netdev_nested_priv *priv);
4875
4876void *netdev_adjacent_get_private(struct list_head *adj_list);
4877void *netdev_lower_get_first_private_rcu(struct net_device *dev);
4878struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
4879struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
4880int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev,
4881 struct netlink_ext_ack *extack);
4882int netdev_master_upper_dev_link(struct net_device *dev,
4883 struct net_device *upper_dev,
4884 void *upper_priv, void *upper_info,
4885 struct netlink_ext_ack *extack);
4886void netdev_upper_dev_unlink(struct net_device *dev,
4887 struct net_device *upper_dev);
4888int netdev_adjacent_change_prepare(struct net_device *old_dev,
4889 struct net_device *new_dev,
4890 struct net_device *dev,
4891 struct netlink_ext_ack *extack);
4892void netdev_adjacent_change_commit(struct net_device *old_dev,
4893 struct net_device *new_dev,
4894 struct net_device *dev);
4895void netdev_adjacent_change_abort(struct net_device *old_dev,
4896 struct net_device *new_dev,
4897 struct net_device *dev);
4898void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
4899void *netdev_lower_dev_get_private(struct net_device *dev,
4900 struct net_device *lower_dev);
4901void netdev_lower_state_changed(struct net_device *lower_dev,
4902 void *lower_state_info);
4903
4904
4905#define NETDEV_RSS_KEY_LEN 52
4906extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
4907void netdev_rss_key_fill(void *buffer, size_t len);
4908
4909int skb_checksum_help(struct sk_buff *skb);
4910int skb_crc32c_csum_help(struct sk_buff *skb);
4911int skb_csum_hwoffload_help(struct sk_buff *skb,
4912 const netdev_features_t features);
4913
4914struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
4915 netdev_features_t features, bool tx_path);
4916struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
4917 netdev_features_t features);
4918
4919struct netdev_bonding_info {
4920 ifslave slave;
4921 ifbond master;
4922};
4923
4924struct netdev_notifier_bonding_info {
4925 struct netdev_notifier_info info;
4926 struct netdev_bonding_info bonding_info;
4927};
4928
4929void netdev_bonding_info_change(struct net_device *dev,
4930 struct netdev_bonding_info *bonding_info);
4931
4932#if IS_ENABLED(CONFIG_ETHTOOL_NETLINK)
4933void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data);
4934#else
4935static inline void ethtool_notify(struct net_device *dev, unsigned int cmd,
4936 const void *data)
4937{
4938}
4939#endif
4940
4941static inline
4942struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
4943{
4944 return __skb_gso_segment(skb, features, true);
4945}
4946__be16 skb_network_protocol(struct sk_buff *skb, int *depth);
4947
4948static inline bool can_checksum_protocol(netdev_features_t features,
4949 __be16 protocol)
4950{
4951 if (protocol == htons(ETH_P_FCOE))
4952 return !!(features & NETIF_F_FCOE_CRC);
4953
4954
4955
4956 if (features & NETIF_F_HW_CSUM) {
4957
4958 return true;
4959 }
4960
4961 switch (protocol) {
4962 case htons(ETH_P_IP):
4963 return !!(features & NETIF_F_IP_CSUM);
4964 case htons(ETH_P_IPV6):
4965 return !!(features & NETIF_F_IPV6_CSUM);
4966 default:
4967 return false;
4968 }
4969}

#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev,
					struct sk_buff *skb)
{
}
#endif

void net_enable_timestamp(void);
void net_disable_timestamp(void);

#ifdef CONFIG_PROC_FS
int __init dev_proc_init(void);
#else
#define dev_proc_init() 0
#endif

static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
					      struct sk_buff *skb, struct net_device *dev,
					      bool more)
{
	__this_cpu_write(softnet_data.xmit.more, more);
	return ops->ndo_start_xmit(skb, dev);
}

/* True while ndo_start_xmit() runs if the core will hand the driver
 * another packet right away; drivers may use this to batch doorbell
 * writes.
 */
static inline bool netdev_xmit_more(void)
{
	return __this_cpu_read(softnet_data.xmit.more);
}

static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
					    struct netdev_queue *txq, bool more)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	netdev_tx_t rc;

	rc = __netdev_start_xmit(ops, skb, dev, more);
	if (rc == NETDEV_TX_OK)
		txq_trans_update(txq);

	return rc;
}
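
/*
 * Example (a hedged sketch): inside ndo_start_xmit(), a driver can skip
 * the (expensive) doorbell write while more packets are pending, as
 * long as it still rings when the ring is nearly full.
 * "ring_nearly_full"/"my_ring_doorbell" are hypothetical helpers.
 *
 *	// ... descriptor posted to the TX ring ...
 *	if (!netdev_xmit_more() || ring_nearly_full(ring))
 *		my_ring_doorbell(ring);
 *	return NETDEV_TX_OK;
 */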

int netdev_class_create_file_ns(const struct class_attribute *class_attr,
				const void *ns);
void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
				 const void *ns);

extern const struct kobj_ns_type_operations net_ns_type_operations;

const char *netdev_drivername(const struct net_device *dev);

void linkwatch_run_queue(void);

/* Intersect two feature sets; NETIF_F_HW_CSUM implies the
 * protocol-specific checksum bits, so expand it on whichever side has
 * it before taking the intersection.
 */
static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
							   netdev_features_t f2)
{
	if ((f1 ^ f2) & NETIF_F_HW_CSUM) {
		if (f1 & NETIF_F_HW_CSUM)
			f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
		else
			f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
	}

	return f1 & f2;
}

static inline netdev_features_t netdev_get_wanted_features(
	struct net_device *dev)
{
	return (dev->features & ~dev->hw_features) | dev->wanted_features;
}
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask);

/* Allow TSO on a stacked device: performing the GSO segmentation
 * before the last device is a performance improvement.
 */
static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
							netdev_features_t mask)
{
	return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
}
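
/*
 * Example (a hedged sketch): a master device recomputing its feature
 * set across all slaves, in the style of bonding/team; the lower-device
 * iteration, locking, and "FULL_FEATURE_MASK" starting mask are
 * hypothetical/elided here.
 *
 *	netdev_features_t feat = FULL_FEATURE_MASK;
 *
 *	// for each lower device (iteration elided):
 *	//	feat = netdev_increment_features(feat, lower->features,
 *	//					 FULL_FEATURE_MASK);
 *	feat = netdev_add_tso_features(feat, FULL_FEATURE_MASK);
 *	master->features = feat;
 *	netdev_change_features(master);	// declared just below
 */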

int __netdev_update_features(struct net_device *dev);
void netdev_update_features(struct net_device *dev);
void netdev_change_features(struct net_device *dev);

void netif_stacked_transfer_operstate(const struct net_device *rootdev,
				      struct net_device *dev);

netdev_features_t passthru_features_check(struct sk_buff *skb,
					  struct net_device *dev,
					  netdev_features_t features);
netdev_features_t netif_skb_features(struct sk_buff *skb);

static inline bool net_gso_ok(netdev_features_t features, int gso_type)
{
	netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT;

	/* check flags correspondence */
	BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_IPXIP4 != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_IPXIP6 != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_FRAGLIST != (NETIF_F_GSO_FRAGLIST >> NETIF_F_GSO_SHIFT));

	return (features & feature) == feature;
}

static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
{
	return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
	       (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
}

static inline bool netif_needs_gso(struct sk_buff *skb,
				   netdev_features_t features)
{
	return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
		unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
			 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
}
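
/*
 * Example (a hedged sketch): the usual transmit-path pattern for
 * choosing between hardware GSO and the software fallback, modelled on
 * the core validate-xmit logic:
 *
 *	netdev_features_t features = netif_skb_features(skb);
 *
 *	if (netif_needs_gso(skb, features)) {
 *		// hardware can't segment this skb: fall back to
 *		// skb_gso_segment() as sketched above
 *	}
 */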

static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	dev->gso_max_size = size;
}

/* Undo the header manipulation done before a failed segmentation
 * attempt, so the skb can continue through the stack as a regular
 * packet.
 */
static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
					int pulled_hlen, u16 mac_offset,
					int mac_len)
{
	skb->protocol = protocol;
	skb->encapsulation = 1;
	skb_push(skb, pulled_hlen);
	skb_reset_transport_header(skb);
	skb->mac_header = mac_offset;
	skb->network_header = skb->mac_header + mac_len;
	skb->mac_len = mac_len;
}

static inline bool netif_is_macsec(const struct net_device *dev)
{
	return dev->priv_flags & IFF_MACSEC;
}

static inline bool netif_is_macvlan(const struct net_device *dev)
{
	return dev->priv_flags & IFF_MACVLAN;
}

static inline bool netif_is_macvlan_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_MACVLAN_PORT;
}

static inline bool netif_is_bond_master(const struct net_device *dev)
{
	return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
}

static inline bool netif_is_bond_slave(const struct net_device *dev)
{
	return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
}

static inline bool netif_supports_nofcs(struct net_device *dev)
{
	return dev->priv_flags & IFF_SUPP_NOFCS;
}

static inline bool netif_has_l3_rx_handler(const struct net_device *dev)
{
	return dev->priv_flags & IFF_L3MDEV_RX_HANDLER;
}

static inline bool netif_is_l3_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_L3MDEV_MASTER;
}

static inline bool netif_is_l3_slave(const struct net_device *dev)
{
	return dev->priv_flags & IFF_L3MDEV_SLAVE;
}

static inline bool netif_is_bridge_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_EBRIDGE;
}

static inline bool netif_is_bridge_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_BRIDGE_PORT;
}

static inline bool netif_is_ovs_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_OPENVSWITCH;
}

static inline bool netif_is_ovs_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_OVS_DATAPATH;
}

static inline bool netif_is_any_bridge_port(const struct net_device *dev)
{
	return netif_is_bridge_port(dev) || netif_is_ovs_port(dev);
}

static inline bool netif_is_team_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_TEAM;
}

static inline bool netif_is_team_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_TEAM_PORT;
}

static inline bool netif_is_lag_master(const struct net_device *dev)
{
	return netif_is_bond_master(dev) || netif_is_team_master(dev);
}

static inline bool netif_is_lag_port(const struct net_device *dev)
{
	return netif_is_bond_slave(dev) || netif_is_team_port(dev);
}

static inline bool netif_is_rxfh_configured(const struct net_device *dev)
{
	return dev->priv_flags & IFF_RXFH_CONFIGURED;
}

static inline bool netif_is_failover(const struct net_device *dev)
{
	return dev->priv_flags & IFF_FAILOVER;
}

static inline bool netif_is_failover_slave(const struct net_device *dev)
{
	return dev->priv_flags & IFF_FAILOVER_SLAVE;
}
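
/*
 * Example (a hedged sketch): the IFF_* predicates above let
 * stacking-aware code test a device's role without poking at
 * priv_flags directly, e.g. a hypothetical refusal to stack on top of
 * an existing aggregate:
 *
 *	if (netif_is_lag_master(dev) || netif_is_bridge_master(dev))
 *		return -EOPNOTSUPP;
 */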

/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
static inline void netif_keep_dst(struct net_device *dev)
{
	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
}

/* return true if dev can't cope with mtu frames that need vlan tag insertion */
static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
{
	/* TODO: reserve and use an additional IFF bit, if we get more users */
	return netif_is_macsec(dev);
}

extern struct pernet_operations __net_initdata loopback_net_ops;

/* Logging, debugging and troubleshooting/diagnostic helpers. */

/* netdev_printk helpers, similar to dev_printk */

static inline const char *netdev_name(const struct net_device *dev)
{
	if (!dev->name[0] || strchr(dev->name, '%'))
		return "(unnamed net_device)";
	return dev->name;
}

static inline bool netdev_unregistering(const struct net_device *dev)
{
	return dev->reg_state == NETREG_UNREGISTERING;
}

static inline const char *netdev_reg_state(const struct net_device *dev)
{
	switch (dev->reg_state) {
	case NETREG_UNINITIALIZED: return " (uninitialized)";
	case NETREG_REGISTERED: return "";
	case NETREG_UNREGISTERING: return " (unregistering)";
	case NETREG_UNREGISTERED: return " (unregistered)";
	case NETREG_RELEASED: return " (released)";
	case NETREG_DUMMY: return " (dummy)";
	}

	WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state);
	return " (unknown)";
}

__printf(3, 4) __cold
void netdev_printk(const char *level, const struct net_device *dev,
		   const char *format, ...);
__printf(2, 3) __cold
void netdev_emerg(const struct net_device *dev, const char *format, ...);
__printf(2, 3) __cold
void netdev_alert(const struct net_device *dev, const char *format, ...);
__printf(2, 3) __cold
void netdev_crit(const struct net_device *dev, const char *format, ...);
__printf(2, 3) __cold
void netdev_err(const struct net_device *dev, const char *format, ...);
__printf(2, 3) __cold
void netdev_warn(const struct net_device *dev, const char *format, ...);
__printf(2, 3) __cold
void netdev_notice(const struct net_device *dev, const char *format, ...);
__printf(2, 3) __cold
void netdev_info(const struct net_device *dev, const char *format, ...);

#define netdev_level_once(level, dev, fmt, ...)			\
do {								\
	static bool __print_once __read_mostly;			\
								\
	if (!__print_once) {					\
		__print_once = true;				\
		netdev_printk(level, dev, fmt, ##__VA_ARGS__);	\
	}							\
} while (0)

#define netdev_emerg_once(dev, fmt, ...) \
	netdev_level_once(KERN_EMERG, dev, fmt, ##__VA_ARGS__)
#define netdev_alert_once(dev, fmt, ...) \
	netdev_level_once(KERN_ALERT, dev, fmt, ##__VA_ARGS__)
#define netdev_crit_once(dev, fmt, ...) \
	netdev_level_once(KERN_CRIT, dev, fmt, ##__VA_ARGS__)
#define netdev_err_once(dev, fmt, ...) \
	netdev_level_once(KERN_ERR, dev, fmt, ##__VA_ARGS__)
#define netdev_warn_once(dev, fmt, ...) \
	netdev_level_once(KERN_WARNING, dev, fmt, ##__VA_ARGS__)
#define netdev_notice_once(dev, fmt, ...) \
	netdev_level_once(KERN_NOTICE, dev, fmt, ##__VA_ARGS__)
#define netdev_info_once(dev, fmt, ...) \
	netdev_level_once(KERN_INFO, dev, fmt, ##__VA_ARGS__)
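
/*
 * Example (a hedged sketch): messages printed through these helpers are
 * automatically prefixed with the driver, device name, and registration
 * state:
 *
 *	netdev_err(dev, "TX timeout on queue %d\n", txq);
 *	netdev_info_once(dev, "firmware %s loaded\n", fw_name);
 */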

#define MODULE_ALIAS_NETDEV(device) \
	MODULE_ALIAS("netdev-" device)

#if defined(CONFIG_DYNAMIC_DEBUG) || \
	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#define netdev_dbg(__dev, format, args...)			\
do {								\
	dynamic_netdev_dbg(__dev, format, ##args);		\
} while (0)
#elif defined(DEBUG)
#define netdev_dbg(__dev, format, args...)			\
	netdev_printk(KERN_DEBUG, __dev, format, ##args)
#else
#define netdev_dbg(__dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, __dev, format, ##args); \
})
#endif

#if defined(VERBOSE_DEBUG)
#define netdev_vdbg	netdev_dbg
#else

#define netdev_vdbg(dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, dev, format, ##args);	\
	0;							\
})
#endif

/*
 * netdev_WARN() acts like dev_printk(), but with the key difference
 * of using a WARN/WARN_ON to get the message out, including the
 * file/line information and a backtrace.
 */
#define netdev_WARN(dev, format, args...)			\
	WARN(1, "netdevice: %s%s: " format, netdev_name(dev),	\
	     netdev_reg_state(dev), ##args)

#define netdev_WARN_ONCE(dev, format, args...)				\
	WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev),	\
		  netdev_reg_state(dev), ##args)

/* netif printk helpers, similar to netdev_printk */

#define netif_printk(priv, type, level, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_printk(level, (dev), fmt, ##args);	\
} while (0)

#define netif_level(level, priv, type, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_##level(dev, fmt, ##args);		\
} while (0)

#define netif_emerg(priv, type, dev, fmt, args...)		\
	netif_level(emerg, priv, type, dev, fmt, ##args)
#define netif_alert(priv, type, dev, fmt, args...)		\
	netif_level(alert, priv, type, dev, fmt, ##args)
#define netif_crit(priv, type, dev, fmt, args...)		\
	netif_level(crit, priv, type, dev, fmt, ##args)
#define netif_err(priv, type, dev, fmt, args...)		\
	netif_level(err, priv, type, dev, fmt, ##args)
#define netif_warn(priv, type, dev, fmt, args...)		\
	netif_level(warn, priv, type, dev, fmt, ##args)
#define netif_notice(priv, type, dev, fmt, args...)		\
	netif_level(notice, priv, type, dev, fmt, ##args)
#define netif_info(priv, type, dev, fmt, args...)		\
	netif_level(info, priv, type, dev, fmt, ##args)

#if defined(CONFIG_DYNAMIC_DEBUG) || \
	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#define netif_dbg(priv, type, netdev, format, args...)		\
do {								\
	if (netif_msg_##type(priv))				\
		dynamic_netdev_dbg(netdev, format, ##args);	\
} while (0)
#elif defined(DEBUG)
#define netif_dbg(priv, type, dev, format, args...)		\
	netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
#else
#define netif_dbg(priv, type, dev, format, args...)			\
({									\
	if (0)								\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;								\
})
#endif
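
/*
 * Example (a hedged sketch): the netif_* variants gate their output on
 * the driver's msg_enable bitmap (see netif_msg_init() and the ethtool
 * msglvl interface); "priv" is a hypothetical driver-private struct
 * with a msg_enable field:
 *
 *	priv->msg_enable = netif_msg_init(debug,
 *					  NETIF_MSG_DRV | NETIF_MSG_LINK);
 *	...
 *	netif_dbg(priv, link, dev, "link is up, %d Mbps\n", speed);
 */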

/* print at debug level when @cond is true, otherwise at the given @level */
#define netif_cond_dbg(priv, type, netdev, cond, level, fmt, args...)	\
	do {								\
		if (cond)						\
			netif_dbg(priv, type, netdev, fmt, ##args);	\
		else							\
			netif_ ## level(priv, type, netdev, fmt, ##args); \
	} while (0)

#if defined(VERBOSE_DEBUG)
#define netif_vdbg	netif_dbg
#else
#define netif_vdbg(priv, type, dev, format, args...)		\
({								\
	if (0)							\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;							\
})
#endif

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *		0800	IP
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */
#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

extern struct list_head ptype_all __read_mostly;
extern struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
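
/*
 * Example (a hedged sketch): these lists are populated through
 * dev_add_pack() (declared earlier in this header); a protocol module
 * typically registers a static struct packet_type, e.g.:
 *
 *	static struct packet_type my_ptype __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_ARP),	// any EtherType
 *		.func = my_rcv,			// hypothetical handler
 *	};
 *
 *	dev_add_pack(&my_ptype);	// at module init
 *	dev_remove_pack(&my_ptype);	// at module exit
 */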

/* Always-present device that drops everything it transmits; used as a
 * dst sink for devices that have gone away.
 */
extern struct net_device *blackhole_netdev;

#endif	/* _LINUX_NETDEVICE_H */