/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Bjorn Ekwall. <bj0rn@blox.se>
 *		Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/timer.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/prefetch.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>
#include <linux/dynamic_queue_limits.h>

#include <linux/ethtool.h>
#include <net/net_namespace.h>
#include <net/dsa.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif
#include <net/netprio_cgroup.h>

#include <linux/netdev_features.h>
#include <linux/neighbour.h>
#include <uapi/linux/netdevice.h>
#include <uapi/linux/if_bonding.h>
#include <uapi/linux/pkt_cls.h>
#include <linux/hashtable.h>

struct netpoll_info;
struct device;
struct phy_device;
/* 802.11 specific */
struct wireless_dev;
/* 802.15.4 specific */
struct wpan_dev;
struct mpls_dev;
/* UDP Tunnel offloads */
struct udp_tunnel_info;
struct bpf_prog;

void netdev_set_default_ethtool_ops(struct net_device *dev,
				    const struct ethtool_ops *ops);

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */

/*
 * Transmit return codes: transmit return codes originate from three different
 * namespaces:
 *
 * - qdisc return codes
 * - driver transmit return codes
 * - errno values
 *
 * Drivers are allowed to return any one of those in their hard_start_xmit()
 * function. Real network devices commonly used with qdiscs should only return
 * the driver transmit return codes though - when qdiscs are used, the actual
 * transmission happens asynchronously, so the value is not propagated to
 * higher layers. Virtual network devices transmit synchronously; in this case
 * the driver transmit return codes are consumed by dev_queue_xmit(), and all
 * others are propagated to higher layers.
 */

/* qdisc ->enqueue() return codes. */
#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_DROP		0x01	/* skb dropped			*/
#define NET_XMIT_CN		0x02	/* congestion notification	*/
#define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority; prompting us to send less aggressively. */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)

/* Driver transmit return codes */
#define NETDEV_TX_MASK		0xf0

enum netdev_tx {
	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy */
};
typedef enum netdev_tx	netdev_tx_t;

/*
 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
 */
static inline bool dev_xmit_complete(int rc)
{
	/*
	 * Positive cases with an skb consumed by a driver:
	 * - successful transmission (rc == NETDEV_TX_OK)
	 * - error while transmitting (rc < 0)
	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
	 */
	if (likely(rc < NET_XMIT_MASK))
		return true;

	return false;
}
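
/*
 * Usage sketch (not part of this header): a transmit path can use
 * dev_xmit_complete() to decide whether it still owns the skb.  The
 * netdev_start_xmit() helper is defined elsewhere in this header; the
 * error handling shown is illustrative only.
 *
 *	rc = netdev_start_xmit(skb, dev, txq, false);
 *	if (!dev_xmit_complete(rc))
 *		goto requeue;	// driver was busy, skb is still ours
 *	// otherwise the skb was consumed (sent, dropped, or requeued)
 */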

/*
 *	Compute the worst-case header length according to the protocols
 *	used.
 */
#if defined(CONFIG_HYPERV_NET)
# define LL_MAX_HEADER 128
#elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#else
# define LL_MAX_HEADER 32
#endif

#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
    !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

/*
 *	Old network device statistics. Fields are native words
 *	(unsigned long) so they can be read and written atomically.
 */

struct net_device_stats {
	unsigned long	rx_packets;
	unsigned long	tx_packets;
	unsigned long	rx_bytes;
	unsigned long	tx_bytes;
	unsigned long	rx_errors;
	unsigned long	tx_errors;
	unsigned long	rx_dropped;
	unsigned long	tx_dropped;
	unsigned long	multicast;
	unsigned long	collisions;
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;
	unsigned long	rx_crc_errors;
	unsigned long	rx_frame_errors;
	unsigned long	rx_fifo_errors;
	unsigned long	rx_missed_errors;
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};


#include <linux/cache.h>
#include <linux/skbuff.h>

#ifdef CONFIG_RPS
#include <linux/static_key.h>
extern struct static_key rps_needed;
extern struct static_key rfs_needed;
#endif

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netdev_hw_addr {
	struct list_head	list;
	unsigned char		addr[MAX_ADDR_LEN];
	unsigned char		type;
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_SLAVE		3
#define NETDEV_HW_ADDR_T_UNICAST	4
#define NETDEV_HW_ADDR_T_MULTICAST	5
	bool			global_use;
	int			sync_cnt;
	int			refcount;
	int			synced;
	struct rcu_head		rcu_head;
};

struct netdev_hw_addr_list {
	struct list_head	list;
	int			count;
};

#define netdev_hw_addr_list_count(l) ((l)->count)
#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
#define netdev_hw_addr_list_for_each(ha, l) \
	list_for_each_entry(ha, &(l)->list, list)

#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->uc)

#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->mc)
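
/*
 * Example (a minimal sketch): walking the multicast list from a
 * hypothetical driver's ndo_set_rx_mode() callback; foo_write_mc_filter(),
 * foo_enable_rx_promisc() and FOO_MAX_UC are assumed hardware-specific names.
 *
 *	static void foo_set_rx_mode(struct net_device *dev)
 *	{
 *		struct netdev_hw_addr *ha;
 *
 *		netdev_for_each_mc_addr(ha, dev)
 *			foo_write_mc_filter(dev, ha->addr);
 *		if (netdev_uc_count(dev) > FOO_MAX_UC)
 *			foo_enable_rx_promisc(dev);
 *	}
 */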

struct hh_cache {
	u16		hh_len;
	u16		__pad;
	seqlock_t	hh_lock;

	/* cached hardware header; allow for machine alignment needs.        */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
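
/*
 * Example (sketch): allocating an skb with enough headroom for @dev's
 * link-layer header before building a packet; payload_len is assumed.
 *
 *	skb = alloc_skb(LL_RESERVED_SPACE(dev) + payload_len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 *	// fill payload with skb_put(), then prepend the hardware header,
 *	// e.g. via dev_hard_header() declared later in this file
 */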

struct header_ops {
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned int len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
	bool	(*validate)(const char *ll_header, unsigned int len);
};

/* These flag bits are private to the generic network queueing
 * layer; they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t {
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
};


/*
 * This structure holds boot-time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

int __init netdev_boot_setup(char *str);

/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit.  This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-CPU poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	unsigned int		gro_count;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	int			poll_owner;
#endif
	struct net_device	*dev;
	struct sk_buff		*gro_list;
	struct sk_buff		*skb;
	struct hrtimer		timer;
	struct list_head	dev_list;
	struct hlist_node	napi_hash_node;
	unsigned int		napi_id;
};

enum {
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_MISSED,	/* reschedule a napi */
	NAPI_STATE_DISABLE,	/* Disable pending */
	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
	NAPI_STATE_HASHED,	/* In NAPI hash (busy polling possible) */
	NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */
	NAPI_STATE_IN_BUSY_POLL,/* sk_busy_loop() owns this NAPI */
};

enum {
	NAPIF_STATE_SCHED	 = BIT(NAPI_STATE_SCHED),
	NAPIF_STATE_MISSED	 = BIT(NAPI_STATE_MISSED),
	NAPIF_STATE_DISABLE	 = BIT(NAPI_STATE_DISABLE),
	NAPIF_STATE_NPSVC	 = BIT(NAPI_STATE_NPSVC),
	NAPIF_STATE_HASHED	 = BIT(NAPI_STATE_HASHED),
	NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL),
	NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL),
};

enum gro_result {
	GRO_MERGED,
	GRO_MERGED_FREE,
	GRO_HELD,
	GRO_NORMAL,
	GRO_DROP,
	GRO_CONSUMED,
};
typedef enum gro_result gro_result_t;

/**
 * enum rx_handler_result - Possible return values for rx_handlers.
 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
 * further.
 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
 * case skb->dev was changed by rx_handler.
 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
 *
 * rx_handlers are functions called from inside __netif_receive_skb(), to do
 * special processing of the skb, prior to delivery to protocol handlers.
 *
 * Currently, a net_device can only have a single rx_handler registered. Trying
 * to register a second rx_handler will return -EBUSY.
 *
 * To register a rx_handler on a net_device, use netdev_rx_handler_register().
 * To unregister a rx_handler on a net_device, use
 * netdev_rx_handler_unregister().
 *
 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
 * do with the skb.
 *
 * If the rx_handler consumed the skb in some way, it should return
 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
 * the skb to be delivered in some other way.
 *
 * If the rx_handler changed skb->dev, to divert the skb to another
 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
 * new device will be called if it exists.
 *
 * If the rx_handler decides the skb should be ignored, it should return
 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
 * are registered on exact device (ptype->dev == skb->dev).
 *
 * If the rx_handler didn't change skb->dev, but wants the skb to be normally
 * delivered, it should return RX_HANDLER_PASS.
 *
 * A device without a registered rx_handler will behave as if rx_handler
 * returned RX_HANDLER_PASS.
 */
enum rx_handler_result {
	RX_HANDLER_CONSUMED,
	RX_HANDLER_ANOTHER,
	RX_HANDLER_EXACT,
	RX_HANDLER_PASS,
};
typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);

void __napi_schedule(struct napi_struct *n);
void __napi_schedule_irqoff(struct napi_struct *n);

static inline bool napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

bool napi_schedule_prep(struct napi_struct *n);

/**
 *	napi_schedule - schedule NAPI poll
 *	@n: NAPI context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}

/**
 *	napi_schedule_irqoff - schedule NAPI poll
 *	@n: NAPI context
 *
 * Variant of napi_schedule(), assuming hard irqs are masked.
 */
static inline void napi_schedule_irqoff(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule_irqoff(n);
}

/* Try to reschedule poll. Called by dev->poll() after napi_complete().  */
static inline bool napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return true;
	}
	return false;
}
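
/*
 * Canonical scheduling pattern (sketch, hypothetical driver): the
 * interrupt handler masks device interrupts and defers all work to the
 * poll routine; foo_disable_irqs() is an assumed hardware operation.
 *
 *	static irqreturn_t foo_interrupt(int irq, void *data)
 *	{
 *		struct foo_priv *priv = data;
 *
 *		foo_disable_irqs(priv);
 *		napi_schedule(&priv->napi);	// runs foo_poll() in softirq
 *		return IRQ_HANDLED;
 *	}
 */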

bool napi_complete_done(struct napi_struct *n, int work_done);
/**
 *	napi_complete - NAPI processing complete
 *	@n: NAPI context
 *
 * Mark NAPI processing as complete.
 * Consider using napi_complete_done() instead.
 * Return false if device should avoid rearming interrupts.
 */
static inline bool napi_complete(struct napi_struct *n)
{
	return napi_complete_done(n, 0);
}
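
/*
 * Canonical poll pattern (sketch, hypothetical driver): process at most
 * @budget packets, and only re-enable interrupts when the NAPI instance
 * was actually descheduled; foo_clean_rx()/foo_enable_irqs() are assumed.
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
 *		int work_done = foo_clean_rx(priv, budget);
 *
 *		if (work_done < budget && napi_complete_done(napi, work_done))
 *			foo_enable_irqs(priv);
 *		return work_done;
 *	}
 */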

/**
 *	napi_hash_del - remove a NAPI from global table
 *	@napi: NAPI context
 *
 * Warning: caller must observe RCU grace period
 * before freeing memory containing @napi, if
 * this function returns true.
 * Note: core networking stack automatically calls it
 * from netif_napi_del().
 * Drivers might want to call this helper to combine all
 * the needed RCU grace periods into a single one.
 */
bool napi_hash_del(struct napi_struct *napi);

/**
 *	napi_disable - prevent NAPI from scheduling
 *	@n: NAPI context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
void napi_disable(struct napi_struct *n);

/**
 *	napi_enable - enable NAPI scheduling
 *	@n: NAPI context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_atomic();
	clear_bit(NAPI_STATE_SCHED, &n->state);
	clear_bit(NAPI_STATE_NPSVC, &n->state);
}

/**
 *	napi_synchronize - wait until NAPI is not running
 *	@n: NAPI context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	if (IS_ENABLED(CONFIG_SMP))
		while (test_bit(NAPI_STATE_SCHED, &n->state))
			msleep(1);
	else
		barrier();
}

enum netdev_queue_state_t {
	__QUEUE_STATE_DRV_XOFF,
	__QUEUE_STATE_STACK_XOFF,
	__QUEUE_STATE_FROZEN,
};

#define QUEUE_STATE_DRV_XOFF	(1 << __QUEUE_STATE_DRV_XOFF)
#define QUEUE_STATE_STACK_XOFF	(1 << __QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_FROZEN	(1 << __QUEUE_STATE_FROZEN)

#define QUEUE_STATE_ANY_XOFF	(QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
					QUEUE_STATE_FROZEN)
#define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \
					QUEUE_STATE_FROZEN)

/*
 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue.  The
 * netif_tx_* functions below are used to manipulate this flag.  The
 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
 * queue independently.  The netif_xmit_*stopped functions below are called
 * to check if the queue has been stopped by the driver or stack (either
 * of the XOFF bits are set in the state).  Drivers should not need to call
 * netif_xmit*stopped functions, they should only be using netif_tx_*.
 */

struct netdev_queue {
/*
 * read-mostly part
 */
	struct net_device	*dev;
	struct Qdisc __rcu	*qdisc;
	struct Qdisc		*qdisc_sleeping;
#ifdef CONFIG_SYSFS
	struct kobject		kobj;
#endif
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	int			numa_node;
#endif
	unsigned long		tx_maxrate;
	/*
	 * Number of TX timeouts for this queue
	 * (/sys/class/net/DEV/Q/trans_timeout)
	 */
	unsigned long		trans_timeout;
/*
 * write-mostly part
 */
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	int			xmit_lock_owner;
	/*
	 * Time (in jiffies) of last Tx
	 */
	unsigned long		trans_start;

	unsigned long		state;

#ifdef CONFIG_BQL
	struct dql		dql;
#endif
} ____cacheline_aligned_in_smp;

static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	return q->numa_node;
#else
	return NUMA_NO_NODE;
#endif
}

static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	q->numa_node = node;
#endif
}
602
603#ifdef CONFIG_RPS
604
605
606
607
608struct rps_map {
609 unsigned int len;
610 struct rcu_head rcu;
611 u16 cpus[0];
612};
613#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
614
615
616
617
618
619
620struct rps_dev_flow {
621 u16 cpu;
622 u16 filter;
623 unsigned int last_qtail;
624};
625#define RPS_NO_FILTER 0xffff
626
627
628
629
630struct rps_dev_flow_table {
631 unsigned int mask;
632 struct rcu_head rcu;
633 struct rps_dev_flow flows[0];
634};
635#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
636 ((_num) * sizeof(struct rps_dev_flow)))
637
638
639
640
641
642
643
644
645
646
647
648struct rps_sock_flow_table {
649 u32 mask;
650
651 u32 ents[0] ____cacheline_aligned_in_smp;
652};
653#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))
654
655#define RPS_NO_CPU 0xffff
656
657extern u32 rps_cpu_mask;
658extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
659
660static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
661 u32 hash)
662{
663 if (table && hash) {
664 unsigned int index = hash & table->mask;
665 u32 val = hash & ~rps_cpu_mask;
666
667
668 val |= raw_smp_processor_id();
669
670 if (table->ents[index] != val)
671 table->ents[index] = val;
672 }
673}
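
/*
 * Worked example of the entry encoding used above: with 64 possible CPUs
 * (rps_cpu_mask == 0x3f), recording flow hash 0xabcd1234 on CPU 5 stores
 * (0xabcd1234 & ~0x3f) | 5 == 0xabcd1205 in table->ents[].
 */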

#ifdef CONFIG_RFS_ACCEL
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
			 u16 filter_id);
#endif
#endif /* CONFIG_RPS */

/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
#ifdef CONFIG_RPS
	struct rps_map __rcu		*rps_map;
	struct rps_dev_flow_table __rcu	*rps_flow_table;
#endif
	struct kobject			kobj;
	struct net_device		*dev;
} ____cacheline_aligned_in_smp;

/*
 * RX queue sysfs structures and functions.
 */
struct rx_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_rx_queue *queue,
	    struct rx_queue_attribute *attr, char *buf);
	ssize_t (*store)(struct netdev_rx_queue *queue,
	    struct rx_queue_attribute *attr, const char *buf, size_t len);
};

#ifdef CONFIG_XPS
/*
 * This structure holds an XPS map which can be of variable length.  The
 * map is an array of queues.
 */
struct xps_map {
	unsigned int len;
	unsigned int alloc_len;
	struct rcu_head rcu;
	u16 queues[0];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
       - sizeof(struct xps_map)) / sizeof(u16))

/*
 * This structure holds all XPS maps for device.  Maps are indexed by CPU.
 */
struct xps_dev_maps {
	struct rcu_head rcu;
	struct xps_map __rcu *cpu_map[0];
};
#define XPS_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) +		\
	(nr_cpu_ids * (_tcs) * sizeof(struct xps_map *)))
#endif /* CONFIG_XPS */

#define TC_MAX_QUEUE	16
#define TC_BITMASK	15
/* HW offloaded queuing disciplines txq count and offset maps */
struct netdev_tc_txq {
	u16 count;
	u16 offset;
};

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
/*
 * This structure is to hold information about the device
 * configured to run FCoE protocol stack.
 */
struct netdev_fcoe_hbainfo {
	char	manufacturer[64];
	char	serial_number[64];
	char	hardware_version[64];
	char	driver_version[64];
	char	optionrom_version[64];
	char	firmware_version[64];
	char	model[256];
	char	model_description[256];
};
#endif

#define MAX_PHYS_ITEM_ID_LEN 32

/* This structure holds a unique identifier to identify some
 * physical item (port for example) used by a netdevice.
 */
struct netdev_phys_item_id {
	unsigned char id[MAX_PHYS_ITEM_ID_LEN];
	unsigned char id_len;
};

static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
					    struct netdev_phys_item_id *b)
{
	return a->id_len == b->id_len &&
	       memcmp(a->id, b->id, a->id_len) == 0;
}

typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
				       struct sk_buff *skb);

/* These structures hold the attributes of qdisc and classifiers
 * that are being passed to the netdevice through the setup_tc op.
 */
enum {
	TC_SETUP_MQPRIO,
	TC_SETUP_CLSU32,
	TC_SETUP_CLSFLOWER,
	TC_SETUP_MATCHALL,
	TC_SETUP_CLSBPF,
};

struct tc_cls_u32_offload;

struct tc_to_netdev {
	unsigned int type;
	union {
		u8 tc;
		struct tc_cls_u32_offload *cls_u32;
		struct tc_cls_flower_offload *cls_flower;
		struct tc_cls_matchall_offload *cls_mall;
		struct tc_cls_bpf_offload *cls_bpf;
	};
	bool egress_dev;
};

/* These structures hold the attributes of xdp state that are being passed
 * to the netdevice through the xdp op.
 */
enum xdp_netdev_command {
	/* Set or clear a bpf program used in the earliest stages of packet
	 * rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee
	 * is responsible for calling bpf_prog_put on any old progs that are
	 * stored. In case of error, the callee need not release the new prog
	 * reference, but on success it takes ownership and must bpf_prog_put
	 * when it is no longer used.
	 */
	XDP_SETUP_PROG,
	/* Check if a bpf program is set on the device.  The callee should
	 * return true if a program is currently attached and running.
	 */
	XDP_QUERY_PROG,
};

struct netdev_xdp {
	enum xdp_netdev_command command;
	union {
		/* XDP_SETUP_PROG */
		struct bpf_prog *prog;
		/* XDP_QUERY_PROG */
		bool prog_attached;
	};
};
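
/*
 * Example ndo_xdp() dispatcher (sketch, hypothetical driver); the
 * foo_xdp_setup() helper and priv->xdp_prog bookkeeping are assumptions:
 *
 *	static int foo_xdp(struct net_device *dev, struct netdev_xdp *xdp)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		switch (xdp->command) {
 *		case XDP_SETUP_PROG:
 *			return foo_xdp_setup(priv, xdp->prog);
 *		case XDP_QUERY_PROG:
 *			xdp->prog_attached = !!priv->xdp_prog;
 *			return 0;
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */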

/*
 * This structure defines the management hooks for network devices.
 * The following hooks can be defined; unless noted otherwise, they are
 * optional and can be filled with a null pointer.
 */
struct net_device_ops {
	int			(*ndo_init)(struct net_device *dev);
	void			(*ndo_uninit)(struct net_device *dev);
	int			(*ndo_open)(struct net_device *dev);
	int			(*ndo_stop)(struct net_device *dev);
	netdev_tx_t		(*ndo_start_xmit)(struct sk_buff *skb,
						  struct net_device *dev);
	netdev_features_t	(*ndo_features_check)(struct sk_buff *skb,
						      struct net_device *dev,
						      netdev_features_t features);
	u16			(*ndo_select_queue)(struct net_device *dev,
						    struct sk_buff *skb,
						    void *accel_priv,
						    select_queue_fallback_t fallback);
	void			(*ndo_change_rx_flags)(struct net_device *dev,
						       int flags);
	void			(*ndo_set_rx_mode)(struct net_device *dev);
	int			(*ndo_set_mac_address)(struct net_device *dev,
						       void *addr);
	int			(*ndo_validate_addr)(struct net_device *dev);
	int			(*ndo_do_ioctl)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
	int			(*ndo_set_config)(struct net_device *dev,
						  struct ifmap *map);
	int			(*ndo_change_mtu)(struct net_device *dev,
						  int new_mtu);
	int			(*ndo_neigh_setup)(struct net_device *dev,
						   struct neigh_parms *);
	void			(*ndo_tx_timeout) (struct net_device *dev);

	void			(*ndo_get_stats64)(struct net_device *dev,
						   struct rtnl_link_stats64 *storage);
	bool			(*ndo_has_offload_stats)(const struct net_device *dev, int attr_id);
	int			(*ndo_get_offload_stats)(int attr_id,
							 const struct net_device *dev,
							 void *attr_data);
	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);

	int			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
						       __be16 proto, u16 vid);
	int			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
							__be16 proto, u16 vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*ndo_poll_controller)(struct net_device *dev);
	int			(*ndo_netpoll_setup)(struct net_device *dev,
						     struct netpoll_info *info);
	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
	int			(*ndo_set_vf_mac)(struct net_device *dev,
						  int queue, u8 *mac);
	int			(*ndo_set_vf_vlan)(struct net_device *dev,
						   int queue, u16 vlan,
						   u8 qos, __be16 proto);
	int			(*ndo_set_vf_rate)(struct net_device *dev,
						   int vf, int min_tx_rate,
						   int max_tx_rate);
	int			(*ndo_set_vf_spoofchk)(struct net_device *dev,
						       int vf, bool setting);
	int			(*ndo_set_vf_trust)(struct net_device *dev,
						    int vf, bool setting);
	int			(*ndo_get_vf_config)(struct net_device *dev,
						     int vf,
						     struct ifla_vf_info *ivf);
	int			(*ndo_set_vf_link_state)(struct net_device *dev,
							 int vf, int link_state);
	int			(*ndo_get_vf_stats)(struct net_device *dev,
						    int vf,
						    struct ifla_vf_stats
						    *vf_stats);
	int			(*ndo_set_vf_port)(struct net_device *dev,
						   int vf,
						   struct nlattr *port[]);
	int			(*ndo_get_vf_port)(struct net_device *dev,
						   int vf, struct sk_buff *skb);
	int			(*ndo_set_vf_guid)(struct net_device *dev,
						   int vf, u64 guid,
						   int guid_type);
	int			(*ndo_set_vf_rss_query_en)(
						   struct net_device *dev,
						   int vf, bool setting);
	int			(*ndo_setup_tc)(struct net_device *dev,
						u32 handle,
						__be16 protocol,
						struct tc_to_netdev *tc);
#if IS_ENABLED(CONFIG_FCOE)
	int			(*ndo_fcoe_enable)(struct net_device *dev);
	int			(*ndo_fcoe_disable)(struct net_device *dev);
	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
						      u16 xid,
						      struct scatterlist *sgl,
						      unsigned int sgc);
	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
						     u16 xid);
	int			(*ndo_fcoe_ddp_target)(struct net_device *dev,
						       u16 xid,
						       struct scatterlist *sgl,
						       unsigned int sgc);
	int			(*ndo_fcoe_get_hbainfo)(struct net_device *dev,
							struct netdev_fcoe_hbainfo *hbainfo);
#endif

#if IS_ENABLED(CONFIG_LIBFCOE)
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
						    u64 *wwn, int type);
#endif

#ifdef CONFIG_RFS_ACCEL
	int			(*ndo_rx_flow_steer)(struct net_device *dev,
						     const struct sk_buff *skb,
						     u16 rxq_index,
						     u32 flow_id);
#endif
	int			(*ndo_add_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	int			(*ndo_del_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	netdev_features_t	(*ndo_fix_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_set_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_neigh_construct)(struct net_device *dev,
						       struct neighbour *n);
	void			(*ndo_neigh_destroy)(struct net_device *dev,
						     struct neighbour *n);

	int			(*ndo_fdb_add)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid,
					       u16 flags);
	int			(*ndo_fdb_del)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid);
	int			(*ndo_fdb_dump)(struct sk_buff *skb,
						struct netlink_callback *cb,
						struct net_device *dev,
						struct net_device *filter_dev,
						int *idx);

	int			(*ndo_bridge_setlink)(struct net_device *dev,
						      struct nlmsghdr *nlh,
						      u16 flags);
	int			(*ndo_bridge_getlink)(struct sk_buff *skb,
						      u32 pid, u32 seq,
						      struct net_device *dev,
						      u32 filter_mask,
						      int nlflags);
	int			(*ndo_bridge_dellink)(struct net_device *dev,
						      struct nlmsghdr *nlh,
						      u16 flags);
	int			(*ndo_change_carrier)(struct net_device *dev,
						      bool new_carrier);
	int			(*ndo_get_phys_port_id)(struct net_device *dev,
							struct netdev_phys_item_id *ppid);
	int			(*ndo_get_phys_port_name)(struct net_device *dev,
							  char *name, size_t len);
	void			(*ndo_udp_tunnel_add)(struct net_device *dev,
						      struct udp_tunnel_info *ti);
	void			(*ndo_udp_tunnel_del)(struct net_device *dev,
						      struct udp_tunnel_info *ti);
	void*			(*ndo_dfwd_add_station)(struct net_device *pdev,
							struct net_device *dev);
	void			(*ndo_dfwd_del_station)(struct net_device *pdev,
							void *priv);

	netdev_tx_t		(*ndo_dfwd_start_xmit) (struct sk_buff *skb,
							struct net_device *dev,
							void *priv);
	int			(*ndo_get_lock_subclass)(struct net_device *dev);
	int			(*ndo_set_tx_maxrate)(struct net_device *dev,
						      int queue_index,
						      u32 maxrate);
	int			(*ndo_get_iflink)(const struct net_device *dev);
	int			(*ndo_change_proto_down)(struct net_device *dev,
							 bool proto_down);
	int			(*ndo_fill_metadata_dst)(struct net_device *dev,
							 struct sk_buff *skb);
	void			(*ndo_set_rx_headroom)(struct net_device *dev,
						       int needed_headroom);
	int			(*ndo_xdp)(struct net_device *dev,
					   struct netdev_xdp *xdp);
};
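
/*
 * A small driver typically fills only a handful of these hooks and leaves
 * the rest NULL.  Sketch for a hypothetical Ethernet driver (the foo_*
 * callbacks are assumptions; eth_mac_addr()/eth_validate_addr() are the
 * generic helpers from <linux/etherdevice.h>):
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		.ndo_open		= foo_open,
 *		.ndo_stop		= foo_stop,
 *		.ndo_start_xmit		= foo_start_xmit,
 *		.ndo_set_rx_mode	= foo_set_rx_mode,
 *		.ndo_set_mac_address	= eth_mac_addr,
 *		.ndo_validate_addr	= eth_validate_addr,
 *	};
 */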

/**
 * enum net_device_priv_flags - &struct net_device priv_flags
 *
 * These are the &struct net_device priv_flags, they are only set internally
 * by drivers and used in the kernel. These flags are invisible to userspace;
 * this means that the order of these flags can change during any kernel
 * release.
 *
 * You should have a pretty good reason to be extending these flags.
 *
 * @IFF_802_1Q_VLAN: 802.1Q VLAN device
 * @IFF_EBRIDGE: Ethernet bridging device
 * @IFF_BONDING: bonding master or slave
 * @IFF_ISATAP: ISATAP interface (RFC4214)
 * @IFF_WAN_HDLC: WAN HDLC device
 * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
 *	release skb->dst
 * @IFF_DONT_BRIDGE: disallow bridging this ether dev
 * @IFF_DISABLE_NETPOLL: disable netpoll at run-time
 * @IFF_MACVLAN_PORT: device used as macvlan port
 * @IFF_BRIDGE_PORT: device used as bridge port
 * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port
 * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit
 * @IFF_UNICAST_FLT: Supports unicast filtering
 * @IFF_TEAM_PORT: device used as team port
 * @IFF_SUPP_NOFCS: device supports sending custom FCS
 * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
 *	change when it's running
 * @IFF_MACVLAN: Macvlan device
 * @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account
 *	underlying stacked devices
 * @IFF_IPVLAN_MASTER: IPvlan master device
 * @IFF_IPVLAN_SLAVE: IPvlan slave device
 * @IFF_L3MDEV_MASTER: device is an L3 master device
 * @IFF_NO_QUEUE: device can run without qdisc attached
 * @IFF_OPENVSWITCH: device is a Open vSwitch master
 * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device
 * @IFF_TEAM: device is a team device
 * @IFF_RXFH_CONFIGURED: device has had Rx Flow indirection table configured
 * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external
 *	entity (i.e. the master device for bridged veth)
 * @IFF_MACSEC: device is a MACsec device
 */
enum netdev_priv_flags {
	IFF_802_1Q_VLAN			= 1<<0,
	IFF_EBRIDGE			= 1<<1,
	IFF_BONDING			= 1<<2,
	IFF_ISATAP			= 1<<3,
	IFF_WAN_HDLC			= 1<<4,
	IFF_XMIT_DST_RELEASE		= 1<<5,
	IFF_DONT_BRIDGE			= 1<<6,
	IFF_DISABLE_NETPOLL		= 1<<7,
	IFF_MACVLAN_PORT		= 1<<8,
	IFF_BRIDGE_PORT			= 1<<9,
	IFF_OVS_DATAPATH		= 1<<10,
	IFF_TX_SKB_SHARING		= 1<<11,
	IFF_UNICAST_FLT			= 1<<12,
	IFF_TEAM_PORT			= 1<<13,
	IFF_SUPP_NOFCS			= 1<<14,
	IFF_LIVE_ADDR_CHANGE		= 1<<15,
	IFF_MACVLAN			= 1<<16,
	IFF_XMIT_DST_RELEASE_PERM	= 1<<17,
	IFF_IPVLAN_MASTER		= 1<<18,
	IFF_IPVLAN_SLAVE		= 1<<19,
	IFF_L3MDEV_MASTER		= 1<<20,
	IFF_NO_QUEUE			= 1<<21,
	IFF_OPENVSWITCH			= 1<<22,
	IFF_L3MDEV_SLAVE		= 1<<23,
	IFF_TEAM			= 1<<24,
	IFF_RXFH_CONFIGURED		= 1<<25,
	IFF_PHONY_HEADROOM		= 1<<26,
	IFF_MACSEC			= 1<<27,
};

#define IFF_802_1Q_VLAN			IFF_802_1Q_VLAN
#define IFF_EBRIDGE			IFF_EBRIDGE
#define IFF_BONDING			IFF_BONDING
#define IFF_ISATAP			IFF_ISATAP
#define IFF_WAN_HDLC			IFF_WAN_HDLC
#define IFF_XMIT_DST_RELEASE		IFF_XMIT_DST_RELEASE
#define IFF_DONT_BRIDGE			IFF_DONT_BRIDGE
#define IFF_DISABLE_NETPOLL		IFF_DISABLE_NETPOLL
#define IFF_MACVLAN_PORT		IFF_MACVLAN_PORT
#define IFF_BRIDGE_PORT			IFF_BRIDGE_PORT
#define IFF_OVS_DATAPATH		IFF_OVS_DATAPATH
#define IFF_TX_SKB_SHARING		IFF_TX_SKB_SHARING
#define IFF_UNICAST_FLT			IFF_UNICAST_FLT
#define IFF_TEAM_PORT			IFF_TEAM_PORT
#define IFF_SUPP_NOFCS			IFF_SUPP_NOFCS
#define IFF_LIVE_ADDR_CHANGE		IFF_LIVE_ADDR_CHANGE
#define IFF_MACVLAN			IFF_MACVLAN
#define IFF_XMIT_DST_RELEASE_PERM	IFF_XMIT_DST_RELEASE_PERM
#define IFF_IPVLAN_MASTER		IFF_IPVLAN_MASTER
#define IFF_IPVLAN_SLAVE		IFF_IPVLAN_SLAVE
#define IFF_L3MDEV_MASTER		IFF_L3MDEV_MASTER
#define IFF_NO_QUEUE			IFF_NO_QUEUE
#define IFF_OPENVSWITCH			IFF_OPENVSWITCH
#define IFF_L3MDEV_SLAVE		IFF_L3MDEV_SLAVE
#define IFF_TEAM			IFF_TEAM
#define IFF_RXFH_CONFIGURED		IFF_RXFH_CONFIGURED
#define IFF_MACSEC			IFF_MACSEC

/**
 *	struct net_device - The DEVICE structure.
 *		Actually, this whole structure is a big mistake.  It mixes I/O
 *		data with strictly "high-level" data, and it has to know about
 *		almost every data structure used in the INET module.
 *
 *	FIXME: cleanup struct net_device such that network protocol info
 *	moves out.
 */
struct net_device {
	char			name[IFNAMSIZ];
	struct hlist_node	name_hlist;
	char			*ifalias;
	/*
	 *	I/O specific fields
	 *	FIXME: Merge these and struct ifmap into one
	 */
	unsigned long		mem_end;
	unsigned long		mem_start;
	unsigned long		base_addr;
	int			irq;

	atomic_t		carrier_changes;

	/*
	 *	Some hardware also needs these fields (state,dev_list,
	 *	napi_list,unreg_list,close_list) but they are not
	 *	part of the usual set specified in Space.c.
	 */

	unsigned long		state;

	struct list_head	dev_list;
	struct list_head	napi_list;
	struct list_head	unreg_list;
	struct list_head	close_list;
	struct list_head	ptype_all;
	struct list_head	ptype_specific;

	struct {
		struct list_head upper;
		struct list_head lower;
	} adj_list;

	netdev_features_t	features;
	netdev_features_t	hw_features;
	netdev_features_t	wanted_features;
	netdev_features_t	vlan_features;
	netdev_features_t	hw_enc_features;
	netdev_features_t	mpls_features;
	netdev_features_t	gso_partial_features;

	int			ifindex;
	int			group;

	struct net_device_stats	stats;

	atomic_long_t		rx_dropped;
	atomic_long_t		tx_dropped;
	atomic_long_t		rx_nohandler;

#ifdef CONFIG_WIRELESS_EXT
	const struct iw_handler_def *wireless_handlers;
	struct iw_public_data	*wireless_data;
#endif
	const struct net_device_ops *netdev_ops;
	const struct ethtool_ops *ethtool_ops;
#ifdef CONFIG_NET_SWITCHDEV
	const struct switchdev_ops *switchdev_ops;
#endif
#ifdef CONFIG_NET_L3_MASTER_DEV
	const struct l3mdev_ops	*l3mdev_ops;
#endif
#if IS_ENABLED(CONFIG_IPV6)
	const struct ndisc_ops *ndisc_ops;
#endif

	const struct header_ops *header_ops;

	unsigned int		flags;
	unsigned int		priv_flags;

	unsigned short		gflags;
	unsigned short		padded;

	unsigned char		operstate;
	unsigned char		link_mode;

	unsigned char		if_port;
	unsigned char		dma;

	unsigned int		mtu;
	unsigned int		min_mtu;
	unsigned int		max_mtu;
	unsigned short		type;
	unsigned short		hard_header_len;
	unsigned short		min_header_len;

	unsigned short		needed_headroom;
	unsigned short		needed_tailroom;

	/* Interface address info. */
	unsigned char		perm_addr[MAX_ADDR_LEN];
	unsigned char		addr_assign_type;
	unsigned char		addr_len;
	unsigned short		neigh_priv_len;
	unsigned short		dev_id;
	unsigned short		dev_port;
	spinlock_t		addr_list_lock;
	unsigned char		name_assign_type;
	bool			uc_promisc;
	struct netdev_hw_addr_list	uc;
	struct netdev_hw_addr_list	mc;
	struct netdev_hw_addr_list	dev_addrs;

#ifdef CONFIG_SYSFS
	struct kset		*queues_kset;
#endif
	unsigned int		promiscuity;
	unsigned int		allmulti;


	/* Protocol-specific pointers */

#if IS_ENABLED(CONFIG_VLAN_8021Q)
	struct vlan_info __rcu	*vlan_info;
#endif
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_switch_tree	*dsa_ptr;
#endif
#if IS_ENABLED(CONFIG_TIPC)
	struct tipc_bearer __rcu *tipc_ptr;
#endif
	void			*atalk_ptr;
	struct in_device __rcu	*ip_ptr;
	struct dn_dev __rcu	*dn_ptr;
	struct inet6_dev __rcu	*ip6_ptr;
	void			*ax25_ptr;
	struct wireless_dev	*ieee80211_ptr;
	struct wpan_dev		*ieee802154_ptr;
#if IS_ENABLED(CONFIG_MPLS_ROUTING)
	struct mpls_dev __rcu	*mpls_ptr;
#endif

/*
 * Cache lines mostly used on receive path (including eth_type_trans())
 */
	/* Interface address info used in eth_type_trans() */
	unsigned char		*dev_addr;

#ifdef CONFIG_SYSFS
	struct netdev_rx_queue	*_rx;

	unsigned int		num_rx_queues;
	unsigned int		real_num_rx_queues;
#endif

	unsigned long		gro_flush_timeout;
	rx_handler_func_t __rcu	*rx_handler;
	void __rcu		*rx_handler_data;

#ifdef CONFIG_NET_CLS_ACT
	struct tcf_proto __rcu	*ingress_cl_list;
#endif
	struct netdev_queue __rcu *ingress_queue;
#ifdef CONFIG_NETFILTER_INGRESS
	struct nf_hook_entry __rcu *nf_hooks_ingress;
#endif

	unsigned char		broadcast[MAX_ADDR_LEN];
#ifdef CONFIG_RFS_ACCEL
	struct cpu_rmap		*rx_cpu_rmap;
#endif
	struct hlist_node	index_hlist;

/*
 * Cache lines mostly used on transmit path
 */
	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;
	unsigned int		num_tx_queues;
	unsigned int		real_num_tx_queues;
	struct Qdisc		*qdisc;
#ifdef CONFIG_NET_SCHED
	DECLARE_HASHTABLE	(qdisc_hash, 4);
#endif
	unsigned long		tx_queue_len;
	spinlock_t		tx_global_lock;
	int			watchdog_timeo;

#ifdef CONFIG_XPS
	struct xps_dev_maps __rcu *xps_maps;
#endif
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_proto __rcu	*egress_cl_list;
#endif

	/* These may be needed for future network-power-down code. */
	struct timer_list	watchdog_timer;

	int __percpu		*pcpu_refcnt;
	struct list_head	todo_list;

	struct list_head	link_watch_list;

	enum { NETREG_UNINITIALIZED=0,
	       NETREG_REGISTERED,	/* completed register_netdevice */
	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
	       NETREG_UNREGISTERED,	/* completed unregister todo */
	       NETREG_RELEASED,		/* called free_netdev */
	       NETREG_DUMMY,		/* dummy device for NAPI poll */
	} reg_state:8;

	bool dismantle;

	enum {
		RTNL_LINK_INITIALIZED,
		RTNL_LINK_INITIALIZING,
	} rtnl_link_state:16;

	void (*destructor)(struct net_device *dev);

#ifdef CONFIG_NETPOLL
	struct netpoll_info __rcu	*npinfo;
#endif

	possible_net_t			nd_net;

	/* mid-layer private */
	union {
		void					*ml_priv;
		struct pcpu_lstats __percpu		*lstats;
		struct pcpu_sw_netstats __percpu	*tstats;
		struct pcpu_dstats __percpu		*dstats;
		struct pcpu_vstats __percpu		*vstats;
	};

#if IS_ENABLED(CONFIG_GARP)
	struct garp_port __rcu	*garp_port;
#endif
#if IS_ENABLED(CONFIG_MRP)
	struct mrp_port __rcu	*mrp_port;
#endif

	struct device		dev;
	const struct attribute_group *sysfs_groups[4];
	const struct attribute_group *sysfs_rx_queue_group;

	const struct rtnl_link_ops *rtnl_link_ops;

	/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE		65536
	unsigned int		gso_max_size;
#define GSO_MAX_SEGS		65535
	u16			gso_max_segs;

#ifdef CONFIG_DCB
	const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
	u8			num_tc;
	struct netdev_tc_txq	tc_to_txq[TC_MAX_QUEUE];
	u8			prio_tc_map[TC_BITMASK + 1];

#if IS_ENABLED(CONFIG_FCOE)
	unsigned int		fcoe_ddp_xid;
#endif
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
	struct netprio_map __rcu *priomap;
#endif
	struct phy_device	*phydev;
	struct lock_class_key	*qdisc_tx_busylock;
	struct lock_class_key	*qdisc_running_key;
	bool			proto_down;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)

#define	NETDEV_ALIGN		32

static inline
int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
{
	return dev->prio_tc_map[prio & TC_BITMASK];
}

static inline
int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

	dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
	return 0;
}

int netdev_txq_to_tc(struct net_device *dev, unsigned int txq);
void netdev_reset_tc(struct net_device *dev);
int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset);
int netdev_set_num_tc(struct net_device *dev, u8 num_tc);

static inline
int netdev_get_num_tc(struct net_device *dev)
{
	return dev->num_tc;
}
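
/*
 * Example (sketch): splitting 8 TX queues into two traffic classes and
 * steering priorities 0-3 to TC0 and 4-7 to TC1:
 *
 *	netdev_set_num_tc(dev, 2);
 *	netdev_set_tc_queue(dev, 0, 4, 0);	// TC0: queues 0-3
 *	netdev_set_tc_queue(dev, 1, 4, 4);	// TC1: queues 4-7
 *	for (prio = 0; prio <= TC_BITMASK; prio++)
 *		netdev_set_prio_tc_map(dev, prio, prio < 4 ? 0 : 1);
 */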

static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
					 unsigned int index)
{
	return &dev->_tx[index];
}

static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
						    const struct sk_buff *skb)
{
	return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
}

static inline void netdev_for_each_tx_queue(struct net_device *dev,
					    void (*f)(struct net_device *,
						      struct netdev_queue *,
						      void *),
					    void *arg)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		f(dev, &dev->_tx[i], arg);
}

#define netdev_lockdep_set_classes(dev)				\
{								\
	static struct lock_class_key qdisc_tx_busylock_key;	\
	static struct lock_class_key qdisc_running_key;		\
	static struct lock_class_key qdisc_xmit_lock_key;	\
	static struct lock_class_key dev_addr_list_lock_key;	\
	unsigned int i;						\
								\
	(dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key;	\
	(dev)->qdisc_running_key = &qdisc_running_key;		\
	lockdep_set_class(&(dev)->addr_list_lock,		\
			  &dev_addr_list_lock_key);		\
	for (i = 0; i < (dev)->num_tx_queues; i++)		\
		lockdep_set_class(&(dev)->_tx[i]._xmit_lock,	\
				  &qdisc_xmit_lock_key);	\
}

struct netdev_queue *netdev_pick_tx(struct net_device *dev,
				    struct sk_buff *skb,
				    void *accel_priv);

/* returns the headroom that the master device needs to take in account
 * when forwarding to this dev
 */
static inline unsigned netdev_get_fwd_headroom(struct net_device *dev)
{
	return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom;
}

static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr)
{
	if (dev->netdev_ops->ndo_set_rx_headroom)
		dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr);
}

/* set the device rx headroom to the dev's default */
static inline void netdev_reset_rx_headroom(struct net_device *dev)
{
	netdev_set_rx_headroom(dev, -1);
}

/*
 * Net namespace inlines
 */
static inline
struct net *dev_net(const struct net_device *dev)
{
	return read_pnet(&dev->nd_net);
}

static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
	write_pnet(&dev->nd_net, net);
}

static inline bool netdev_uses_dsa(struct net_device *dev)
{
#if IS_ENABLED(CONFIG_NET_DSA)
	if (dev->dsa_ptr != NULL)
		return dsa_uses_tagged_protocol(dev->dsa_ptr);
#endif
	return false;
}

/**
 *	netdev_priv - access network device private data
 *	@dev: network device
 *
 * Get network device private data
 */
static inline void *netdev_priv(const struct net_device *dev)
{
	return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
}
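
/*
 * The private area is allocated together with the net_device, sized by
 * the sizeof_priv argument of alloc_netdev()/alloc_etherdev().  Sketch
 * with a hypothetical foo_priv:
 *
 *	struct net_device *dev = alloc_etherdev(sizeof(struct foo_priv));
 *	struct foo_priv *priv = netdev_priv(dev);
 *
 * Never free the pointer returned by netdev_priv() directly;
 * free_netdev(dev) releases both allocations.
 */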

/* Set the sysfs physical device reference for the network logical device
 * if set prior to registration will cause a symlink during initialization.
 */
#define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))

/* Set the sysfs device type for the network logical device to allow
 * fine-grained identification of different network device types. For
 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
 */
#define SET_NETDEV_DEVTYPE(net, devtype)	((net)->dev.type = (devtype))

/* Default NAPI poll() weight
 * Device drivers are strongly advised to not use bigger value
 */
#define NAPI_POLL_WEIGHT 64

/**
 *	netif_napi_add - initialize a NAPI context
 *	@dev:  network device
 *	@napi: NAPI context
 *	@poll: polling function
 *	@weight: default weight
 *
 * netif_napi_add() must be used to initialize a NAPI context prior to calling
 * *any* of the other NAPI-related functions.
 */
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight);

/**
 *	netif_tx_napi_add - initialize a NAPI context
 *	@dev:  network device
 *	@napi: NAPI context
 *	@poll: polling function
 *	@weight: default weight
 *
 * This variant of netif_napi_add() should be used from drivers using NAPI
 * to exclusively poll a TX queue.
 * This will avoid we add it into napi_hash[], thus polluting this hash table.
 */
static inline void netif_tx_napi_add(struct net_device *dev,
				     struct napi_struct *napi,
				     int (*poll)(struct napi_struct *, int),
				     int weight)
{
	set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
	netif_napi_add(dev, napi, poll, weight);
}

/**
 *	netif_napi_del - remove a NAPI context
 *	@napi: NAPI context
 *
 * netif_napi_del() removes a NAPI context from the network device NAPI list
 */
void netif_napi_del(struct napi_struct *napi);

struct napi_gro_cb {
	/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
	void	*frag0;

	/* Length of frag0. */
	unsigned int frag0_len;

	/* This indicates where we are processing relative to skb->data. */
	int	data_offset;

	/* This is non-zero if the packet cannot be merged with the new skb. */
	u16	flush;

	/* Save the IP ID here and check when we get to the transport layer */
	u16	flush_id;

	/* Number of segments aggregated. */
	u16	count;

	/* Start offset for remote checksum offload */
	u16	gro_remcsum_start;

	/* jiffies when first packet was created/queued */
	unsigned long age;

	/* Used in ipv6_gro_receive() and foo-over-udp */
	u16	proto;

	/* This is non-zero if the packet may be of the same flow. */
	u8	same_flow:1;

	/* Used in tunnel GRO receive */
	u8	encap_mark:1;

	/* GRO checksum is valid */
	u8	csum_valid:1;

	/* Number of checksums via CHECKSUM_UNNECESSARY */
	u8	csum_cnt:3;

	/* Free the skb? */
	u8	free:2;
#define NAPI_GRO_FREE		  1
#define NAPI_GRO_FREE_STOLEN_HEAD 2

	/* Used in foo-over-udp, set in udp[46]_gro_receive */
	u8	is_ipv6:1;

	/* Used in GRE, set in fou/gue_gro_receive */
	u8	is_fou:1;

	/* Used to determine if flush_id can be ignored */
	u8	is_atomic:1;

	/* Number of gro_receive callbacks this packet already went through */
	u8 recursion_counter:4;

	/* 1 bit hole */

	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
	__wsum	csum;

	/* used in skb_gro_receive() slow path */
	struct sk_buff *last;
};

#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)

#define GRO_RECURSION_LIMIT 15
static inline int gro_recursion_inc_test(struct sk_buff *skb)
{
	return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
}

typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
						struct sk_buff **head,
						struct sk_buff *skb)
{
	if (unlikely(gro_recursion_inc_test(skb))) {
		NAPI_GRO_CB(skb)->flush |= 1;
		return NULL;
	}

	return cb(head, skb);
}

typedef struct sk_buff **(*gro_receive_sk_t)(struct sock *, struct sk_buff **,
					     struct sk_buff *);
static inline struct sk_buff **call_gro_receive_sk(gro_receive_sk_t cb,
						   struct sock *sk,
						   struct sk_buff **head,
						   struct sk_buff *skb)
{
	if (unlikely(gro_recursion_inc_test(skb))) {
		NAPI_GRO_CB(skb)->flush |= 1;
		return NULL;
	}

	return cb(sk, head, skb);
}
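
/*
 * Example (sketch): an encapsulation protocol's gro_receive callback
 * bounding recursion by invoking the inner handler through
 * call_gro_receive(); struct foo_hdr and foo_inner_gro_receive() are
 * assumed names for the tunnel header and the inner handler.
 *
 *	static struct sk_buff **foo_gro_receive(struct sk_buff **head,
 *						struct sk_buff *skb)
 *	{
 *		skb_gro_pull(skb, sizeof(struct foo_hdr));	// outer header
 *		return call_gro_receive(foo_inner_gro_receive, head, skb);
 *	}
 */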

struct packet_type {
	__be16			type;	/* This is really htons(ether_type). */
	struct net_device	*dev;	/* NULL is wildcarded here	     */
	int			(*func) (struct sk_buff *,
					 struct net_device *,
					 struct packet_type *,
					 struct net_device *);
	bool			(*id_match)(struct packet_type *ptype,
					    struct sock *sk);
	void			*af_packet_priv;
	struct list_head	list;
};

struct offload_callbacks {
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						netdev_features_t features);
	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
						 struct sk_buff *skb);
	int			(*gro_complete)(struct sk_buff *skb, int nhoff);
};

struct packet_offload {
	__be16			 type;	/* This is really htons(ether_type). */
	u16			 priority;
	struct offload_callbacks callbacks;
	struct list_head	 list;
};

/* often modified stats are per-CPU, other are shared (netdev->stats) */
struct pcpu_sw_netstats {
	u64     rx_packets;
	u64     rx_bytes;
	u64     tx_packets;
	u64     tx_bytes;
	struct u64_stats_sync   syncp;
};

#define __netdev_alloc_pcpu_stats(type, gfp)				\
({									\
	typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
	if (pcpu_stats)	{						\
		int __cpu;						\
		for_each_possible_cpu(__cpu) {				\
			typeof(type) *stat;				\
			stat = per_cpu_ptr(pcpu_stats, __cpu);		\
			u64_stats_init(&stat->syncp);			\
		}							\
	}								\
	pcpu_stats;							\
})

#define netdev_alloc_pcpu_stats(type)					\
	__netdev_alloc_pcpu_stats(type, GFP_KERNEL)
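
/*
 * Example (sketch): allocating per-CPU stats at init time and updating
 * them from a hypothetical driver's RX path:
 *
 *	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
 *
 *	struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);
 *
 *	u64_stats_update_begin(&stats->syncp);
 *	stats->rx_packets++;
 *	stats->rx_bytes += skb->len;
 *	u64_stats_update_end(&stats->syncp);
 */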

enum netdev_lag_tx_type {
	NETDEV_LAG_TX_TYPE_UNKNOWN,
	NETDEV_LAG_TX_TYPE_RANDOM,
	NETDEV_LAG_TX_TYPE_BROADCAST,
	NETDEV_LAG_TX_TYPE_ROUNDROBIN,
	NETDEV_LAG_TX_TYPE_ACTIVEBACKUP,
	NETDEV_LAG_TX_TYPE_HASH,
};

struct netdev_lag_upper_info {
	enum netdev_lag_tx_type tx_type;
};

struct netdev_lag_lower_state_info {
	u8 link_up : 1,
	   tx_enabled : 1;
};

#include <linux/notifier.h>

/* netdevice notifier chain. Please remember to update the rtnetlink
 * notification exclusion list in rtnetlink_event() when adding new
 * types.
 */
#define NETDEV_UP	0x0001	/* For now you can't veto a device up/down */
#define NETDEV_DOWN	0x0002
#define NETDEV_REBOOT	0x0003	/* Tell a protocol stack a network interface
				   detected a hardware crash and restarted
				   - we can use this eg to kick tcp sessions
				   once done */
#define NETDEV_CHANGE	0x0004	/* Notify device state change */
#define NETDEV_REGISTER 0x0005
#define NETDEV_UNREGISTER	0x0006
#define NETDEV_CHANGEMTU	0x0007	/* notify after mtu change happened */
#define NETDEV_CHANGEADDR	0x0008
#define NETDEV_GOING_DOWN	0x0009
#define NETDEV_CHANGENAME	0x000A
#define NETDEV_FEAT_CHANGE	0x000B
#define NETDEV_BONDING_FAILOVER 0x000C
#define NETDEV_PRE_UP		0x000D
#define NETDEV_PRE_TYPE_CHANGE	0x000E
#define NETDEV_POST_TYPE_CHANGE	0x000F
#define NETDEV_POST_INIT	0x0010
#define NETDEV_UNREGISTER_FINAL 0x0011
#define NETDEV_RELEASE		0x0012
#define NETDEV_NOTIFY_PEERS	0x0013
#define NETDEV_JOIN		0x0014
#define NETDEV_CHANGEUPPER	0x0015
#define NETDEV_RESEND_IGMP	0x0016
#define NETDEV_PRECHANGEMTU	0x0017	/* notify before mtu change happened */
#define NETDEV_CHANGEINFODATA	0x0018
#define NETDEV_BONDING_INFO	0x0019
#define NETDEV_PRECHANGEUPPER	0x001A
#define NETDEV_CHANGELOWERSTATE	0x001B
#define NETDEV_UDP_TUNNEL_PUSH_INFO	0x001C
#define NETDEV_CHANGE_TX_QUEUE_LEN	0x001E	/* 0x001D is unused */

int register_netdevice_notifier(struct notifier_block *nb);
int unregister_netdevice_notifier(struct notifier_block *nb);

struct netdev_notifier_info {
	struct net_device *dev;
};

struct netdev_notifier_change_info {
	struct netdev_notifier_info info; /* must be first */
	unsigned int flags_changed;
};

struct netdev_notifier_changeupper_info {
	struct netdev_notifier_info info; /* must be first */
	struct net_device *upper_dev; /* new upper dev */
	bool master; /* is upper dev master */
	bool linking; /* is the notification for link or unlink */
	void *upper_info; /* upper dev info */
};

struct netdev_notifier_changelowerstate_info {
	struct netdev_notifier_info info; /* must be first */
	void *lower_state_info; /* is lower dev state */
};

static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
					     struct net_device *dev)
{
	info->dev = dev;
}

static inline struct net_device *
netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
{
	return info->dev;
}
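
/*
 * Example (sketch): a subsystem tracking device registration; the foo_*
 * helpers are assumptions.
 *
 *	static int foo_netdev_event(struct notifier_block *nb,
 *				    unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		switch (event) {
 *		case NETDEV_REGISTER:
 *			foo_add_device(dev);
 *			break;
 *		case NETDEV_UNREGISTER:
 *			foo_del_device(dev);
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block foo_nb = { .notifier_call = foo_netdev_event };
 *	// paired register_netdevice_notifier(&foo_nb) /
 *	// unregister_netdevice_notifier(&foo_nb) calls do the hookup
 */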

int call_netdevice_notifiers(unsigned long val, struct net_device *dev);


extern rwlock_t				dev_base_lock;		/* Device list lock */

#define for_each_netdev(net, d)		\
		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_reverse(net, d)	\
		list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_rcu(net, d)		\
		list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n)	\
		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d)		\
		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue_rcu(net, d)		\
	list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_in_bond_rcu(bond, slave)	\
		for_each_netdev_rcu(&init_net, slave)	\
			if (netdev_master_upper_dev_get_rcu(slave) == (bond))
#define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)

static inline struct net_device *next_net_device(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = dev->dev_list.next;
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *next_net_device_rcu(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = rcu_dereference(list_next_rcu(&dev->dev_list));
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *first_net_device(struct net *net)
{
	return list_empty(&net->dev_base_head) ? NULL :
		net_device_entry(net->dev_base_head.next);
}

static inline struct net_device *first_net_device_rcu(struct net *net)
{
	struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));

	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

int netdev_boot_setup_check(struct net_device *dev);
unsigned long netdev_boot_base(const char *prefix, int unit);
struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *hwaddr);
struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
void dev_add_pack(struct packet_type *pt);
void dev_remove_pack(struct packet_type *pt);
void __dev_remove_pack(struct packet_type *pt);
void dev_add_offload(struct packet_offload *po);
void dev_remove_offload(struct packet_offload *po);

int dev_get_iflink(const struct net_device *dev);
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
				      unsigned short mask);
struct net_device *dev_get_by_name(struct net *net, const char *name);
struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
struct net_device *__dev_get_by_name(struct net *net, const char *name);
int dev_alloc_name(struct net_device *dev, const char *name);
int dev_open(struct net_device *dev);
int dev_close(struct net_device *dev);
int dev_close_many(struct list_head *head, bool unlink);
void dev_disable_lro(struct net_device *dev);
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
int dev_queue_xmit(struct sk_buff *skb);
int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
int register_netdevice(struct net_device *dev);
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
void unregister_netdevice_many(struct list_head *head);
static inline void unregister_netdevice(struct net_device *dev)
{
	unregister_netdevice_queue(dev, NULL);
}

int netdev_refcnt_read(const struct net_device *dev);
void free_netdev(struct net_device *dev);
void netdev_freemem(struct net_device *dev);
void synchronize_net(void);
int init_dummy_netdev(struct net_device *dev);

DECLARE_PER_CPU(int, xmit_recursion);
#define XMIT_RECURSION_LIMIT	10

static inline int dev_recursion_level(void)
{
	return this_cpu_read(xmit_recursion);
}

struct net_device *dev_get_by_index(struct net *net, int ifindex);
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
int netdev_get_name(struct net *net, char *name, int ifindex);
int dev_restart(struct net_device *dev);
int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);

static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->data_offset;
}

static inline unsigned int skb_gro_len(const struct sk_buff *skb)
{
	return skb->len - NAPI_GRO_CB(skb)->data_offset;
}

static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
{
	NAPI_GRO_CB(skb)->data_offset += len;
}

static inline void *skb_gro_header_fast(struct sk_buff *skb,
					unsigned int offset)
{
	return NAPI_GRO_CB(skb)->frag0 + offset;
}

static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
{
	return NAPI_GRO_CB(skb)->frag0_len < hlen;
}

static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
{
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;
}

static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
					unsigned int offset)
{
	if (!pskb_may_pull(skb, hlen))
		return NULL;

	skb_gro_frag0_invalidate(skb);
	return skb->data + offset;
}

static inline void *skb_gro_network_header(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
	       skb_network_offset(skb);
}

static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
					  const void *start, unsigned int len)
{
	if (NAPI_GRO_CB(skb)->csum_valid)
		NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
						  csum_partial(start, len, 0));
}

/* GRO checksum functions. These are logical equivalents of the normal
 * checksum functions (in skbuff.h) except that they operate on the GRO
 * offsets and fields in sk_buff.
 */

__sum16 __skb_gro_checksum_complete(struct sk_buff *skb);

static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
}

static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
						      bool zero_okay,
						      __sum16 check)
{
	return ((skb->ip_summed != CHECKSUM_PARTIAL ||
		skb_checksum_start_offset(skb) <
		 skb_gro_offset(skb)) &&
		!skb_at_gro_remcsum_start(skb) &&
		NAPI_GRO_CB(skb)->csum_cnt == 0 &&
		(!zero_okay || check));
}

static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
							   __wsum psum)
{
	if (NAPI_GRO_CB(skb)->csum_valid &&
	    !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
		return 0;

	NAPI_GRO_CB(skb)->csum = psum;

	return __skb_gro_checksum_complete(skb);
}

static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
{
	if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
		/* Consume a checksum from CHECKSUM_UNNECESSARY */
		NAPI_GRO_CB(skb)->csum_cnt--;
	} else {
		/* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
		 * verified a new top level checksum or an encapsulated one
		 * during GRO. This saves work if we fallback to normal path.
		 */
		__skb_incr_checksum_unnecessary(skb);
	}
}

#define __skb_gro_checksum_validate(skb, proto, zero_okay, check,	\
				    compute_pseudo)			\
({									\
	__sum16 __ret = 0;						\
	if (__skb_gro_checksum_validate_needed(skb, zero_okay, check))	\
		__ret = __skb_gro_checksum_validate_complete(skb,	\
				compute_pseudo(skb, proto));		\
	if (__ret)							\
		__skb_mark_checksum_bad(skb);				\
	else								\
		skb_gro_incr_csum_unnecessary(skb);			\
	__ret;								\
})

#define skb_gro_checksum_validate(skb, proto, compute_pseudo)		\
	__skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)

#define skb_gro_checksum_validate_zero_check(skb, proto, check,	\
					     compute_pseudo)		\
	__skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)

#define skb_gro_checksum_simple_validate(skb)				\
	__skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)

static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
		!NAPI_GRO_CB(skb)->csum_valid);
}

static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
					      __sum16 check, __wsum pseudo)
{
	NAPI_GRO_CB(skb)->csum = ~pseudo;
	NAPI_GRO_CB(skb)->csum_valid = 1;
}

#define skb_gro_checksum_try_convert(skb, proto, check, compute_pseudo)	\
do {									\
	if (__skb_gro_checksum_convert_check(skb))			\
		__skb_gro_checksum_convert(skb, check,			\
					   compute_pseudo(skb, proto));	\
} while (0)

struct gro_remcsum {
	int offset;
	__wsum delta;
};

static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
{
	grc->offset = 0;
	grc->delta = 0;
}

static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
					    unsigned int off, size_t hdrlen,
					    int start, int offset,
					    struct gro_remcsum *grc,
					    bool nopartial)
{
	__wsum delta;
	size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);

	BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);

	if (!nopartial) {
		NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
		return ptr;
	}

	ptr = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, off + plen)) {
		ptr = skb_gro_header_slow(skb, off + plen, off);
		if (!ptr)
			return NULL;
	}

	delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
			       start, offset);

	/* Adjust skb->csum since we changed the packet */
	NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);

	grc->offset = off + hdrlen + offset;
	grc->delta = delta;

	return ptr;
}

static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
					   struct gro_remcsum *grc)
{
	void *ptr;
	size_t plen = grc->offset + sizeof(u16);

	if (!grc->delta)
		return;

	ptr = skb_gro_header_fast(skb, grc->offset);
	if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
		ptr = skb_gro_header_slow(skb, plen, grc->offset);
		if (!ptr)
			return;
	}

	remcsum_unadjust((__sum16 *)ptr, grc->delta);
}

#ifdef CONFIG_XFRM_OFFLOAD
static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
{
	if (PTR_ERR(pp) != -EINPROGRESS)
		NAPI_GRO_CB(skb)->flush |= flush;
}
#else
static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
{
	NAPI_GRO_CB(skb)->flush |= flush;
}
#endif

static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
				  unsigned short type,
				  const void *daddr, const void *saddr,
				  unsigned int len)
{
	if (!dev->header_ops || !dev->header_ops->create)
		return 0;

	return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
}

static inline int dev_parse_header(const struct sk_buff *skb,
				   unsigned char *haddr)
{
	const struct net_device *dev = skb->dev;

	if (!dev->header_ops || !dev->header_ops->parse)
		return 0;
	return dev->header_ops->parse(skb, haddr);
}

/* ll_header must have at least hard_header_len allocated */
static inline bool dev_validate_header(const struct net_device *dev,
				       char *ll_header, int len)
{
	if (likely(len >= dev->hard_header_len))
		return true;
	if (len < dev->min_header_len)
		return false;

	if (capable(CAP_SYS_RAWIO)) {
		memset(ll_header + len, 0, dev->hard_header_len - len);
		return true;
	}

	if (dev->header_ops && dev->header_ops->validate)
		return dev->header_ops->validate(ll_header, len);

	return false;
}
2713
2714typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
2715int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
2716static inline int unregister_gifconf(unsigned int family)
2717{
2718 return register_gifconf(family, NULL);
2719}
2720
2721#ifdef CONFIG_NET_FLOW_LIMIT
2722#define FLOW_LIMIT_HISTORY (1 << 7)
2723struct sd_flow_limit {
2724 u64 count;
2725 unsigned int num_buckets;
2726 unsigned int history_head;
2727 u16 history[FLOW_LIMIT_HISTORY];
2728 u8 buckets[];
2729};
2730
2731extern int netdev_flow_limit_table_len;
2732#endif
2733
/*
 * Incoming packets are placed on per-CPU queues
 */
struct softnet_data {
	struct list_head	poll_list;
	struct sk_buff_head	process_queue;

	/* stats */
	unsigned int		processed;
	unsigned int		time_squeeze;
	unsigned int		received_rps;
#ifdef CONFIG_RPS
	struct softnet_data	*rps_ipi_list;
#endif
#ifdef CONFIG_NET_FLOW_LIMIT
	struct sd_flow_limit __rcu *flow_limit;
#endif
	struct Qdisc		*output_queue;
	struct Qdisc		**output_queue_tailp;
	struct sk_buff		*completion_queue;

#ifdef CONFIG_RPS
	/* input_queue_head should be written by cpu owning this struct,
	 * and only read by other cpus. Worth using a cache line.
	 */
	unsigned int		input_queue_head ____cacheline_aligned_in_smp;

	/* Elements below can be accessed between CPUs for RPS/RFS */
	struct call_single_data	csd ____cacheline_aligned_in_smp;
	struct softnet_data	*rps_ipi_next;
	unsigned int		cpu;
	unsigned int		input_queue_tail;
#endif
	unsigned int		dropped;
	struct sk_buff_head	input_pkt_queue;
	struct napi_struct	backlog;

};

static inline void input_queue_head_incr(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	sd->input_queue_head++;
#endif
}

static inline void input_queue_tail_incr_save(struct softnet_data *sd,
					      unsigned int *qtail)
{
#ifdef CONFIG_RPS
	*qtail = ++sd->input_queue_tail;
#endif
}

DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);

void __netif_schedule(struct Qdisc *q);
void netif_schedule_queue(struct netdev_queue *txq);

static inline void netif_tx_schedule_all(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		netif_schedule_queue(netdev_get_tx_queue(dev, i));
}

static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
	clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 *	netif_start_queue - allow transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 */
static inline void netif_start_queue(struct net_device *dev)
{
	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_start_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_start_queue(txq);
	}
}

void netif_tx_wake_queue(struct netdev_queue *dev_queue);

/**
 *	netif_wake_queue - restart transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are available.
 */
static inline void netif_wake_queue(struct net_device *dev)
{
	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_wake_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_wake_queue(txq);
	}
}

static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
	set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 *	netif_stop_queue - stop transmitted packets
 *	@dev: network device
 *
 *	Stop upper layers calling the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are unavailable.
 */
static inline void netif_stop_queue(struct net_device *dev)
{
	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}

void netif_tx_stop_all_queues(struct net_device *dev);

static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
	return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 *	netif_queue_stopped - test if transmit queue is flowblocked
 *	@dev: network device
 *
 *	Test if transmit queue on device is currently unable to send.
 */
static inline bool netif_queue_stopped(const struct net_device *dev)
{
	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}

static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_ANY_XOFF;
}

static inline bool
netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
}

static inline bool
netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
}

/**
 *	netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
 *	@dev_queue: pointer to transmit queue
 *
 * BQL enabled drivers might use this helper in their ndo_start_xmit(),
 * to give appropriate hint to the CPU.
 */
static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_BQL
	prefetchw(&dev_queue->dql.num_queued);
#endif
}

/**
 *	netdev_txq_bql_complete_prefetchw - prefetch bql data for write
 *	@dev_queue: pointer to transmit queue
 *
 * BQL enabled drivers might use this helper in their TX completion path,
 * to give appropriate hint to the CPU.
 */
static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_BQL
	prefetchw(&dev_queue->dql.limit);
#endif
}

static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
					unsigned int bytes)
{
#ifdef CONFIG_BQL
	dql_queued(&dev_queue->dql, bytes);

	if (likely(dql_avail(&dev_queue->dql) >= 0))
		return;

	set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);

	/*
	 * The XOFF flag must be set before checking the dql_avail below,
	 * because in netdev_tx_completed_queue we update the dql_completed
	 * before checking the XOFF flag.
	 */
	smp_mb();

	/* check again in case another CPU has just made room avail */
	if (unlikely(dql_avail(&dev_queue->dql) >= 0))
		clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
#endif
}

/**
 *	netdev_sent_queue - report the number of bytes queued to hardware
 *	@dev: network device
 *	@bytes: number of bytes queued to the hardware device queue
 *
 *	Report the number of bytes queued for sending/completion to the network
 *	device hardware queue. @bytes should be a good approximation and should
 *	exactly match netdev_completed_queue() @bytes
 */
static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
{
	netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
}

static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
					     unsigned int pkts, unsigned int bytes)
{
#ifdef CONFIG_BQL
	if (unlikely(!bytes))
		return;

	dql_completed(&dev_queue->dql, bytes);

	/*
	 * Without the memory barrier there is a small possibility that
	 * netdev_tx_sent_queue will miss the update of dql_avail
	 * and cause the queue to be stopped forever
	 */
	smp_mb();

	if (dql_avail(&dev_queue->dql) < 0)
		return;

	if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
		netif_schedule_queue(dev_queue);
#endif
}

/**
 *	netdev_completed_queue - report bytes and packets completed by device
 *	@dev: network device
 *	@pkts: actual number of packets sent over the medium
 *	@bytes: actual number of bytes sent over the medium
 *
 *	Report the number of bytes and packets processed by the network device
 *	hardware queue over the physical medium, @bytes must exactly match the
 *	@bytes amount passed to netdev_sent_queue()
 */
static inline void netdev_completed_queue(struct net_device *dev,
					  unsigned int pkts, unsigned int bytes)
{
	netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
}

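/* Example: how the BQL hooks above pair up in a driver.  A sketch, not a
 * complete driver: "ring", desc_is_done() and reclaim_desc() are
 * hypothetical.  The invariant is that every byte reported through
 * netdev_tx_sent_queue() from ->ndo_start_xmit() is later reported exactly
 * once through netdev_tx_completed_queue() from the TX completion path;
 * mismatched accounting either stalls the queue or disables BQL.
 *
 *	// in ->ndo_start_xmit(), after posting the skb to hardware:
 *	netdev_tx_sent_queue(txq, skb->len);
 *
 *	// in the TX completion (NAPI poll) path:
 *	unsigned int pkts = 0, bytes = 0;
 *
 *	while (desc_is_done(ring)) {
 *		bytes += ring->skb->len;
 *		pkts++;
 *		reclaim_desc(ring);
 *	}
 *	netdev_tx_completed_queue(txq, pkts, bytes);
 */
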
static inline void netdev_tx_reset_queue(struct netdev_queue *q)
{
#ifdef CONFIG_BQL
	clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
	dql_reset(&q->dql);
#endif
}

/**
 *	netdev_reset_queue - reset the packets and bytes count of a network device
 *	@dev_queue: network device
 *
 *	Reset the bytes and packet count of a network device and clear the
 *	software flow control OFF bit for this network device
 */
static inline void netdev_reset_queue(struct net_device *dev_queue)
{
	netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
}

/**
 *	netdev_cap_txqueue - check if selected tx queue exceeds device queues
 *	@dev: network device
 *	@queue_index: given tx queue index
 *
 *	Returns 0 if given tx queue index >= number of device tx queues,
 *	otherwise returns the originally passed tx queue index.
 */
static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
		net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
				     dev->name, queue_index,
				     dev->real_num_tx_queues);
		return 0;
	}

	return queue_index;
}

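/* Example: honouring a recorded RX queue from ->ndo_select_queue() without
 * trusting it.  A sketch under the current ndo_select_queue() signature;
 * "my_select_queue" is hypothetical and "fallback" is the stack-provided
 * default selector.
 *
 *	static u16 my_select_queue(struct net_device *dev, struct sk_buff *skb,
 *				   void *accel_priv,
 *				   select_queue_fallback_t fallback)
 *	{
 *		if (skb_rx_queue_recorded(skb))
 *			return netdev_cap_txqueue(dev, skb_get_rx_queue(skb));
 *		return fallback(dev, skb);
 *	}
 */
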
/**
 *	netif_running - test if up
 *	@dev: network device
 *
 *	Test if the device has been brought up.
 */
static inline bool netif_running(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}

/*
 * Routines to manage the subqueues on a device.  We only need start,
 * stop, and a check if it's stopped.  All other device management is
 * done at the overall netdevice level.
 * Also test the device if we're multiqueue.
 */

/**
 *	netif_start_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Start individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	netif_tx_start_queue(txq);
}

/**
 *	netif_stop_subqueue - stop sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Stop individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
	netif_tx_stop_queue(txq);
}

/**
 *	__netif_subqueue_stopped - test status of subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Check individual transmit queue of a device with multiple transmit queues.
 */
static inline bool __netif_subqueue_stopped(const struct net_device *dev,
					    u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	return netif_tx_queue_stopped(txq);
}

static inline bool netif_subqueue_stopped(const struct net_device *dev,
					  struct sk_buff *skb)
{
	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}

/**
 *	netif_wake_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Resume individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	netif_tx_wake_queue(txq);
}

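/* Example: the usual per-queue stop/wake pattern built from the helpers
 * above.  A sketch; "ring", ring_free_descs() and WAKE_THRESHOLD are
 * hypothetical.  The xmit path stops its own subqueue when descriptors run
 * low and the completion path wakes it once enough were reclaimed:
 *
 *	// in ->ndo_start_xmit(), qidx = skb_get_queue_mapping(skb):
 *	if (ring_free_descs(ring) < MAX_SKB_FRAGS + 1)
 *		netif_stop_subqueue(dev, qidx);
 *
 *	// in the TX completion path:
 *	if (__netif_subqueue_stopped(dev, qidx) &&
 *	    ring_free_descs(ring) >= WAKE_THRESHOLD)
 *		netif_wake_subqueue(dev, qidx);
 */
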
#ifdef CONFIG_XPS
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
			u16 index);
#else
static inline int netif_set_xps_queue(struct net_device *dev,
				      const struct cpumask *mask,
				      u16 index)
{
	return 0;
}
#endif

u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
		  unsigned int num_tx_queues);

/*
 * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
 * as a distribution range limit for the returned value.
 */
static inline u16 skb_tx_hash(const struct net_device *dev,
			      struct sk_buff *skb)
{
	return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
}

/**
 *	netif_is_multiqueue - test if device has multiple transmit queues
 *	@dev: network device
 *
 * Check if device has multiple transmit queues
 */
static inline bool netif_is_multiqueue(const struct net_device *dev)
{
	return dev->num_tx_queues > 1;
}

int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);

#ifdef CONFIG_SYSFS
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
#else
static inline int netif_set_real_num_rx_queues(struct net_device *dev,
					       unsigned int rxq)
{
	return 0;
}
#endif

#ifdef CONFIG_SYSFS
static inline unsigned int get_netdev_rx_queue_index(
		struct netdev_rx_queue *queue)
{
	struct net_device *dev = queue->dev;
	int index = queue - dev->_rx;

	BUG_ON(index >= dev->num_rx_queues);
	return index;
}
#endif

#define DEFAULT_MAX_NUM_RSS_QUEUES	(8)
int netif_get_num_default_rss_queues(void);

enum skb_free_reason {
	SKB_REASON_CONSUMED,
	SKB_REASON_DROPPED,
};

void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);

/*
 * It is not allowed to call kfree_skb() or consume_skb() from hardware
 * interrupt context or with hardware interrupts being disabled.
 * (in_irq() || irqs_disabled())
 *
 * We provide four helpers that can be used in following contexts :
 *
 * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
 *  replacing kfree_skb(skb)
 *
 * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
 *  Typically used in place of consume_skb(skb) in TX completion path
 *
 * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
 *  replacing kfree_skb(skb)
 *
 * dev_consume_skb_any(skb) when caller doesn't know its current irq context,
 *  replacing consume_skb(skb)
 */
static inline void dev_kfree_skb_irq(struct sk_buff *skb)
{
	__dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
}

static inline void dev_consume_skb_irq(struct sk_buff *skb)
{
	__dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
}

static inline void dev_kfree_skb_any(struct sk_buff *skb)
{
	__dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
}

static inline void dev_consume_skb_any(struct sk_buff *skb)
{
	__dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
}

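/* Example: picking the right helper in a TX completion handler that may run
 * in hard-IRQ context.  A sketch; "tx_ok" stands for whatever status the
 * hardware reports.  Using the consume variant for successfully sent
 * packets keeps drop monitoring from flagging normal traffic as drops:
 *
 *	if (tx_ok)
 *		dev_consume_skb_any(skb);	// normal completion
 *	else
 *		dev_kfree_skb_any(skb);		// accounted as a drop
 */
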
int netif_rx(struct sk_buff *skb);
int netif_rx_ni(struct sk_buff *skb);
int netif_receive_skb(struct sk_buff *skb);
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
void napi_gro_flush(struct napi_struct *napi, bool flush_old);
struct sk_buff *napi_get_frags(struct napi_struct *napi);
gro_result_t napi_gro_frags(struct napi_struct *napi);
struct packet_offload *gro_find_receive_by_type(__be16 type);
struct packet_offload *gro_find_complete_by_type(__be16 type);

static inline void napi_free_frags(struct napi_struct *napi)
{
	kfree_skb(napi->skb);
	napi->skb = NULL;
}

bool netdev_is_rx_handler_busy(struct net_device *dev);
int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data);
void netdev_rx_handler_unregister(struct net_device *dev);

bool dev_valid_name(const char *name);
int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
int dev_ethtool(struct net *net, struct ifreq *);
unsigned int dev_get_flags(const struct net_device *);
int __dev_change_flags(struct net_device *, unsigned int flags);
int dev_change_flags(struct net_device *, unsigned int);
void __dev_notify_flags(struct net_device *, unsigned int old_flags,
			unsigned int gchanges);
int dev_change_name(struct net_device *, const char *);
int dev_set_alias(struct net_device *, const char *, size_t);
int dev_change_net_namespace(struct net_device *, struct net *, const char *);
int dev_set_mtu(struct net_device *, int);
void dev_set_group(struct net_device *, int);
int dev_set_mac_address(struct net_device *, struct sockaddr *);
int dev_change_carrier(struct net_device *, bool new_carrier);
int dev_get_phys_port_id(struct net_device *dev,
			 struct netdev_phys_item_id *ppid);
int dev_get_phys_port_name(struct net_device *dev,
			   char *name, size_t len);
int dev_change_proto_down(struct net_device *dev, bool proto_down);
int dev_change_xdp_fd(struct net_device *dev, int fd, u32 flags);
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
				    struct netdev_queue *txq, int *ret);
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
bool is_skb_forwardable(const struct net_device *dev,
			const struct sk_buff *skb);

static __always_inline int ____dev_forward_skb(struct net_device *dev,
					       struct sk_buff *skb)
{
	if (skb_orphan_frags(skb, GFP_ATOMIC) ||
	    unlikely(!is_skb_forwardable(dev, skb))) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	skb_scrub_packet(skb, true);
	skb->priority = 0;
	return 0;
}

void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);

extern int		netdev_budget;

/* Called by rtnetlink.c:rtnl_unlock() */
void netdev_run_todo(void);

/**
 *	dev_put - release reference to device
 *	@dev: network device
 *
 * Release reference to device to allow it to be freed.
 */
static inline void dev_put(struct net_device *dev)
{
	this_cpu_dec(*dev->pcpu_refcnt);
}

/**
 *	dev_hold - get reference to device
 *	@dev: network device
 *
 * Hold reference to device to keep it from being freed.
 */
static inline void dev_hold(struct net_device *dev)
{
	this_cpu_inc(*dev->pcpu_refcnt);
}

/* Carrier loss detection, dial on demand. The functions netif_carrier_on
 * and netif_carrier_off may be called from IRQ context, but it is the
 * caller who is responsible for serialization of these calls.
 *
 * The name carrier is inappropriate, these functions should really be
 * called netif_lowerlayer_*() because they represent the state of any
 * layer below the link layer. The name carrier is historical (analog
 * modems).
 */
void linkwatch_init_dev(struct net_device *dev);
void linkwatch_fire_event(struct net_device *dev);
void linkwatch_forget_dev(struct net_device *dev);

/**
 *	netif_carrier_ok - test if carrier present
 *	@dev: network device
 *
 * Check if carrier is present on device
 */
static inline bool netif_carrier_ok(const struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}

unsigned long dev_trans_start(struct net_device *dev);

void __netdev_watchdog_up(struct net_device *dev);

void netif_carrier_on(struct net_device *dev);

void netif_carrier_off(struct net_device *dev);

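/* Example: reporting link state from a driver's link-change handler (a
 * "link status" interrupt or a PHY poll).  A sketch; "link_up" is whatever
 * the hardware reports.  Both helpers may be called from IRQ context and
 * only fire a linkwatch event when the state actually changes:
 *
 *	if (link_up)
 *		netif_carrier_on(dev);
 *	else
 *		netif_carrier_off(dev);
 */
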
/**
 *	netif_dormant_on - mark device as dormant.
 *	@dev: network device
 *
 *	Mark device as dormant (as per RFC2863).
 *
 *	The dormant state indicates that the relevant interface is not
 *	actually in a condition to pass packets (i.e., it is not 'up') but is
 *	in a "pending" state, waiting for some external event.  For "on-
 *	demand" interfaces, this new state identifies the situation where the
 *	interface is waiting for events to place it in the up state.
 */
static inline void netif_dormant_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant_off - set device as not dormant.
 *	@dev: network device
 *
 *	Device is not in dormant state.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant - test if device is dormant
 *	@dev: network device
 *
 *	Check if device is dormant.
 */
static inline bool netif_dormant(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_DORMANT, &dev->state);
}

/**
 *	netif_oper_up - test if device is operational
 *	@dev: network device
 *
 * Check if carrier is operational
 */
static inline bool netif_oper_up(const struct net_device *dev)
{
	return (dev->operstate == IF_OPER_UP ||
		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}

/**
 *	netif_device_present - is device available or removed
 *	@dev: network device
 *
 * Check if device has not been removed from system.
 */
static inline bool netif_device_present(struct net_device *dev)
{
	return test_bit(__LINK_STATE_PRESENT, &dev->state);
}

void netif_device_detach(struct net_device *dev);

void netif_device_attach(struct net_device *dev);

/*
 * Network interface message level settings
 */

enum {
	NETIF_MSG_DRV		= 0x0001,
	NETIF_MSG_PROBE		= 0x0002,
	NETIF_MSG_LINK		= 0x0004,
	NETIF_MSG_TIMER		= 0x0008,
	NETIF_MSG_IFDOWN	= 0x0010,
	NETIF_MSG_IFUP		= 0x0020,
	NETIF_MSG_RX_ERR	= 0x0040,
	NETIF_MSG_TX_ERR	= 0x0080,
	NETIF_MSG_TX_QUEUED	= 0x0100,
	NETIF_MSG_INTR		= 0x0200,
	NETIF_MSG_TX_DONE	= 0x0400,
	NETIF_MSG_RX_STATUS	= 0x0800,
	NETIF_MSG_PKTDATA	= 0x1000,
	NETIF_MSG_HW		= 0x2000,
	NETIF_MSG_WOL		= 0x4000,
};

#define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)

static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
	/* use default */
	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
		return default_msg_enable_bits;
	/* debug value of 0 means silence */
	if (debug_value == 0)
		return 0;
	/* set low N bits */
	return (1 << debug_value) - 1;
}

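/* Example: the conventional way drivers wire netif_msg_init() to a module
 * parameter.  A sketch assuming a driver-private struct with the
 * "msg_enable" field that the netif_msg_*() tests above expect:
 *
 *	static int debug = -1;		// -1 selects the defaults below
 *	module_param(debug, int, 0);
 *
 *	// in the probe routine:
 *	priv->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV |
 *					  NETIF_MSG_PROBE | NETIF_MSG_LINK);
 *
 *	// later, gated logging:
 *	if (netif_msg_link(priv))
 *		netdev_info(dev, "link is up\n");
 */
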
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
	spin_lock(&txq->_xmit_lock);
	txq->xmit_lock_owner = cpu;
}

static inline bool __netif_tx_acquire(struct netdev_queue *txq)
{
	__acquire(&txq->_xmit_lock);
	return true;
}

static inline void __netif_tx_release(struct netdev_queue *txq)
{
	__release(&txq->_xmit_lock);
}

static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
	spin_lock_bh(&txq->_xmit_lock);
	txq->xmit_lock_owner = smp_processor_id();
}

static inline bool __netif_tx_trylock(struct netdev_queue *txq)
{
	bool ok = spin_trylock(&txq->_xmit_lock);
	if (likely(ok))
		txq->xmit_lock_owner = smp_processor_id();
	return ok;
}

static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock(&txq->_xmit_lock);
}

static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock_bh(&txq->_xmit_lock);
}

static inline void txq_trans_update(struct netdev_queue *txq)
{
	if (txq->xmit_lock_owner != -1)
		txq->trans_start = jiffies;
}

/* legacy drivers only, netdev_start_xmit() sets txq->trans_start */
static inline void netif_trans_update(struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	if (txq->trans_start != jiffies)
		txq->trans_start = jiffies;
}

/**
 *	netif_tx_lock - grab network device transmit lock
 *	@dev: network device
 *
 * Get network device transmit lock
 */
static inline void netif_tx_lock(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	spin_lock(&dev->tx_global_lock);
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* We are the only thread of execution doing a
		 * freeze, but we have to grab the _xmit_lock in
		 * order to synchronize with threads which are in
		 * the ->hard_start_xmit() handler and already
		 * checked the frozen bit.
		 */
		__netif_tx_lock(txq, cpu);
		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
		__netif_tx_unlock(txq);
	}
}

static inline void netif_tx_lock_bh(struct net_device *dev)
{
	local_bh_disable();
	netif_tx_lock(dev);
}

static inline void netif_tx_unlock(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* No need to grab the _xmit_lock here.  If the
		 * queue is not stopped for another reason, we
		 * force a schedule.
		 */
		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
		netif_schedule_queue(txq);
	}
	spin_unlock(&dev->tx_global_lock);
}

static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	netif_tx_unlock(dev);
	local_bh_enable();
}

#define HARD_TX_LOCK(dev, txq, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_lock(txq, cpu);		\
	} else {					\
		__netif_tx_acquire(txq);		\
	}						\
}

#define HARD_TX_TRYLOCK(dev, txq)			\
	(((dev->features & NETIF_F_LLTX) == 0) ?	\
		__netif_tx_trylock(txq) :		\
		__netif_tx_acquire(txq))

#define HARD_TX_UNLOCK(dev, txq) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_unlock(txq);			\
	} else {					\
		__netif_tx_release(txq);		\
	}						\
}

static inline void netif_tx_disable(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	local_bh_disable();
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		__netif_tx_lock(txq, cpu);
		netif_tx_stop_queue(txq);
		__netif_tx_unlock(txq);
	}
	local_bh_enable();
}

static inline void netif_addr_lock(struct net_device *dev)
{
	spin_lock(&dev->addr_list_lock);
}

static inline void netif_addr_lock_nested(struct net_device *dev)
{
	int subclass = SINGLE_DEPTH_NESTING;

	if (dev->netdev_ops->ndo_get_lock_subclass)
		subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);

	spin_lock_nested(&dev->addr_list_lock, subclass);
}

static inline void netif_addr_lock_bh(struct net_device *dev)
{
	spin_lock_bh(&dev->addr_list_lock);
}

static inline void netif_addr_unlock(struct net_device *dev)
{
	spin_unlock(&dev->addr_list_lock);
}

static inline void netif_addr_unlock_bh(struct net_device *dev)
{
	spin_unlock_bh(&dev->addr_list_lock);
}

/*
 * dev_addrs walker. Should be used only for read access. Call with
 * rcu_read_lock held.
 */
#define for_each_dev_addr(dev, ha) \
		list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)

/* These functions live elsewhere (drivers/net/net_init.c, but related) */
void ether_setup(struct net_device *dev);

/* Support for loadable net-drivers */
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
				    unsigned char name_assign_type,
				    void (*setup)(struct net_device *),
				    unsigned int txqs, unsigned int rxqs);
#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
	alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)

#define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \
	alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \
			 count)

int register_netdev(struct net_device *dev);
void unregister_netdev(struct net_device *dev);

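/* Example: allocating and registering a multiqueue device at probe time.
 * A minimal sketch; "struct my_priv" and "my_netdev_ops" are hypothetical.
 * (Ethernet drivers usually go through alloc_etherdev_mqs(), which wraps
 * alloc_netdev_mqs() with ether_setup().)
 *
 *	struct net_device *dev;
 *	int err;
 *
 *	dev = alloc_netdev_mqs(sizeof(struct my_priv), "eth%d",
 *			       NET_NAME_UNKNOWN, ether_setup, 8, 8);
 *	if (!dev)
 *		return -ENOMEM;
 *	dev->netdev_ops = &my_netdev_ops;
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 */
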
/* General hardware address lists handling functions */
int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
		   struct netdev_hw_addr_list *from_list, int addr_len);
void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
		      struct netdev_hw_addr_list *from_list, int addr_len);
int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
		       struct net_device *dev,
		       int (*sync)(struct net_device *, const unsigned char *),
		       int (*unsync)(struct net_device *,
				     const unsigned char *));
void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
			  struct net_device *dev,
			  int (*unsync)(struct net_device *,
					const unsigned char *));
void __hw_addr_init(struct netdev_hw_addr_list *list);

/* Functions used for device addresses handling */
int dev_addr_add(struct net_device *dev, const unsigned char *addr,
		 unsigned char addr_type);
int dev_addr_del(struct net_device *dev, const unsigned char *addr,
		 unsigned char addr_type);
void dev_addr_flush(struct net_device *dev);
int dev_addr_init(struct net_device *dev);

/* Functions used for unicast addresses handling */
int dev_uc_add(struct net_device *dev, const unsigned char *addr);
int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
int dev_uc_del(struct net_device *dev, const unsigned char *addr);
int dev_uc_sync(struct net_device *to, struct net_device *from);
int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
void dev_uc_unsync(struct net_device *to, struct net_device *from);
void dev_uc_flush(struct net_device *dev);
void dev_uc_init(struct net_device *dev);

/**
 *  __dev_uc_sync - Synchronize device's unicast list
 *  @dev:  device to sync
 *  @sync: function to call if address should be added
 *  @unsync: function to call if address should be removed
 *
 *  Add newly added addresses to the interface, and release
 *  addresses that have been deleted.
 */
static inline int __dev_uc_sync(struct net_device *dev,
				int (*sync)(struct net_device *,
					    const unsigned char *),
				int (*unsync)(struct net_device *,
					      const unsigned char *))
{
	return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
}

/**
 *  __dev_uc_unsync - Remove synchronized addresses from device
 *  @dev:  device to sync
 *  @unsync: function to call if address should be removed
 *
 *  Remove all addresses that were added to the device by dev_uc_sync().
 */
static inline void __dev_uc_unsync(struct net_device *dev,
				   int (*unsync)(struct net_device *,
						 const unsigned char *))
{
	__hw_addr_unsync_dev(&dev->uc, dev, unsync);
}

/* Functions used for multicast addresses handling */
int dev_mc_add(struct net_device *dev, const unsigned char *addr);
int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
int dev_mc_del(struct net_device *dev, const unsigned char *addr);
int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
int dev_mc_sync(struct net_device *to, struct net_device *from);
int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
void dev_mc_unsync(struct net_device *to, struct net_device *from);
void dev_mc_flush(struct net_device *dev);
void dev_mc_init(struct net_device *dev);

/**
 *  __dev_mc_sync - Synchronize device's multicast list
 *  @dev:  device to sync
 *  @sync: function to call if address should be added
 *  @unsync: function to call if address should be removed
 *
 *  Add newly added addresses to the interface, and release
 *  addresses that have been deleted.
 */
static inline int __dev_mc_sync(struct net_device *dev,
				int (*sync)(struct net_device *,
					    const unsigned char *),
				int (*unsync)(struct net_device *,
					      const unsigned char *))
{
	return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
}

/**
 *  __dev_mc_unsync - Remove synchronized addresses from device
 *  @dev:  device to sync
 *  @unsync: function to call if address should be removed
 *
 *  Remove all addresses that were added to the device by dev_mc_sync().
 */
static inline void __dev_mc_unsync(struct net_device *dev,
				   int (*unsync)(struct net_device *,
						 const unsigned char *))
{
	__hw_addr_unsync_dev(&dev->mc, dev, unsync);
}

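/* Example: how a driver with hardware filter tables typically uses the
 * sync/unsync pairs above.  A sketch; my_add_addr()/my_del_addr() stand in
 * for whatever programs the hardware filters and are not kernel API:
 *
 *	static void my_set_rx_mode(struct net_device *dev)	// ->ndo_set_rx_mode
 *	{
 *		__dev_uc_sync(dev, my_add_addr, my_del_addr);
 *		__dev_mc_sync(dev, my_add_addr, my_del_addr);
 *	}
 *
 *	// and on ->ndo_stop()/teardown:
 *	__dev_uc_unsync(dev, my_del_addr);
 *	__dev_mc_unsync(dev, my_del_addr);
 */
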
/* Functions used for secondary unicast and multicast support */
void dev_set_rx_mode(struct net_device *dev);
void __dev_set_rx_mode(struct net_device *dev);
int dev_set_promiscuity(struct net_device *dev, int inc);
int dev_set_allmulti(struct net_device *dev, int inc);
void netdev_state_change(struct net_device *dev);
void netdev_notify_peers(struct net_device *dev);
void netdev_features_change(struct net_device *dev);
/* Load a device via the kmod */
void dev_load(struct net *net, const char *name);
struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					struct rtnl_link_stats64 *storage);
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
			     const struct net_device_stats *netdev_stats);

extern int		netdev_max_backlog;
extern int		netdev_tstamp_prequeue;
extern int		weight_p;
extern int		dev_weight_rx_bias;
extern int		dev_weight_tx_bias;
extern int		dev_rx_weight;
extern int		dev_tx_weight;

bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
						 struct list_head **iter);
struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
						     struct list_head **iter);

/* iterate through upper list, must be called under RCU read lock */
#define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
	for (iter = &(dev)->adj_list.upper, \
	     updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
	     updev; \
	     updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))

int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
				  int (*fn)(struct net_device *upper_dev,
					    void *data),
				  void *data);

bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
				  struct net_device *upper_dev);

void *netdev_lower_get_next_private(struct net_device *dev,
				    struct list_head **iter);
void *netdev_lower_get_next_private_rcu(struct net_device *dev,
					struct list_head **iter);

#define netdev_for_each_lower_private(dev, priv, iter) \
	for (iter = (dev)->adj_list.lower.next, \
	     priv = netdev_lower_get_next_private(dev, &(iter)); \
	     priv; \
	     priv = netdev_lower_get_next_private(dev, &(iter)))

#define netdev_for_each_lower_private_rcu(dev, priv, iter) \
	for (iter = &(dev)->adj_list.lower, \
	     priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
	     priv; \
	     priv = netdev_lower_get_next_private_rcu(dev, &(iter)))

void *netdev_lower_get_next(struct net_device *dev,
			    struct list_head **iter);

#define netdev_for_each_lower_dev(dev, ldev, iter) \
	for (iter = (dev)->adj_list.lower.next, \
	     ldev = netdev_lower_get_next(dev, &(iter)); \
	     ldev; \
	     ldev = netdev_lower_get_next(dev, &(iter)))

struct net_device *netdev_all_lower_get_next(struct net_device *dev,
					     struct list_head **iter);
struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
						 struct list_head **iter);

int netdev_walk_all_lower_dev(struct net_device *dev,
			      int (*fn)(struct net_device *lower_dev,
					void *data),
			      void *data);
int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
				  int (*fn)(struct net_device *lower_dev,
					    void *data),
				  void *data);

void *netdev_adjacent_get_private(struct list_head *adj_list);
void *netdev_lower_get_first_private_rcu(struct net_device *dev);
struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev);
int netdev_master_upper_dev_link(struct net_device *dev,
				 struct net_device *upper_dev,
				 void *upper_priv, void *upper_info);
void netdev_upper_dev_unlink(struct net_device *dev,
			     struct net_device *upper_dev);
void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
void *netdev_lower_dev_get_private(struct net_device *dev,
				   struct net_device *lower_dev);
void netdev_lower_state_changed(struct net_device *lower_dev,
				void *lower_state_info);

/* RSS keys are 40 or 52 bytes long */
#define NETDEV_RSS_KEY_LEN 52
extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
void netdev_rss_key_fill(void *buffer, size_t len);

int dev_get_nest_level(struct net_device *dev);
int skb_checksum_help(struct sk_buff *skb);
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
				  netdev_features_t features, bool tx_path);
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features);

struct netdev_bonding_info {
	ifslave	slave;
	ifbond	master;
};

struct netdev_notifier_bonding_info {
	struct netdev_notifier_info info; /* must be first */
	struct netdev_bonding_info  bonding_info;
};

void netdev_bonding_info_change(struct net_device *dev,
				struct netdev_bonding_info *bonding_info);

static inline
struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
	return __skb_gso_segment(skb, features, true);
}
__be16 skb_network_protocol(struct sk_buff *skb, int *depth);

static inline bool can_checksum_protocol(netdev_features_t features,
					 __be16 protocol)
{
	if (protocol == htons(ETH_P_FCOE))
		return !!(features & NETIF_F_FCOE_CRC);

	/* Assume this is an IP checksum (not SCTP CRC) */

	if (features & NETIF_F_HW_CSUM) {
		/* Can checksum everything */
		return true;
	}

	switch (protocol) {
	case htons(ETH_P_IP):
		return !!(features & NETIF_F_IP_CSUM);
	case htons(ETH_P_IPV6):
		return !!(features & NETIF_F_IPV6_CSUM);
	default:
		return false;
	}
}

#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
}
#endif

void net_enable_timestamp(void);
void net_disable_timestamp(void);

#ifdef CONFIG_PROC_FS
int __init dev_proc_init(void);
#else
#define dev_proc_init() 0
#endif

static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
					      struct sk_buff *skb, struct net_device *dev,
					      bool more)
{
	skb->xmit_more = more ? 1 : 0;
	return ops->ndo_start_xmit(skb, dev);
}

static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
					    struct netdev_queue *txq, bool more)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int rc;

	rc = __netdev_start_xmit(ops, skb, dev, more);
	if (rc == NETDEV_TX_OK)
		txq_trans_update(txq);

	return rc;
}

int netdev_class_create_file_ns(struct class_attribute *class_attr,
				const void *ns);
void netdev_class_remove_file_ns(struct class_attribute *class_attr,
				 const void *ns);

static inline int netdev_class_create_file(struct class_attribute *class_attr)
{
	return netdev_class_create_file_ns(class_attr, NULL);
}

static inline void netdev_class_remove_file(struct class_attribute *class_attr)
{
	netdev_class_remove_file_ns(class_attr, NULL);
}

extern struct kobj_ns_type_operations net_ns_type_operations;

const char *netdev_drivername(const struct net_device *dev);

void linkwatch_run_queue(void);

static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
							  netdev_features_t f2)
{
	if ((f1 ^ f2) & NETIF_F_HW_CSUM) {
		if (f1 & NETIF_F_HW_CSUM)
			f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
		else
			f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
	}

	return f1 & f2;
}

static inline netdev_features_t netdev_get_wanted_features(
	struct net_device *dev)
{
	return (dev->features & ~dev->hw_features) | dev->wanted_features;
}
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask);

/* Allow TSO being used on stacked device :
 * Performing the GSO segmentation before last device
 * is a performance improvement.
 */
static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
							netdev_features_t mask)
{
	return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
}

int __netdev_update_features(struct net_device *dev);
void netdev_update_features(struct net_device *dev);
void netdev_change_features(struct net_device *dev);

void netif_stacked_transfer_operstate(const struct net_device *rootdev,
				      struct net_device *dev);

netdev_features_t passthru_features_check(struct sk_buff *skb,
					  struct net_device *dev,
					  netdev_features_t features);
netdev_features_t netif_skb_features(struct sk_buff *skb);

static inline bool net_gso_ok(netdev_features_t features, int gso_type)
{
	netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT;

	/* check flags correspondence */
	BUILD_BUG_ON(SKB_GSO_TCPV4   != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP     != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_DODGY   != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCPV6   != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_FCOE    != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_GRE     != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_IPXIP4  != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_IPXIP6  != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_SCTP    != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));

	return (features & feature) == feature;
}

static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
{
	return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
	       (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
}

static inline bool netif_needs_gso(struct sk_buff *skb,
				   netdev_features_t features)
{
	return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
		unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
			 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
}

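/* Example: the shape of the software GSO fallback implemented by the core
 * (net/core/dev.c).  A condensed sketch with error handling elided, showing
 * how netif_needs_gso() and skb_gso_segment() combine when a device lacks
 * the features a GSO skb requires:
 *
 *	if (netif_needs_gso(skb, features)) {
 *		struct sk_buff *segs = skb_gso_segment(skb, features);
 *
 *		if (IS_ERR(segs))
 *			goto drop;
 *		if (segs) {
 *			consume_skb(skb);	// replaced by the segment list
 *			skb = segs;
 *		}
 *	}
 */
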
static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	dev->gso_max_size = size;
}

static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
					int pulled_hlen, u16 mac_offset,
					int mac_len)
{
	skb->protocol = protocol;
	skb->encapsulation = 1;
	skb_push(skb, pulled_hlen);
	skb_reset_transport_header(skb);
	skb->mac_header = mac_offset;
	skb->network_header = skb->mac_header + mac_len;
	skb->mac_len = mac_len;
}

static inline bool netif_is_macsec(const struct net_device *dev)
{
	return dev->priv_flags & IFF_MACSEC;
}

static inline bool netif_is_macvlan(const struct net_device *dev)
{
	return dev->priv_flags & IFF_MACVLAN;
}

static inline bool netif_is_macvlan_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_MACVLAN_PORT;
}

static inline bool netif_is_ipvlan(const struct net_device *dev)
{
	return dev->priv_flags & IFF_IPVLAN_SLAVE;
}

static inline bool netif_is_ipvlan_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_IPVLAN_MASTER;
}

static inline bool netif_is_bond_master(const struct net_device *dev)
{
	return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
}

static inline bool netif_is_bond_slave(const struct net_device *dev)
{
	return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
}

static inline bool netif_supports_nofcs(struct net_device *dev)
{
	return dev->priv_flags & IFF_SUPP_NOFCS;
}

static inline bool netif_is_l3_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_L3MDEV_MASTER;
}

static inline bool netif_is_l3_slave(const struct net_device *dev)
{
	return dev->priv_flags & IFF_L3MDEV_SLAVE;
}

static inline bool netif_is_bridge_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_EBRIDGE;
}

static inline bool netif_is_bridge_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_BRIDGE_PORT;
}

static inline bool netif_is_ovs_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_OPENVSWITCH;
}

static inline bool netif_is_team_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_TEAM;
}

static inline bool netif_is_team_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_TEAM_PORT;
}

static inline bool netif_is_lag_master(const struct net_device *dev)
{
	return netif_is_bond_master(dev) || netif_is_team_master(dev);
}

static inline bool netif_is_lag_port(const struct net_device *dev)
{
	return netif_is_bond_slave(dev) || netif_is_team_port(dev);
}

static inline bool netif_is_rxfh_configured(const struct net_device *dev)
{
	return dev->priv_flags & IFF_RXFH_CONFIGURED;
}

/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
static inline void netif_keep_dst(struct net_device *dev)
{
	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
}

/* return true if dev can't cope with mtu frames that need vlan tag insertion */
static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
{
	/* TODO: reserve and use an additional IFF bit, if we get more users */
	return dev->priv_flags & IFF_MACSEC;
}

extern struct pernet_operations __net_initdata loopback_net_ops;

/* Logging, debugging and troubleshooting/diagnostic helpers. */

/* netdev_printk helpers, similar to dev_printk */

static inline const char *netdev_name(const struct net_device *dev)
{
	if (!dev->name[0] || strchr(dev->name, '%'))
		return "(unnamed net_device)";
	return dev->name;
}

static inline const char *netdev_reg_state(const struct net_device *dev)
{
	switch (dev->reg_state) {
	case NETREG_UNINITIALIZED: return " (uninitialized)";
	case NETREG_REGISTERED: return "";
	case NETREG_UNREGISTERING: return " (unregistering)";
	case NETREG_UNREGISTERED: return " (unregistered)";
	case NETREG_RELEASED: return " (released)";
	case NETREG_DUMMY: return " (dummy)";
	}

	WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state);
	return " (unknown)";
}

__printf(3, 4)
void netdev_printk(const char *level, const struct net_device *dev,
		   const char *format, ...);
__printf(2, 3)
void netdev_emerg(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
void netdev_alert(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
void netdev_crit(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
void netdev_err(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
void netdev_warn(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
void netdev_notice(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
void netdev_info(const struct net_device *dev, const char *format, ...);

#define MODULE_ALIAS_NETDEV(device) \
	MODULE_ALIAS("netdev-" device)

#if defined(CONFIG_DYNAMIC_DEBUG)
#define netdev_dbg(__dev, format, args...)			\
do {								\
	dynamic_netdev_dbg(__dev, format, ##args);		\
} while (0)
#elif defined(DEBUG)
#define netdev_dbg(__dev, format, args...)			\
	netdev_printk(KERN_DEBUG, __dev, format, ##args)
#else
#define netdev_dbg(__dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, __dev, format, ##args); \
})
#endif

#if defined(VERBOSE_DEBUG)
#define netdev_vdbg	netdev_dbg
#else

#define netdev_vdbg(dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, dev, format, ##args);	\
	0;							\
})
#endif

/*
 * netdev_WARN() acts like dev_printk(), but with the key difference
 * of using a WARN/WARN_ON to get the message out, including the
 * file/line information and a backtrace.
 */
#define netdev_WARN(dev, format, args...)			\
	WARN(1, "netdevice: %s%s\n" format, netdev_name(dev),	\
	     netdev_reg_state(dev), ##args)

/* netif printk helpers, similar to netdev_printk */

#define netif_printk(priv, type, level, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_printk(level, (dev), fmt, ##args);	\
} while (0)

#define netif_level(level, priv, type, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_##level(dev, fmt, ##args);		\
} while (0)

#define netif_emerg(priv, type, dev, fmt, args...)		\
	netif_level(emerg, priv, type, dev, fmt, ##args)
#define netif_alert(priv, type, dev, fmt, args...)		\
	netif_level(alert, priv, type, dev, fmt, ##args)
#define netif_crit(priv, type, dev, fmt, args...)		\
	netif_level(crit, priv, type, dev, fmt, ##args)
#define netif_err(priv, type, dev, fmt, args...)		\
	netif_level(err, priv, type, dev, fmt, ##args)
#define netif_warn(priv, type, dev, fmt, args...)		\
	netif_level(warn, priv, type, dev, fmt, ##args)
#define netif_notice(priv, type, dev, fmt, args...)		\
	netif_level(notice, priv, type, dev, fmt, ##args)
#define netif_info(priv, type, dev, fmt, args...)		\
	netif_level(info, priv, type, dev, fmt, ##args)

#if defined(CONFIG_DYNAMIC_DEBUG)
#define netif_dbg(priv, type, netdev, format, args...)		\
do {								\
	if (netif_msg_##type(priv))				\
		dynamic_netdev_dbg(netdev, format, ##args);	\
} while (0)
#elif defined(DEBUG)
#define netif_dbg(priv, type, dev, format, args...)		\
	netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
#else
#define netif_dbg(priv, type, dev, format, args...)			\
({									\
	if (0)								\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;								\
})
#endif

/* if @cond then suppress warning else print at @level */
#define netif_cond_dbg(priv, type, netdev, cond, level, fmt, args...)     \
	do {                                                              \
		if (cond)                                                 \
			netif_dbg(priv, type, netdev, fmt, ##args);       \
		else                                                      \
			netif_ ## level(priv, type, netdev, fmt, ##args); \
	} while (0)

#if defined(VERBOSE_DEBUG)
#define netif_vdbg	netif_dbg
#else
#define netif_vdbg(priv, type, dev, format, args...)		\
({								\
	if (0)							\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;							\
})
#endif

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *		sure which should go first, but I bet it won't make much
 *		difference if we are running VLANs.  The good news is that
 *		this protocol won't be in the list unless compiled in, so
 *		the average user (w/out VLANs) will not be adversely affected.
 *		--BLG
 *
 *		0800	IP
 *		8100    802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0800	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */
#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

#endif	/* _LINUX_NETDEVICE_H */