#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/timer.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/prefetch.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/workqueue.h>
#include <linux/dynamic_queue_limits.h>

#include <linux/ethtool.h>
#include <net/net_namespace.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif
#include <net/netprio_cgroup.h>

#include <linux/netdev_features.h>
#include <linux/neighbour.h>
#include <uapi/linux/netdevice.h>
#include <uapi/linux/if_bonding.h>
#include <uapi/linux/pkt_cls.h>
#include <linux/hashtable.h>

struct netpoll_info;
struct device;
struct phy_device;
struct dsa_switch_tree;

/* 802.11 specific */
struct wireless_dev;
/* 802.15.4 specific */
struct wpan_dev;
struct mpls_dev;

struct udp_tunnel_info;
struct bpf_prog;
struct xdp_buff;

void netdev_set_default_ethtool_ops(struct net_device *dev,
				    const struct ethtool_ops *ops);

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */

/*
 * Transmit return codes come from three namespaces: qdisc return codes,
 * driver transmit return codes and negative errno values.  Drivers may
 * return any of them from their transmit routine; net_xmit_eval() and
 * net_xmit_errno() below translate the qdisc codes for higher layers.
 */
#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_DROP		0x01	/* skb dropped			*/
#define NET_XMIT_CN		0x02	/* congestion notification	*/
#define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority; it prompts us to send less aggressively.
 */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)

/* Driver transmit return codes */
#define NETDEV_TX_MASK		0xf0

enum netdev_tx {
	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy */
};
typedef enum netdev_tx netdev_tx_t;
115
/*
 * dev_xmit_complete - was the skb consumed by the transmit path?
 *
 * True for every return code except NETDEV_TX_BUSY: successful
 * transmission, a negative errno and the NET_XMIT_* codes all mean the
 * driver or qdisc now owns (or has already freed) the skb.
 */
static inline bool dev_xmit_complete(int rc)
{
	if (likely(rc < NET_XMIT_MASK))
		return true;

	return false;
}
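/*
 * Illustrative sketch (not from this header): a caller that invokes the
 * driver's ndo_start_xmit() directly can use dev_xmit_complete() to tell
 * whether it still owns the skb.  requeue_skb() is a hypothetical helper;
 * the core uses its qdisc requeue machinery for this case.
 *
 *	netdev_tx_t rc = dev->netdev_ops->ndo_start_xmit(skb, dev);
 *
 *	if (dev_xmit_complete(rc))
 *		return;			(skb consumed: sent, dropped or failed)
 *	requeue_skb(txq, skb);		(NETDEV_TX_BUSY: driver kept nothing)
 */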
133
/*
 * Compute the worst-case header length according to the protocols
 * used.
 */
#if defined(CONFIG_HYPERV_NET)
# define LL_MAX_HEADER 128
#elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
# if defined(CONFIG_MAC80211_MESH)
# define LL_MAX_HEADER 128
# else
# define LL_MAX_HEADER 96
# endif
#else
# define LL_MAX_HEADER 32
#endif

#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
    !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif
157
/*
 * Old network device statistics.  Fields are native words
 * (unsigned long) so they can be read and written atomically.
 * New drivers should report 64-bit counters through ndo_get_stats64()
 * instead of filling this structure.
 */
struct net_device_stats {
	unsigned long	rx_packets;
	unsigned long	tx_packets;
	unsigned long	rx_bytes;
	unsigned long	tx_bytes;
	unsigned long	rx_errors;
	unsigned long	tx_errors;
	unsigned long	rx_dropped;
	unsigned long	tx_dropped;
	unsigned long	multicast;
	unsigned long	collisions;
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;
	unsigned long	rx_crc_errors;
	unsigned long	rx_frame_errors;
	unsigned long	rx_fifo_errors;
	unsigned long	rx_missed_errors;
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};
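/*
 * Illustrative sketch (not from this header): drivers that keep per-CPU
 * counters fold them into the 64-bit statistics in ndo_get_stats64(); the
 * legacy structure above is only used as a fallback.  foo_get_stats64()
 * is a hypothetical driver callback; dev->tstats is assumed to have been
 * allocated with netdev_alloc_pcpu_stats() (see further down this header).
 *
 *	static void foo_get_stats64(struct net_device *dev,
 *				    struct rtnl_link_stats64 *stats)
 *	{
 *		int cpu;
 *
 *		for_each_possible_cpu(cpu) {
 *			const struct pcpu_sw_netstats *tstats =
 *				per_cpu_ptr(dev->tstats, cpu);
 *			unsigned int start;
 *			u64 packets, bytes;
 *
 *			do {
 *				start = u64_stats_fetch_begin_irq(&tstats->syncp);
 *				packets = tstats->rx_packets;
 *				bytes = tstats->rx_bytes;
 *			} while (u64_stats_fetch_retry_irq(&tstats->syncp, start));
 *
 *			stats->rx_packets += packets;
 *			stats->rx_bytes += bytes;
 *		}
 *	}
 */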
188
189
190#include <linux/cache.h>
191#include <linux/skbuff.h>
192
193#ifdef CONFIG_RPS
194#include <linux/static_key.h>
195extern struct static_key rps_needed;
196extern struct static_key rfs_needed;
197#endif
198
199struct neighbour;
200struct neigh_parms;
201struct sk_buff;
202
203struct netdev_hw_addr {
204 struct list_head list;
205 unsigned char addr[MAX_ADDR_LEN];
206 unsigned char type;
207#define NETDEV_HW_ADDR_T_LAN 1
208#define NETDEV_HW_ADDR_T_SAN 2
209#define NETDEV_HW_ADDR_T_SLAVE 3
210#define NETDEV_HW_ADDR_T_UNICAST 4
211#define NETDEV_HW_ADDR_T_MULTICAST 5
212 bool global_use;
213 int sync_cnt;
214 int refcount;
215 int synced;
216 struct rcu_head rcu_head;
217};
218
219struct netdev_hw_addr_list {
220 struct list_head list;
221 int count;
222};
223
224#define netdev_hw_addr_list_count(l) ((l)->count)
225#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
226#define netdev_hw_addr_list_for_each(ha, l) \
227 list_for_each_entry(ha, &(l)->list, list)
228
229#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
230#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
231#define netdev_for_each_uc_addr(ha, dev) \
232 netdev_hw_addr_list_for_each(ha, &(dev)->uc)
233
234#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
235#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
236#define netdev_for_each_mc_addr(ha, dev) \
237 netdev_hw_addr_list_for_each(ha, &(dev)->mc)
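/*
 * Illustrative sketch (not from this header): a driver's ndo_set_rx_mode()
 * typically walks these lists; the core holds dev->addr_list_lock while
 * calling it.  struct foo_priv, FOO_MC_MAX and the foo_* helpers are
 * hypothetical driver names.
 *
 *	static void foo_set_rx_mode(struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *		struct netdev_hw_addr *ha;
 *
 *		if ((dev->flags & IFF_PROMISC) ||
 *		    netdev_mc_count(dev) > FOO_MC_MAX) {
 *			foo_enable_promisc(priv);
 *			return;
 *		}
 *		netdev_for_each_mc_addr(ha, dev)
 *			foo_write_mc_filter(priv, ha->addr);
 *	}
 */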
238
239struct hh_cache {
240 unsigned int hh_len;
241 seqlock_t hh_lock;
242
243
244#define HH_DATA_MOD 16
245#define HH_DATA_OFF(__len) \
246 (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
247#define HH_DATA_ALIGN(__len) \
248 (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
249 unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
250};
251
252
253
254
255
256
257
258
259
260#define LL_RESERVED_SPACE(dev) \
261 ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
262#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
263 ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
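/*
 * Illustrative sketch (not from this header): protocols that build their own
 * output buffers reserve LL_RESERVED_SPACE() of headroom so the device's
 * link-layer header can later be pushed in front of the payload without a
 * reallocation.  payload_len and daddr are assumed local variables.
 *
 *	skb = alloc_skb(LL_RESERVED_SPACE(dev) + payload_len +
 *			dev->needed_tailroom, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 *	skb_put(skb, payload_len);
 *	... fill the payload, then dev_hard_header(skb, dev, ETH_P_IP, daddr,
 *	    dev->dev_addr, skb->len) pushes the link-layer header ...
 */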
264
265struct header_ops {
266 int (*create) (struct sk_buff *skb, struct net_device *dev,
267 unsigned short type, const void *daddr,
268 const void *saddr, unsigned int len);
269 int (*parse)(const struct sk_buff *skb, unsigned char *haddr);
270 int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
271 void (*cache_update)(struct hh_cache *hh,
272 const struct net_device *dev,
273 const unsigned char *haddr);
274 bool (*validate)(const char *ll_header, unsigned int len);
275};
276
277
278
279
280
281
282enum netdev_state_t {
283 __LINK_STATE_START,
284 __LINK_STATE_PRESENT,
285 __LINK_STATE_NOCARRIER,
286 __LINK_STATE_LINKWATCH_PENDING,
287 __LINK_STATE_DORMANT,
288};
289
290
291
292
293
294
295struct netdev_boot_setup {
296 char name[IFNAMSIZ];
297 struct ifmap map;
298};
299#define NETDEV_BOOT_SETUP_MAX 8
300
301int __init netdev_boot_setup(char *str);
302
303
304
305
306struct napi_struct {
307
308
309
310
311
312
313 struct list_head poll_list;
314
315 unsigned long state;
316 int weight;
317 unsigned int gro_count;
318 int (*poll)(struct napi_struct *, int);
319#ifdef CONFIG_NETPOLL
320 int poll_owner;
321#endif
322 struct net_device *dev;
323 struct sk_buff *gro_list;
324 struct sk_buff *skb;
325 struct hrtimer timer;
326 struct list_head dev_list;
327 struct hlist_node napi_hash_node;
328 unsigned int napi_id;
329};
330
331enum {
332 NAPI_STATE_SCHED,
333 NAPI_STATE_MISSED,
334 NAPI_STATE_DISABLE,
335 NAPI_STATE_NPSVC,
336 NAPI_STATE_HASHED,
337 NAPI_STATE_NO_BUSY_POLL,
338 NAPI_STATE_IN_BUSY_POLL,
339};
340
341enum {
342 NAPIF_STATE_SCHED = BIT(NAPI_STATE_SCHED),
343 NAPIF_STATE_MISSED = BIT(NAPI_STATE_MISSED),
344 NAPIF_STATE_DISABLE = BIT(NAPI_STATE_DISABLE),
345 NAPIF_STATE_NPSVC = BIT(NAPI_STATE_NPSVC),
346 NAPIF_STATE_HASHED = BIT(NAPI_STATE_HASHED),
347 NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL),
348 NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL),
349};
350
351enum gro_result {
352 GRO_MERGED,
353 GRO_MERGED_FREE,
354 GRO_HELD,
355 GRO_NORMAL,
356 GRO_DROP,
357 GRO_CONSUMED,
358};
359typedef enum gro_result gro_result_t;
/*
 * enum rx_handler_result - Possible return values for rx_handlers.
 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
 *	further.
 * @RX_HANDLER_ANOTHER: Do another round in the receive path. This is used
 *	when skb->dev was changed by the rx_handler.
 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
 *
 * rx_handlers are functions called from inside __netif_receive_skb(), to do
 * special processing of the skb, prior to delivery to protocol handlers.
 */
402enum rx_handler_result {
403 RX_HANDLER_CONSUMED,
404 RX_HANDLER_ANOTHER,
405 RX_HANDLER_EXACT,
406 RX_HANDLER_PASS,
407};
408typedef enum rx_handler_result rx_handler_result_t;
409typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
410
411void __napi_schedule(struct napi_struct *n);
412void __napi_schedule_irqoff(struct napi_struct *n);
413
414static inline bool napi_disable_pending(struct napi_struct *n)
415{
416 return test_bit(NAPI_STATE_DISABLE, &n->state);
417}
418
419bool napi_schedule_prep(struct napi_struct *n);
/**
 *	napi_schedule - schedule NAPI poll
 *	@n: NAPI context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}
433
/**
 *	napi_schedule_irqoff - schedule NAPI poll
 *	@n: NAPI context
 *
 * Variant of napi_schedule(), assuming hard irqs are masked.
 */
static inline void napi_schedule_irqoff(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule_irqoff(n);
}
445
/* Try to reschedule poll. Called by irq handler. */
static inline bool napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return true;
	}
	return false;
}
455
456bool napi_complete_done(struct napi_struct *n, int work_done);
/**
 *	napi_complete - NAPI processing complete
 *	@n: NAPI context
 *
 * Mark NAPI processing as complete.  Consider using napi_complete_done()
 * instead.
 * Return false if device should avoid rearming interrupts.
 */
static inline bool napi_complete(struct napi_struct *n)
{
	return napi_complete_done(n, 0);
}
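/*
 * Illustrative sketch (not from this header) of the usual NAPI pattern:
 * the interrupt handler masks device interrupts and schedules the NAPI
 * context, and the poll routine processes at most @budget packets before
 * completing.  struct foo_priv and the foo_* functions are hypothetical.
 *
 *	static irqreturn_t foo_interrupt(int irq, void *data)
 *	{
 *		struct foo_priv *priv = data;
 *
 *		foo_disable_irqs(priv);
 *		napi_schedule(&priv->napi);
 *		return IRQ_HANDLED;
 *	}
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
 *		int work_done = foo_clean_rx_ring(priv, budget);
 *
 *		if (work_done < budget &&
 *		    napi_complete_done(napi, work_done))
 *			foo_enable_irqs(priv);
 *		return work_done;
 *	}
 */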
469
470
471
472
473
474
475
476
477
478
479
480
481
482bool napi_hash_del(struct napi_struct *napi);
483
484
485
486
487
488
489
490
491void napi_disable(struct napi_struct *n);
492
/**
 *	napi_enable - enable NAPI scheduling
 *	@n: NAPI context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_atomic();
	clear_bit(NAPI_STATE_SCHED, &n->state);
	clear_bit(NAPI_STATE_NPSVC, &n->state);
}

/**
 *	napi_synchronize - wait until NAPI is not running
 *	@n: NAPI context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	if (IS_ENABLED(CONFIG_SMP))
		while (test_bit(NAPI_STATE_SCHED, &n->state))
			msleep(1);
	else
		barrier();
}
524
525enum netdev_queue_state_t {
526 __QUEUE_STATE_DRV_XOFF,
527 __QUEUE_STATE_STACK_XOFF,
528 __QUEUE_STATE_FROZEN,
529};
530
531#define QUEUE_STATE_DRV_XOFF (1 << __QUEUE_STATE_DRV_XOFF)
532#define QUEUE_STATE_STACK_XOFF (1 << __QUEUE_STATE_STACK_XOFF)
533#define QUEUE_STATE_FROZEN (1 << __QUEUE_STATE_FROZEN)
534
535#define QUEUE_STATE_ANY_XOFF (QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF)
536#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
537 QUEUE_STATE_FROZEN)
538#define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \
539 QUEUE_STATE_FROZEN)
540
541
542
543
544
545
546
547
548
549
550
551struct netdev_queue {
552
553
554
555 struct net_device *dev;
556 struct Qdisc __rcu *qdisc;
557 struct Qdisc *qdisc_sleeping;
558#ifdef CONFIG_SYSFS
559 struct kobject kobj;
560#endif
561#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
562 int numa_node;
563#endif
564 unsigned long tx_maxrate;
565
566
567
568
569 unsigned long trans_timeout;
570
571
572
573 spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
574 int xmit_lock_owner;
575
576
577
578 unsigned long trans_start;
579
580 unsigned long state;
581
582#ifdef CONFIG_BQL
583 struct dql dql;
584#endif
585} ____cacheline_aligned_in_smp;
586
587static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
588{
589#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
590 return q->numa_node;
591#else
592 return NUMA_NO_NODE;
593#endif
594}
595
596static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
597{
598#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
599 q->numa_node = node;
600#endif
601}
602
603#ifdef CONFIG_RPS
604
605
606
607
608struct rps_map {
609 unsigned int len;
610 struct rcu_head rcu;
611 u16 cpus[0];
612};
613#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
614
615
616
617
618
619
620struct rps_dev_flow {
621 u16 cpu;
622 u16 filter;
623 unsigned int last_qtail;
624};
625#define RPS_NO_FILTER 0xffff
626
627
628
629
630struct rps_dev_flow_table {
631 unsigned int mask;
632 struct rcu_head rcu;
633 struct rps_dev_flow flows[0];
634};
635#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
636 ((_num) * sizeof(struct rps_dev_flow)))
637
638
639
640
641
642
643
644
645
646
647
648struct rps_sock_flow_table {
649 u32 mask;
650
651 u32 ents[0] ____cacheline_aligned_in_smp;
652};
653#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))
654
655#define RPS_NO_CPU 0xffff
656
657extern u32 rps_cpu_mask;
658extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
659
660static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
661 u32 hash)
662{
663 if (table && hash) {
664 unsigned int index = hash & table->mask;
665 u32 val = hash & ~rps_cpu_mask;
666
667
668 val |= raw_smp_processor_id();
669
670 if (table->ents[index] != val)
671 table->ents[index] = val;
672 }
673}
674
675#ifdef CONFIG_RFS_ACCEL
676bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
677 u16 filter_id);
678#endif
679#endif
680
681
682struct netdev_rx_queue {
683#ifdef CONFIG_RPS
684 struct rps_map __rcu *rps_map;
685 struct rps_dev_flow_table __rcu *rps_flow_table;
686#endif
687 struct kobject kobj;
688 struct net_device *dev;
689} ____cacheline_aligned_in_smp;
690
691
692
693
694struct rx_queue_attribute {
695 struct attribute attr;
696 ssize_t (*show)(struct netdev_rx_queue *queue, char *buf);
697 ssize_t (*store)(struct netdev_rx_queue *queue,
698 const char *buf, size_t len);
699};
700
701#ifdef CONFIG_XPS
702
703
704
705
706struct xps_map {
707 unsigned int len;
708 unsigned int alloc_len;
709 struct rcu_head rcu;
710 u16 queues[0];
711};
712#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
713#define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
714 - sizeof(struct xps_map)) / sizeof(u16))
715
716
717
718
719struct xps_dev_maps {
720 struct rcu_head rcu;
721 struct xps_map __rcu *cpu_map[0];
722};
723#define XPS_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \
724 (nr_cpu_ids * (_tcs) * sizeof(struct xps_map *)))
725#endif
726
727#define TC_MAX_QUEUE 16
728#define TC_BITMASK 15
729
730struct netdev_tc_txq {
731 u16 count;
732 u16 offset;
733};
734
735#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
736
737
738
739
740struct netdev_fcoe_hbainfo {
741 char manufacturer[64];
742 char serial_number[64];
743 char hardware_version[64];
744 char driver_version[64];
745 char optionrom_version[64];
746 char firmware_version[64];
747 char model[256];
748 char model_description[256];
749};
750#endif
751
752#define MAX_PHYS_ITEM_ID_LEN 32
753
754
755
756
757struct netdev_phys_item_id {
758 unsigned char id[MAX_PHYS_ITEM_ID_LEN];
759 unsigned char id_len;
760};
761
762static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
763 struct netdev_phys_item_id *b)
764{
765 return a->id_len == b->id_len &&
766 memcmp(a->id, b->id, a->id_len) == 0;
767}
768
769typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
770 struct sk_buff *skb);
771
772enum tc_setup_type {
773 TC_SETUP_MQPRIO,
774 TC_SETUP_CLSU32,
775 TC_SETUP_CLSFLOWER,
776 TC_SETUP_CLSMATCHALL,
777 TC_SETUP_CLSBPF,
778};
779
780
781
782
783enum xdp_netdev_command {
784
785
786
787
788
789
790
791 XDP_SETUP_PROG,
792 XDP_SETUP_PROG_HW,
793
794
795
796
797 XDP_QUERY_PROG,
798};
799
800struct netlink_ext_ack;
801
802struct netdev_xdp {
803 enum xdp_netdev_command command;
804 union {
805
806 struct {
807 u32 flags;
808 struct bpf_prog *prog;
809 struct netlink_ext_ack *extack;
810 };
811
812 struct {
813 u8 prog_attached;
814 u32 prog_id;
815 };
816 };
817};
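/*
 * Illustrative sketch (not from this header): a driver's ndo_xdp() callback
 * dispatches on the command and uses the union above either to install the
 * program or to report what is attached.  struct foo_priv and its
 * xdp_prog/xdp_prog_id members, as well as foo_xdp_setup(), are hypothetical
 * driver state.
 *
 *	static int foo_xdp(struct net_device *dev, struct netdev_xdp *xdp)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		switch (xdp->command) {
 *		case XDP_SETUP_PROG:
 *			return foo_xdp_setup(priv, xdp->prog, xdp->extack);
 *		case XDP_QUERY_PROG:
 *			xdp->prog_attached = !!priv->xdp_prog;
 *			xdp->prog_id = priv->xdp_prog_id;
 *			return 0;
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */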
818
819#ifdef CONFIG_XFRM_OFFLOAD
820struct xfrmdev_ops {
821 int (*xdo_dev_state_add) (struct xfrm_state *x);
822 void (*xdo_dev_state_delete) (struct xfrm_state *x);
823 void (*xdo_dev_state_free) (struct xfrm_state *x);
824 bool (*xdo_dev_offload_ok) (struct sk_buff *skb,
825 struct xfrm_state *x);
826};
827#endif
828
/*
 * struct net_device_ops - the management hooks for network devices.
 *
 * Drivers implement only the operations they need and point
 * dev->netdev_ops at a (usually static, const) instance before the device
 * is registered; hooks left NULL either get default handling from the core
 * or make the corresponding operation unsupported.
 */
1130struct net_device_ops {
1131 int (*ndo_init)(struct net_device *dev);
1132 void (*ndo_uninit)(struct net_device *dev);
1133 int (*ndo_open)(struct net_device *dev);
1134 int (*ndo_stop)(struct net_device *dev);
1135 netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
1136 struct net_device *dev);
1137 netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
1138 struct net_device *dev,
1139 netdev_features_t features);
1140 u16 (*ndo_select_queue)(struct net_device *dev,
1141 struct sk_buff *skb,
1142 void *accel_priv,
1143 select_queue_fallback_t fallback);
1144 void (*ndo_change_rx_flags)(struct net_device *dev,
1145 int flags);
1146 void (*ndo_set_rx_mode)(struct net_device *dev);
1147 int (*ndo_set_mac_address)(struct net_device *dev,
1148 void *addr);
1149 int (*ndo_validate_addr)(struct net_device *dev);
1150 int (*ndo_do_ioctl)(struct net_device *dev,
1151 struct ifreq *ifr, int cmd);
1152 int (*ndo_set_config)(struct net_device *dev,
1153 struct ifmap *map);
1154 int (*ndo_change_mtu)(struct net_device *dev,
1155 int new_mtu);
1156 int (*ndo_neigh_setup)(struct net_device *dev,
1157 struct neigh_parms *);
1158 void (*ndo_tx_timeout) (struct net_device *dev);
1159
1160 void (*ndo_get_stats64)(struct net_device *dev,
1161 struct rtnl_link_stats64 *storage);
1162 bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id);
1163 int (*ndo_get_offload_stats)(int attr_id,
1164 const struct net_device *dev,
1165 void *attr_data);
1166 struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
1167
1168 int (*ndo_vlan_rx_add_vid)(struct net_device *dev,
1169 __be16 proto, u16 vid);
1170 int (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
1171 __be16 proto, u16 vid);
1172#ifdef CONFIG_NET_POLL_CONTROLLER
1173 void (*ndo_poll_controller)(struct net_device *dev);
1174 int (*ndo_netpoll_setup)(struct net_device *dev,
1175 struct netpoll_info *info);
1176 void (*ndo_netpoll_cleanup)(struct net_device *dev);
1177#endif
1178 int (*ndo_set_vf_mac)(struct net_device *dev,
1179 int queue, u8 *mac);
1180 int (*ndo_set_vf_vlan)(struct net_device *dev,
1181 int queue, u16 vlan,
1182 u8 qos, __be16 proto);
1183 int (*ndo_set_vf_rate)(struct net_device *dev,
1184 int vf, int min_tx_rate,
1185 int max_tx_rate);
1186 int (*ndo_set_vf_spoofchk)(struct net_device *dev,
1187 int vf, bool setting);
1188 int (*ndo_set_vf_trust)(struct net_device *dev,
1189 int vf, bool setting);
1190 int (*ndo_get_vf_config)(struct net_device *dev,
1191 int vf,
1192 struct ifla_vf_info *ivf);
1193 int (*ndo_set_vf_link_state)(struct net_device *dev,
1194 int vf, int link_state);
1195 int (*ndo_get_vf_stats)(struct net_device *dev,
1196 int vf,
1197 struct ifla_vf_stats
1198 *vf_stats);
1199 int (*ndo_set_vf_port)(struct net_device *dev,
1200 int vf,
1201 struct nlattr *port[]);
1202 int (*ndo_get_vf_port)(struct net_device *dev,
1203 int vf, struct sk_buff *skb);
1204 int (*ndo_set_vf_guid)(struct net_device *dev,
1205 int vf, u64 guid,
1206 int guid_type);
1207 int (*ndo_set_vf_rss_query_en)(
1208 struct net_device *dev,
1209 int vf, bool setting);
1210 int (*ndo_setup_tc)(struct net_device *dev,
1211 enum tc_setup_type type,
1212 void *type_data);
1213#if IS_ENABLED(CONFIG_FCOE)
1214 int (*ndo_fcoe_enable)(struct net_device *dev);
1215 int (*ndo_fcoe_disable)(struct net_device *dev);
1216 int (*ndo_fcoe_ddp_setup)(struct net_device *dev,
1217 u16 xid,
1218 struct scatterlist *sgl,
1219 unsigned int sgc);
1220 int (*ndo_fcoe_ddp_done)(struct net_device *dev,
1221 u16 xid);
1222 int (*ndo_fcoe_ddp_target)(struct net_device *dev,
1223 u16 xid,
1224 struct scatterlist *sgl,
1225 unsigned int sgc);
1226 int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
1227 struct netdev_fcoe_hbainfo *hbainfo);
1228#endif
1229
1230#if IS_ENABLED(CONFIG_LIBFCOE)
1231#define NETDEV_FCOE_WWNN 0
1232#define NETDEV_FCOE_WWPN 1
1233 int (*ndo_fcoe_get_wwn)(struct net_device *dev,
1234 u64 *wwn, int type);
1235#endif
1236
1237#ifdef CONFIG_RFS_ACCEL
1238 int (*ndo_rx_flow_steer)(struct net_device *dev,
1239 const struct sk_buff *skb,
1240 u16 rxq_index,
1241 u32 flow_id);
1242#endif
1243 int (*ndo_add_slave)(struct net_device *dev,
1244 struct net_device *slave_dev);
1245 int (*ndo_del_slave)(struct net_device *dev,
1246 struct net_device *slave_dev);
1247 netdev_features_t (*ndo_fix_features)(struct net_device *dev,
1248 netdev_features_t features);
1249 int (*ndo_set_features)(struct net_device *dev,
1250 netdev_features_t features);
1251 int (*ndo_neigh_construct)(struct net_device *dev,
1252 struct neighbour *n);
1253 void (*ndo_neigh_destroy)(struct net_device *dev,
1254 struct neighbour *n);
1255
1256 int (*ndo_fdb_add)(struct ndmsg *ndm,
1257 struct nlattr *tb[],
1258 struct net_device *dev,
1259 const unsigned char *addr,
1260 u16 vid,
1261 u16 flags);
1262 int (*ndo_fdb_del)(struct ndmsg *ndm,
1263 struct nlattr *tb[],
1264 struct net_device *dev,
1265 const unsigned char *addr,
1266 u16 vid);
1267 int (*ndo_fdb_dump)(struct sk_buff *skb,
1268 struct netlink_callback *cb,
1269 struct net_device *dev,
1270 struct net_device *filter_dev,
1271 int *idx);
1272
1273 int (*ndo_bridge_setlink)(struct net_device *dev,
1274 struct nlmsghdr *nlh,
1275 u16 flags);
1276 int (*ndo_bridge_getlink)(struct sk_buff *skb,
1277 u32 pid, u32 seq,
1278 struct net_device *dev,
1279 u32 filter_mask,
1280 int nlflags);
1281 int (*ndo_bridge_dellink)(struct net_device *dev,
1282 struct nlmsghdr *nlh,
1283 u16 flags);
1284 int (*ndo_change_carrier)(struct net_device *dev,
1285 bool new_carrier);
1286 int (*ndo_get_phys_port_id)(struct net_device *dev,
1287 struct netdev_phys_item_id *ppid);
1288 int (*ndo_get_phys_port_name)(struct net_device *dev,
1289 char *name, size_t len);
1290 void (*ndo_udp_tunnel_add)(struct net_device *dev,
1291 struct udp_tunnel_info *ti);
1292 void (*ndo_udp_tunnel_del)(struct net_device *dev,
1293 struct udp_tunnel_info *ti);
1294 void* (*ndo_dfwd_add_station)(struct net_device *pdev,
1295 struct net_device *dev);
1296 void (*ndo_dfwd_del_station)(struct net_device *pdev,
1297 void *priv);
1298
1299 int (*ndo_get_lock_subclass)(struct net_device *dev);
1300 int (*ndo_set_tx_maxrate)(struct net_device *dev,
1301 int queue_index,
1302 u32 maxrate);
1303 int (*ndo_get_iflink)(const struct net_device *dev);
1304 int (*ndo_change_proto_down)(struct net_device *dev,
1305 bool proto_down);
1306 int (*ndo_fill_metadata_dst)(struct net_device *dev,
1307 struct sk_buff *skb);
1308 void (*ndo_set_rx_headroom)(struct net_device *dev,
1309 int needed_headroom);
1310 int (*ndo_xdp)(struct net_device *dev,
1311 struct netdev_xdp *xdp);
1312 int (*ndo_xdp_xmit)(struct net_device *dev,
1313 struct xdp_buff *xdp);
1314 void (*ndo_xdp_flush)(struct net_device *dev);
1315};
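/*
 * Illustrative sketch (not from this header): most drivers implement only a
 * small subset of the hooks above and register them through a static
 * instance assigned to dev->netdev_ops before registration.  The foo_*
 * handlers are hypothetical; eth_mac_addr() and eth_validate_addr() are the
 * generic Ethernet helpers.
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		.ndo_open		= foo_open,
 *		.ndo_stop		= foo_stop,
 *		.ndo_start_xmit		= foo_start_xmit,
 *		.ndo_set_rx_mode	= foo_set_rx_mode,
 *		.ndo_set_mac_address	= eth_mac_addr,
 *		.ndo_validate_addr	= eth_validate_addr,
 *		.ndo_tx_timeout		= foo_tx_timeout,
 *		.ndo_get_stats64	= foo_get_stats64,
 *	};
 */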
1316
/*
 * enum netdev_priv_flags - &struct net_device priv_flags
 *
 * These are the kernel-internal companion flags to dev->flags; unlike the
 * IFF_* interface flags in the uapi headers they are not part of the
 * userspace ABI and are only set by drivers and the stack.
 */
1360enum netdev_priv_flags {
1361 IFF_802_1Q_VLAN = 1<<0,
1362 IFF_EBRIDGE = 1<<1,
1363 IFF_BONDING = 1<<2,
1364 IFF_ISATAP = 1<<3,
1365 IFF_WAN_HDLC = 1<<4,
1366 IFF_XMIT_DST_RELEASE = 1<<5,
1367 IFF_DONT_BRIDGE = 1<<6,
1368 IFF_DISABLE_NETPOLL = 1<<7,
1369 IFF_MACVLAN_PORT = 1<<8,
1370 IFF_BRIDGE_PORT = 1<<9,
1371 IFF_OVS_DATAPATH = 1<<10,
1372 IFF_TX_SKB_SHARING = 1<<11,
1373 IFF_UNICAST_FLT = 1<<12,
1374 IFF_TEAM_PORT = 1<<13,
1375 IFF_SUPP_NOFCS = 1<<14,
1376 IFF_LIVE_ADDR_CHANGE = 1<<15,
1377 IFF_MACVLAN = 1<<16,
1378 IFF_XMIT_DST_RELEASE_PERM = 1<<17,
1379 IFF_IPVLAN_MASTER = 1<<18,
1380 IFF_IPVLAN_SLAVE = 1<<19,
1381 IFF_L3MDEV_MASTER = 1<<20,
1382 IFF_NO_QUEUE = 1<<21,
1383 IFF_OPENVSWITCH = 1<<22,
1384 IFF_L3MDEV_SLAVE = 1<<23,
1385 IFF_TEAM = 1<<24,
1386 IFF_RXFH_CONFIGURED = 1<<25,
1387 IFF_PHONY_HEADROOM = 1<<26,
1388 IFF_MACSEC = 1<<27,
1389};
1390
1391#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
1392#define IFF_EBRIDGE IFF_EBRIDGE
1393#define IFF_BONDING IFF_BONDING
1394#define IFF_ISATAP IFF_ISATAP
1395#define IFF_WAN_HDLC IFF_WAN_HDLC
1396#define IFF_XMIT_DST_RELEASE IFF_XMIT_DST_RELEASE
1397#define IFF_DONT_BRIDGE IFF_DONT_BRIDGE
1398#define IFF_DISABLE_NETPOLL IFF_DISABLE_NETPOLL
1399#define IFF_MACVLAN_PORT IFF_MACVLAN_PORT
1400#define IFF_BRIDGE_PORT IFF_BRIDGE_PORT
1401#define IFF_OVS_DATAPATH IFF_OVS_DATAPATH
1402#define IFF_TX_SKB_SHARING IFF_TX_SKB_SHARING
1403#define IFF_UNICAST_FLT IFF_UNICAST_FLT
1404#define IFF_TEAM_PORT IFF_TEAM_PORT
1405#define IFF_SUPP_NOFCS IFF_SUPP_NOFCS
1406#define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE
1407#define IFF_MACVLAN IFF_MACVLAN
1408#define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM
1409#define IFF_IPVLAN_MASTER IFF_IPVLAN_MASTER
1410#define IFF_IPVLAN_SLAVE IFF_IPVLAN_SLAVE
1411#define IFF_L3MDEV_MASTER IFF_L3MDEV_MASTER
1412#define IFF_NO_QUEUE IFF_NO_QUEUE
1413#define IFF_OPENVSWITCH IFF_OPENVSWITCH
1414#define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE
1415#define IFF_TEAM IFF_TEAM
1416#define IFF_RXFH_CONFIGURED IFF_RXFH_CONFIGURED
1417#define IFF_MACSEC IFF_MACSEC
1418
/*
 * struct net_device - the DEVICE structure.
 *
 * This is the central structure describing one network interface; it
 * carries the hardware parameters, protocol-specific pointers, transmit
 * and receive queue state, and the driver callbacks registered through
 * &struct net_device_ops.  It must be allocated with alloc_netdev() or
 * one of its wrappers (e.g. alloc_etherdev()), never by hand.
 */
1632struct net_device {
1633 char name[IFNAMSIZ];
1634 struct hlist_node name_hlist;
1635 char *ifalias;
1636
1637
1638
1639
1640 unsigned long mem_end;
1641 unsigned long mem_start;
1642 unsigned long base_addr;
1643 int irq;
1644
1645 atomic_t carrier_changes;
1646
1647
1648
1649
1650
1651
1652
1653 unsigned long state;
1654
1655 struct list_head dev_list;
1656 struct list_head napi_list;
1657 struct list_head unreg_list;
1658 struct list_head close_list;
1659 struct list_head ptype_all;
1660 struct list_head ptype_specific;
1661
1662 struct {
1663 struct list_head upper;
1664 struct list_head lower;
1665 } adj_list;
1666
1667 netdev_features_t features;
1668 netdev_features_t hw_features;
1669 netdev_features_t wanted_features;
1670 netdev_features_t vlan_features;
1671 netdev_features_t hw_enc_features;
1672 netdev_features_t mpls_features;
1673 netdev_features_t gso_partial_features;
1674
1675 int ifindex;
1676 int group;
1677
1678 struct net_device_stats stats;
1679
1680 atomic_long_t rx_dropped;
1681 atomic_long_t tx_dropped;
1682 atomic_long_t rx_nohandler;
1683
1684#ifdef CONFIG_WIRELESS_EXT
1685 const struct iw_handler_def *wireless_handlers;
1686 struct iw_public_data *wireless_data;
1687#endif
1688 const struct net_device_ops *netdev_ops;
1689 const struct ethtool_ops *ethtool_ops;
1690#ifdef CONFIG_NET_SWITCHDEV
1691 const struct switchdev_ops *switchdev_ops;
1692#endif
1693#ifdef CONFIG_NET_L3_MASTER_DEV
1694 const struct l3mdev_ops *l3mdev_ops;
1695#endif
1696#if IS_ENABLED(CONFIG_IPV6)
1697 const struct ndisc_ops *ndisc_ops;
1698#endif
1699
1700#ifdef CONFIG_XFRM
1701 const struct xfrmdev_ops *xfrmdev_ops;
1702#endif
1703
1704 const struct header_ops *header_ops;
1705
1706 unsigned int flags;
1707 unsigned int priv_flags;
1708
1709 unsigned short gflags;
1710 unsigned short padded;
1711
1712 unsigned char operstate;
1713 unsigned char link_mode;
1714
1715 unsigned char if_port;
1716 unsigned char dma;
1717
1718 unsigned int mtu;
1719 unsigned int min_mtu;
1720 unsigned int max_mtu;
1721 unsigned short type;
1722 unsigned short hard_header_len;
1723 unsigned char min_header_len;
1724
1725 unsigned short needed_headroom;
1726 unsigned short needed_tailroom;
1727
1728
1729 unsigned char perm_addr[MAX_ADDR_LEN];
1730 unsigned char addr_assign_type;
1731 unsigned char addr_len;
1732 unsigned short neigh_priv_len;
1733 unsigned short dev_id;
1734 unsigned short dev_port;
1735 spinlock_t addr_list_lock;
1736 unsigned char name_assign_type;
1737 bool uc_promisc;
1738 struct netdev_hw_addr_list uc;
1739 struct netdev_hw_addr_list mc;
1740 struct netdev_hw_addr_list dev_addrs;
1741
1742#ifdef CONFIG_SYSFS
1743 struct kset *queues_kset;
1744#endif
1745 unsigned int promiscuity;
1746 unsigned int allmulti;
1747
1748
1749
1750
1751#if IS_ENABLED(CONFIG_VLAN_8021Q)
1752 struct vlan_info __rcu *vlan_info;
1753#endif
1754#if IS_ENABLED(CONFIG_NET_DSA)
1755 struct dsa_switch_tree *dsa_ptr;
1756#endif
1757#if IS_ENABLED(CONFIG_TIPC)
1758 struct tipc_bearer __rcu *tipc_ptr;
1759#endif
1760 void *atalk_ptr;
1761 struct in_device __rcu *ip_ptr;
1762 struct dn_dev __rcu *dn_ptr;
1763 struct inet6_dev __rcu *ip6_ptr;
1764 void *ax25_ptr;
1765 struct wireless_dev *ieee80211_ptr;
1766 struct wpan_dev *ieee802154_ptr;
1767#if IS_ENABLED(CONFIG_MPLS_ROUTING)
1768 struct mpls_dev __rcu *mpls_ptr;
1769#endif
1770
1771
1772
1773
1774
1775 unsigned char *dev_addr;
1776
1777#ifdef CONFIG_SYSFS
1778 struct netdev_rx_queue *_rx;
1779
1780 unsigned int num_rx_queues;
1781 unsigned int real_num_rx_queues;
1782#endif
1783
1784 struct bpf_prog __rcu *xdp_prog;
1785 unsigned long gro_flush_timeout;
1786 rx_handler_func_t __rcu *rx_handler;
1787 void __rcu *rx_handler_data;
1788
1789#ifdef CONFIG_NET_CLS_ACT
1790 struct tcf_proto __rcu *ingress_cl_list;
1791#endif
1792 struct netdev_queue __rcu *ingress_queue;
1793#ifdef CONFIG_NETFILTER_INGRESS
1794 struct nf_hook_entries __rcu *nf_hooks_ingress;
1795#endif
1796
1797 unsigned char broadcast[MAX_ADDR_LEN];
1798#ifdef CONFIG_RFS_ACCEL
1799 struct cpu_rmap *rx_cpu_rmap;
1800#endif
1801 struct hlist_node index_hlist;
1802
1803
1804
1805
1806 struct netdev_queue *_tx ____cacheline_aligned_in_smp;
1807 unsigned int num_tx_queues;
1808 unsigned int real_num_tx_queues;
1809 struct Qdisc *qdisc;
1810#ifdef CONFIG_NET_SCHED
1811 DECLARE_HASHTABLE (qdisc_hash, 4);
1812#endif
1813 unsigned int tx_queue_len;
1814 spinlock_t tx_global_lock;
1815 int watchdog_timeo;
1816
1817#ifdef CONFIG_XPS
1818 struct xps_dev_maps __rcu *xps_maps;
1819#endif
1820#ifdef CONFIG_NET_CLS_ACT
1821 struct tcf_proto __rcu *egress_cl_list;
1822#endif
1823
1824
1825 struct timer_list watchdog_timer;
1826
1827 int __percpu *pcpu_refcnt;
1828 struct list_head todo_list;
1829
1830 struct list_head link_watch_list;
1831
1832 enum { NETREG_UNINITIALIZED=0,
1833 NETREG_REGISTERED,
1834 NETREG_UNREGISTERING,
1835 NETREG_UNREGISTERED,
1836 NETREG_RELEASED,
1837 NETREG_DUMMY,
1838 } reg_state:8;
1839
1840 bool dismantle;
1841
1842 enum {
1843 RTNL_LINK_INITIALIZED,
1844 RTNL_LINK_INITIALIZING,
1845 } rtnl_link_state:16;
1846
1847 bool needs_free_netdev;
1848 void (*priv_destructor)(struct net_device *dev);
1849
1850#ifdef CONFIG_NETPOLL
1851 struct netpoll_info __rcu *npinfo;
1852#endif
1853
1854 possible_net_t nd_net;
1855
1856
1857 union {
1858 void *ml_priv;
1859 struct pcpu_lstats __percpu *lstats;
1860 struct pcpu_sw_netstats __percpu *tstats;
1861 struct pcpu_dstats __percpu *dstats;
1862 struct pcpu_vstats __percpu *vstats;
1863 };
1864
1865#if IS_ENABLED(CONFIG_GARP)
1866 struct garp_port __rcu *garp_port;
1867#endif
1868#if IS_ENABLED(CONFIG_MRP)
1869 struct mrp_port __rcu *mrp_port;
1870#endif
1871
1872 struct device dev;
1873 const struct attribute_group *sysfs_groups[4];
1874 const struct attribute_group *sysfs_rx_queue_group;
1875
1876 const struct rtnl_link_ops *rtnl_link_ops;
1877
1878
1879#define GSO_MAX_SIZE 65536
1880 unsigned int gso_max_size;
1881#define GSO_MAX_SEGS 65535
1882 u16 gso_max_segs;
1883
1884#ifdef CONFIG_DCB
1885 const struct dcbnl_rtnl_ops *dcbnl_ops;
1886#endif
1887 u8 num_tc;
1888 struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
1889 u8 prio_tc_map[TC_BITMASK + 1];
1890
1891#if IS_ENABLED(CONFIG_FCOE)
1892 unsigned int fcoe_ddp_xid;
1893#endif
1894#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
1895 struct netprio_map __rcu *priomap;
1896#endif
1897 struct phy_device *phydev;
1898 struct lock_class_key *qdisc_tx_busylock;
1899 struct lock_class_key *qdisc_running_key;
1900 bool proto_down;
1901};
1902#define to_net_dev(d) container_of(d, struct net_device, dev)
1903
1904static inline bool netif_elide_gro(const struct net_device *dev)
1905{
1906 if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog)
1907 return true;
1908 return false;
1909}
1910
1911#define NETDEV_ALIGN 32
1912
1913static inline
1914int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
1915{
1916 return dev->prio_tc_map[prio & TC_BITMASK];
1917}
1918
1919static inline
1920int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
1921{
1922 if (tc >= dev->num_tc)
1923 return -EINVAL;
1924
1925 dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
1926 return 0;
1927}
1928
1929int netdev_txq_to_tc(struct net_device *dev, unsigned int txq);
1930void netdev_reset_tc(struct net_device *dev);
1931int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset);
1932int netdev_set_num_tc(struct net_device *dev, u8 num_tc);
1933
1934static inline
1935int netdev_get_num_tc(struct net_device *dev)
1936{
1937 return dev->num_tc;
1938}
1939
1940static inline
1941struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
1942 unsigned int index)
1943{
1944 return &dev->_tx[index];
1945}
1946
1947static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
1948 const struct sk_buff *skb)
1949{
1950 return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
1951}
1952
1953static inline void netdev_for_each_tx_queue(struct net_device *dev,
1954 void (*f)(struct net_device *,
1955 struct netdev_queue *,
1956 void *),
1957 void *arg)
1958{
1959 unsigned int i;
1960
1961 for (i = 0; i < dev->num_tx_queues; i++)
1962 f(dev, &dev->_tx[i], arg);
1963}
1964
1965#define netdev_lockdep_set_classes(dev) \
1966{ \
1967 static struct lock_class_key qdisc_tx_busylock_key; \
1968 static struct lock_class_key qdisc_running_key; \
1969 static struct lock_class_key qdisc_xmit_lock_key; \
1970 static struct lock_class_key dev_addr_list_lock_key; \
1971 unsigned int i; \
1972 \
1973 (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \
1974 (dev)->qdisc_running_key = &qdisc_running_key; \
1975 lockdep_set_class(&(dev)->addr_list_lock, \
1976 &dev_addr_list_lock_key); \
1977 for (i = 0; i < (dev)->num_tx_queues; i++) \
1978 lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \
1979 &qdisc_xmit_lock_key); \
1980}
1981
1982struct netdev_queue *netdev_pick_tx(struct net_device *dev,
1983 struct sk_buff *skb,
1984 void *accel_priv);
1985
1986
1987
1988
1989static inline unsigned netdev_get_fwd_headroom(struct net_device *dev)
1990{
1991 return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom;
1992}
1993
1994static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr)
1995{
1996 if (dev->netdev_ops->ndo_set_rx_headroom)
1997 dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr);
1998}
1999
2000
2001static inline void netdev_reset_rx_headroom(struct net_device *dev)
2002{
2003 netdev_set_rx_headroom(dev, -1);
2004}
2005
2006
2007
2008
2009static inline
2010struct net *dev_net(const struct net_device *dev)
2011{
2012 return read_pnet(&dev->nd_net);
2013}
2014
2015static inline
2016void dev_net_set(struct net_device *dev, struct net *net)
2017{
2018 write_pnet(&dev->nd_net, net);
2019}
2020
2021
2022
2023
2024
2025
2026
2027static inline void *netdev_priv(const struct net_device *dev)
2028{
2029 return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
2030}
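/*
 * Illustrative sketch (not from this header): the private area is allocated
 * together with the net_device, so drivers size it at allocation time and
 * retrieve it with netdev_priv().  struct foo_priv and its dev member are
 * hypothetical.
 *
 *	struct net_device *dev;
 *	struct foo_priv *priv;
 *
 *	dev = alloc_etherdev(sizeof(struct foo_priv));
 *	if (!dev)
 *		return -ENOMEM;
 *	priv = netdev_priv(dev);
 *	priv->dev = dev;
 *	... initialise priv, set dev->netdev_ops, then register_netdev(dev) ...
 */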
2031
2032
2033
2034
2035#define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
2036
2037
2038
2039
2040
2041#define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
2042
2043
2044
2045
2046#define NAPI_POLL_WEIGHT 64
2047
/**
 *	netif_napi_add - initialize a NAPI context
 *	@dev:  network device
 *	@napi: NAPI context
 *	@poll: polling function
 *	@weight: default weight
 *
 * netif_napi_add() must be used to initialize a NAPI context prior to
 * calling *any* of the other NAPI-related functions.
 */
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight);
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072static inline void netif_tx_napi_add(struct net_device *dev,
2073 struct napi_struct *napi,
2074 int (*poll)(struct napi_struct *, int),
2075 int weight)
2076{
2077 set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
2078 netif_napi_add(dev, napi, poll, weight);
2079}
2080
2081
2082
2083
2084
2085
2086
2087void netif_napi_del(struct napi_struct *napi);
2088
2089struct napi_gro_cb {
2090
2091 void *frag0;
2092
2093
2094 unsigned int frag0_len;
2095
2096
2097 int data_offset;
2098
2099
2100 u16 flush;
2101
2102
2103 u16 flush_id;
2104
2105
2106 u16 count;
2107
2108
2109 u16 gro_remcsum_start;
2110
2111
2112 unsigned long age;
2113
2114
2115 u16 proto;
2116
2117
2118 u8 same_flow:1;
2119
2120
2121 u8 encap_mark:1;
2122
2123
2124 u8 csum_valid:1;
2125
2126
2127 u8 csum_cnt:3;
2128
2129
2130 u8 free:2;
2131#define NAPI_GRO_FREE 1
2132#define NAPI_GRO_FREE_STOLEN_HEAD 2
2133
2134
2135 u8 is_ipv6:1;
2136
2137
2138 u8 is_fou:1;
2139
2140
2141 u8 is_atomic:1;
2142
2143
2144 u8 recursion_counter:4;
2145
2146
2147
2148
2149 __wsum csum;
2150
2151
2152 struct sk_buff *last;
2153};
2154
2155#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
2156
2157#define GRO_RECURSION_LIMIT 15
2158static inline int gro_recursion_inc_test(struct sk_buff *skb)
2159{
2160 return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
2161}
2162
2163typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
2164static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
2165 struct sk_buff **head,
2166 struct sk_buff *skb)
2167{
2168 if (unlikely(gro_recursion_inc_test(skb))) {
2169 NAPI_GRO_CB(skb)->flush |= 1;
2170 return NULL;
2171 }
2172
2173 return cb(head, skb);
2174}
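/*
 * Illustrative sketch (not from this header): encapsulation protocols wrap
 * their inner gro_receive callback with call_gro_receive() so that deeply
 * nested tunnels trip the recursion limit instead of recursing without
 * bound.  foo_gro_receive() and foo_inner_gro_receive() are hypothetical.
 *
 *	static struct sk_buff **foo_gro_receive(struct sk_buff **head,
 *						struct sk_buff *skb)
 *	{
 *		... parse and pull the foo encapsulation header ...
 *		return call_gro_receive(foo_inner_gro_receive, head, skb);
 *	}
 */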
2175
2176typedef struct sk_buff **(*gro_receive_sk_t)(struct sock *, struct sk_buff **,
2177 struct sk_buff *);
2178static inline struct sk_buff **call_gro_receive_sk(gro_receive_sk_t cb,
2179 struct sock *sk,
2180 struct sk_buff **head,
2181 struct sk_buff *skb)
2182{
2183 if (unlikely(gro_recursion_inc_test(skb))) {
2184 NAPI_GRO_CB(skb)->flush |= 1;
2185 return NULL;
2186 }
2187
2188 return cb(sk, head, skb);
2189}
2190
2191struct packet_type {
2192 __be16 type;
2193 struct net_device *dev;
2194 int (*func) (struct sk_buff *,
2195 struct net_device *,
2196 struct packet_type *,
2197 struct net_device *);
2198 bool (*id_match)(struct packet_type *ptype,
2199 struct sock *sk);
2200 void *af_packet_priv;
2201 struct list_head list;
2202};
2203
2204struct offload_callbacks {
2205 struct sk_buff *(*gso_segment)(struct sk_buff *skb,
2206 netdev_features_t features);
2207 struct sk_buff **(*gro_receive)(struct sk_buff **head,
2208 struct sk_buff *skb);
2209 int (*gro_complete)(struct sk_buff *skb, int nhoff);
2210};
2211
2212struct packet_offload {
2213 __be16 type;
2214 u16 priority;
2215 struct offload_callbacks callbacks;
2216 struct list_head list;
2217};
2218
2219
2220struct pcpu_sw_netstats {
2221 u64 rx_packets;
2222 u64 rx_bytes;
2223 u64 tx_packets;
2224 u64 tx_bytes;
2225 struct u64_stats_sync syncp;
2226};
2227
2228#define __netdev_alloc_pcpu_stats(type, gfp) \
2229({ \
2230 typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
2231 if (pcpu_stats) { \
2232 int __cpu; \
2233 for_each_possible_cpu(__cpu) { \
2234 typeof(type) *stat; \
2235 stat = per_cpu_ptr(pcpu_stats, __cpu); \
2236 u64_stats_init(&stat->syncp); \
2237 } \
2238 } \
2239 pcpu_stats; \
2240})
2241
2242#define netdev_alloc_pcpu_stats(type) \
2243 __netdev_alloc_pcpu_stats(type, GFP_KERNEL)
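/*
 * Illustrative sketch (not from this header): software devices usually
 * allocate one pcpu_sw_netstats per CPU at setup time and update it from
 * the hot path under the u64_stats sequence counter.
 *
 *	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
 *	if (!dev->tstats)
 *		return -ENOMEM;
 *
 *	... later, on receive ...
 *	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
 *
 *	u64_stats_update_begin(&tstats->syncp);
 *	tstats->rx_packets++;
 *	tstats->rx_bytes += skb->len;
 *	u64_stats_update_end(&tstats->syncp);
 */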
2244
2245enum netdev_lag_tx_type {
2246 NETDEV_LAG_TX_TYPE_UNKNOWN,
2247 NETDEV_LAG_TX_TYPE_RANDOM,
2248 NETDEV_LAG_TX_TYPE_BROADCAST,
2249 NETDEV_LAG_TX_TYPE_ROUNDROBIN,
2250 NETDEV_LAG_TX_TYPE_ACTIVEBACKUP,
2251 NETDEV_LAG_TX_TYPE_HASH,
2252};
2253
2254struct netdev_lag_upper_info {
2255 enum netdev_lag_tx_type tx_type;
2256};
2257
2258struct netdev_lag_lower_state_info {
2259 u8 link_up : 1,
2260 tx_enabled : 1;
2261};
2262
2263#include <linux/notifier.h>
2264
2265
2266
2267
2268
2269#define NETDEV_UP 0x0001
2270#define NETDEV_DOWN 0x0002
2271#define NETDEV_REBOOT 0x0003
2272
2273
2274
2275#define NETDEV_CHANGE 0x0004
2276#define NETDEV_REGISTER 0x0005
2277#define NETDEV_UNREGISTER 0x0006
2278#define NETDEV_CHANGEMTU 0x0007
2279#define NETDEV_CHANGEADDR 0x0008
2280#define NETDEV_GOING_DOWN 0x0009
2281#define NETDEV_CHANGENAME 0x000A
2282#define NETDEV_FEAT_CHANGE 0x000B
2283#define NETDEV_BONDING_FAILOVER 0x000C
2284#define NETDEV_PRE_UP 0x000D
2285#define NETDEV_PRE_TYPE_CHANGE 0x000E
2286#define NETDEV_POST_TYPE_CHANGE 0x000F
2287#define NETDEV_POST_INIT 0x0010
2288#define NETDEV_UNREGISTER_FINAL 0x0011
2289#define NETDEV_RELEASE 0x0012
2290#define NETDEV_NOTIFY_PEERS 0x0013
2291#define NETDEV_JOIN 0x0014
2292#define NETDEV_CHANGEUPPER 0x0015
2293#define NETDEV_RESEND_IGMP 0x0016
2294#define NETDEV_PRECHANGEMTU 0x0017
2295#define NETDEV_CHANGEINFODATA 0x0018
2296#define NETDEV_BONDING_INFO 0x0019
2297#define NETDEV_PRECHANGEUPPER 0x001A
2298#define NETDEV_CHANGELOWERSTATE 0x001B
2299#define NETDEV_UDP_TUNNEL_PUSH_INFO 0x001C
2300#define NETDEV_UDP_TUNNEL_DROP_INFO 0x001D
2301#define NETDEV_CHANGE_TX_QUEUE_LEN 0x001E
2302
2303int register_netdevice_notifier(struct notifier_block *nb);
2304int unregister_netdevice_notifier(struct notifier_block *nb);
2305
2306struct netdev_notifier_info {
2307 struct net_device *dev;
2308};
2309
2310struct netdev_notifier_change_info {
2311 struct netdev_notifier_info info;
2312 unsigned int flags_changed;
2313};
2314
2315struct netdev_notifier_changeupper_info {
2316 struct netdev_notifier_info info;
2317 struct net_device *upper_dev;
2318 bool master;
2319 bool linking;
2320 void *upper_info;
2321};
2322
2323struct netdev_notifier_changelowerstate_info {
2324 struct netdev_notifier_info info;
2325 void *lower_state_info;
2326};
2327
2328static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
2329 struct net_device *dev)
2330{
2331 info->dev = dev;
2332}
2333
2334static inline struct net_device *
2335netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
2336{
2337 return info->dev;
2338}
2339
2340int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
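/*
 * Illustrative sketch (not from this header): a subsystem interested in
 * device state changes registers a notifier block and extracts the device
 * from the opaque pointer with netdev_notifier_info_to_dev().  The foo_*
 * names are hypothetical.
 *
 *	static int foo_netdev_event(struct notifier_block *nb,
 *				    unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		switch (event) {
 *		case NETDEV_UP:
 *			foo_device_up(dev);
 *			break;
 *		case NETDEV_GOING_DOWN:
 *			foo_device_down(dev);
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block foo_netdev_notifier = {
 *		.notifier_call = foo_netdev_event,
 *	};
 *
 *	err = register_netdevice_notifier(&foo_netdev_notifier);
 */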
2341
2342
2343extern rwlock_t dev_base_lock;
2344
2345#define for_each_netdev(net, d) \
2346 list_for_each_entry(d, &(net)->dev_base_head, dev_list)
2347#define for_each_netdev_reverse(net, d) \
2348 list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
2349#define for_each_netdev_rcu(net, d) \
2350 list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
2351#define for_each_netdev_safe(net, d, n) \
2352 list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
2353#define for_each_netdev_continue(net, d) \
2354 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
2355#define for_each_netdev_continue_rcu(net, d) \
2356 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
2357#define for_each_netdev_in_bond_rcu(bond, slave) \
2358 for_each_netdev_rcu(&init_net, slave) \
2359 if (netdev_master_upper_dev_get_rcu(slave) == (bond))
2360#define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
2361
2362static inline struct net_device *next_net_device(struct net_device *dev)
2363{
2364 struct list_head *lh;
2365 struct net *net;
2366
2367 net = dev_net(dev);
2368 lh = dev->dev_list.next;
2369 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2370}
2371
2372static inline struct net_device *next_net_device_rcu(struct net_device *dev)
2373{
2374 struct list_head *lh;
2375 struct net *net;
2376
2377 net = dev_net(dev);
2378 lh = rcu_dereference(list_next_rcu(&dev->dev_list));
2379 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2380}
2381
2382static inline struct net_device *first_net_device(struct net *net)
2383{
2384 return list_empty(&net->dev_base_head) ? NULL :
2385 net_device_entry(net->dev_base_head.next);
2386}
2387
2388static inline struct net_device *first_net_device_rcu(struct net *net)
2389{
2390 struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
2391
2392 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2393}
2394
2395int netdev_boot_setup_check(struct net_device *dev);
2396unsigned long netdev_boot_base(const char *prefix, int unit);
2397struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
2398 const char *hwaddr);
2399struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
2400struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
2401void dev_add_pack(struct packet_type *pt);
2402void dev_remove_pack(struct packet_type *pt);
2403void __dev_remove_pack(struct packet_type *pt);
2404void dev_add_offload(struct packet_offload *po);
2405void dev_remove_offload(struct packet_offload *po);
2406
2407int dev_get_iflink(const struct net_device *dev);
2408int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
2409struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
2410 unsigned short mask);
2411struct net_device *dev_get_by_name(struct net *net, const char *name);
2412struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
2413struct net_device *__dev_get_by_name(struct net *net, const char *name);
2414int dev_alloc_name(struct net_device *dev, const char *name);
2415int dev_open(struct net_device *dev);
2416void dev_close(struct net_device *dev);
2417void dev_close_many(struct list_head *head, bool unlink);
2418void dev_disable_lro(struct net_device *dev);
2419int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
2420int dev_queue_xmit(struct sk_buff *skb);
2421int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
2422int register_netdevice(struct net_device *dev);
2423void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
2424void unregister_netdevice_many(struct list_head *head);
2425static inline void unregister_netdevice(struct net_device *dev)
2426{
2427 unregister_netdevice_queue(dev, NULL);
2428}
2429
2430int netdev_refcnt_read(const struct net_device *dev);
2431void free_netdev(struct net_device *dev);
2432void netdev_freemem(struct net_device *dev);
2433void synchronize_net(void);
2434int init_dummy_netdev(struct net_device *dev);
2435
2436DECLARE_PER_CPU(int, xmit_recursion);
2437#define XMIT_RECURSION_LIMIT 10
2438
2439static inline int dev_recursion_level(void)
2440{
2441 return this_cpu_read(xmit_recursion);
2442}
2443
2444struct net_device *dev_get_by_index(struct net *net, int ifindex);
2445struct net_device *__dev_get_by_index(struct net *net, int ifindex);
2446struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
2447struct net_device *dev_get_by_napi_id(unsigned int napi_id);
2448int netdev_get_name(struct net *net, char *name, int ifindex);
2449int dev_restart(struct net_device *dev);
2450int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);
2451
2452static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
2453{
2454 return NAPI_GRO_CB(skb)->data_offset;
2455}
2456
2457static inline unsigned int skb_gro_len(const struct sk_buff *skb)
2458{
2459 return skb->len - NAPI_GRO_CB(skb)->data_offset;
2460}
2461
2462static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
2463{
2464 NAPI_GRO_CB(skb)->data_offset += len;
2465}
2466
2467static inline void *skb_gro_header_fast(struct sk_buff *skb,
2468 unsigned int offset)
2469{
2470 return NAPI_GRO_CB(skb)->frag0 + offset;
2471}
2472
2473static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
2474{
2475 return NAPI_GRO_CB(skb)->frag0_len < hlen;
2476}
2477
2478static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
2479{
2480 NAPI_GRO_CB(skb)->frag0 = NULL;
2481 NAPI_GRO_CB(skb)->frag0_len = 0;
2482}
2483
2484static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
2485 unsigned int offset)
2486{
2487 if (!pskb_may_pull(skb, hlen))
2488 return NULL;
2489
2490 skb_gro_frag0_invalidate(skb);
2491 return skb->data + offset;
2492}
2493
2494static inline void *skb_gro_network_header(struct sk_buff *skb)
2495{
2496 return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
2497 skb_network_offset(skb);
2498}
2499
2500static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
2501 const void *start, unsigned int len)
2502{
2503 if (NAPI_GRO_CB(skb)->csum_valid)
2504 NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
2505 csum_partial(start, len, 0));
2506}
2507
2508
2509
2510
2511
2512
2513__sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
2514
2515static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
2516{
2517 return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
2518}
2519
2520static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
2521 bool zero_okay,
2522 __sum16 check)
2523{
2524 return ((skb->ip_summed != CHECKSUM_PARTIAL ||
2525 skb_checksum_start_offset(skb) <
2526 skb_gro_offset(skb)) &&
2527 !skb_at_gro_remcsum_start(skb) &&
2528 NAPI_GRO_CB(skb)->csum_cnt == 0 &&
2529 (!zero_okay || check));
2530}
2531
2532static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
2533 __wsum psum)
2534{
2535 if (NAPI_GRO_CB(skb)->csum_valid &&
2536 !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
2537 return 0;
2538
2539 NAPI_GRO_CB(skb)->csum = psum;
2540
2541 return __skb_gro_checksum_complete(skb);
2542}
2543
2544static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
2545{
2546 if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
2547
2548 NAPI_GRO_CB(skb)->csum_cnt--;
2549 } else {
2550
2551
2552
2553
2554 __skb_incr_checksum_unnecessary(skb);
2555 }
2556}
2557
2558#define __skb_gro_checksum_validate(skb, proto, zero_okay, check, \
2559 compute_pseudo) \
2560({ \
2561 __sum16 __ret = 0; \
2562 if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \
2563 __ret = __skb_gro_checksum_validate_complete(skb, \
2564 compute_pseudo(skb, proto)); \
2565 if (!__ret) \
2566 skb_gro_incr_csum_unnecessary(skb); \
2567 __ret; \
2568})
2569
2570#define skb_gro_checksum_validate(skb, proto, compute_pseudo) \
2571 __skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)
2572
2573#define skb_gro_checksum_validate_zero_check(skb, proto, check, \
2574 compute_pseudo) \
2575 __skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)
2576
2577#define skb_gro_checksum_simple_validate(skb) \
2578 __skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
2579
2580static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
2581{
2582 return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
2583 !NAPI_GRO_CB(skb)->csum_valid);
2584}
2585
2586static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
2587 __sum16 check, __wsum pseudo)
2588{
2589 NAPI_GRO_CB(skb)->csum = ~pseudo;
2590 NAPI_GRO_CB(skb)->csum_valid = 1;
2591}
2592
2593#define skb_gro_checksum_try_convert(skb, proto, check, compute_pseudo) \
2594do { \
2595 if (__skb_gro_checksum_convert_check(skb)) \
2596 __skb_gro_checksum_convert(skb, check, \
2597 compute_pseudo(skb, proto)); \
2598} while (0)
2599
2600struct gro_remcsum {
2601 int offset;
2602 __wsum delta;
2603};
2604
2605static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
2606{
2607 grc->offset = 0;
2608 grc->delta = 0;
2609}
2610
2611static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
2612 unsigned int off, size_t hdrlen,
2613 int start, int offset,
2614 struct gro_remcsum *grc,
2615 bool nopartial)
2616{
2617 __wsum delta;
2618 size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
2619
2620 BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
2621
2622 if (!nopartial) {
2623 NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
2624 return ptr;
2625 }
2626
2627 ptr = skb_gro_header_fast(skb, off);
2628 if (skb_gro_header_hard(skb, off + plen)) {
2629 ptr = skb_gro_header_slow(skb, off + plen, off);
2630 if (!ptr)
2631 return NULL;
2632 }
2633
2634 delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
2635 start, offset);
2636
2637
2638 NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
2639
2640 grc->offset = off + hdrlen + offset;
2641 grc->delta = delta;
2642
2643 return ptr;
2644}
2645
2646static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
2647 struct gro_remcsum *grc)
2648{
2649 void *ptr;
2650 size_t plen = grc->offset + sizeof(u16);
2651
2652 if (!grc->delta)
2653 return;
2654
2655 ptr = skb_gro_header_fast(skb, grc->offset);
2656 if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
2657 ptr = skb_gro_header_slow(skb, plen, grc->offset);
2658 if (!ptr)
2659 return;
2660 }
2661
2662 remcsum_unadjust((__sum16 *)ptr, grc->delta);
2663}
2664
2665#ifdef CONFIG_XFRM_OFFLOAD
2666static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
2667{
2668 if (PTR_ERR(pp) != -EINPROGRESS)
2669 NAPI_GRO_CB(skb)->flush |= flush;
2670}
2671#else
2672static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
2673{
2674 NAPI_GRO_CB(skb)->flush |= flush;
2675}
2676#endif
2677
2678static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
2679 unsigned short type,
2680 const void *daddr, const void *saddr,
2681 unsigned int len)
2682{
2683 if (!dev->header_ops || !dev->header_ops->create)
2684 return 0;
2685
2686 return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
2687}
2688
2689static inline int dev_parse_header(const struct sk_buff *skb,
2690 unsigned char *haddr)
2691{
2692 const struct net_device *dev = skb->dev;
2693
2694 if (!dev->header_ops || !dev->header_ops->parse)
2695 return 0;
2696 return dev->header_ops->parse(skb, haddr);
2697}
2698
2699
2700static inline bool dev_validate_header(const struct net_device *dev,
2701 char *ll_header, int len)
2702{
2703 if (likely(len >= dev->hard_header_len))
2704 return true;
2705 if (len < dev->min_header_len)
2706 return false;
2707
2708 if (capable(CAP_SYS_RAWIO)) {
2709 memset(ll_header + len, 0, dev->hard_header_len - len);
2710 return true;
2711 }
2712
2713 if (dev->header_ops && dev->header_ops->validate)
2714 return dev->header_ops->validate(ll_header, len);
2715
2716 return false;
2717}
2718
2719typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
2720int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
2721static inline int unregister_gifconf(unsigned int family)
2722{
2723 return register_gifconf(family, NULL);
2724}
2725
2726#ifdef CONFIG_NET_FLOW_LIMIT
2727#define FLOW_LIMIT_HISTORY (1 << 7)
2728struct sd_flow_limit {
2729 u64 count;
2730 unsigned int num_buckets;
2731 unsigned int history_head;
2732 u16 history[FLOW_LIMIT_HISTORY];
2733 u8 buckets[];
2734};
2735
2736extern int netdev_flow_limit_table_len;
2737#endif
2738
2739
2740
2741
2742struct softnet_data {
2743 struct list_head poll_list;
2744 struct sk_buff_head process_queue;
2745
2746
2747 unsigned int processed;
2748 unsigned int time_squeeze;
2749 unsigned int received_rps;
2750#ifdef CONFIG_RPS
2751 struct softnet_data *rps_ipi_list;
2752#endif
2753#ifdef CONFIG_NET_FLOW_LIMIT
2754 struct sd_flow_limit __rcu *flow_limit;
2755#endif
2756 struct Qdisc *output_queue;
2757 struct Qdisc **output_queue_tailp;
2758 struct sk_buff *completion_queue;
2759
2760#ifdef CONFIG_RPS
	/* input_queue_head should be written by cpu owning this struct,
	 * and only read by other cpus. Worth using a cache line.
	 */
2764 unsigned int input_queue_head ____cacheline_aligned_in_smp;

	/* Elements below can be accessed between CPUs for RPS/RFS */
2767 call_single_data_t csd ____cacheline_aligned_in_smp;
2768 struct softnet_data *rps_ipi_next;
2769 unsigned int cpu;
2770 unsigned int input_queue_tail;
2771#endif
2772 unsigned int dropped;
2773 struct sk_buff_head input_pkt_queue;
2774 struct napi_struct backlog;
2775
2776};
2777
2778static inline void input_queue_head_incr(struct softnet_data *sd)
2779{
2780#ifdef CONFIG_RPS
2781 sd->input_queue_head++;
2782#endif
2783}
2784
2785static inline void input_queue_tail_incr_save(struct softnet_data *sd,
2786 unsigned int *qtail)
2787{
2788#ifdef CONFIG_RPS
2789 *qtail = ++sd->input_queue_tail;
2790#endif
2791}
2792
2793DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
2794
2795void __netif_schedule(struct Qdisc *q);
2796void netif_schedule_queue(struct netdev_queue *txq);
2797
2798static inline void netif_tx_schedule_all(struct net_device *dev)
2799{
2800 unsigned int i;
2801
2802 for (i = 0; i < dev->num_tx_queues; i++)
2803 netif_schedule_queue(netdev_get_tx_queue(dev, i));
2804}
2805
2806static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
2807{
2808 clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
2809}
2810
/**
 *	netif_start_queue - allow transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 */
2817static inline void netif_start_queue(struct net_device *dev)
2818{
2819 netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
2820}
2821
2822static inline void netif_tx_start_all_queues(struct net_device *dev)
2823{
2824 unsigned int i;
2825
2826 for (i = 0; i < dev->num_tx_queues; i++) {
2827 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2828 netif_tx_start_queue(txq);
2829 }
2830}
2831
2832void netif_tx_wake_queue(struct netdev_queue *dev_queue);
2833
/**
 *	netif_wake_queue - restart transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are available.
 */
2841static inline void netif_wake_queue(struct net_device *dev)
2842{
2843 netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
2844}
2845
2846static inline void netif_tx_wake_all_queues(struct net_device *dev)
2847{
2848 unsigned int i;
2849
2850 for (i = 0; i < dev->num_tx_queues; i++) {
2851 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2852 netif_tx_wake_queue(txq);
2853 }
2854}
2855
2856static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
2857{
2858 set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
2859}
2860
/**
 *	netif_stop_queue - stop transmitted packets
 *	@dev: network device
 *
 *	Stop upper layers calling the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are unavailable.
 */
2868static inline void netif_stop_queue(struct net_device *dev)
2869{
2870 netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
2871}
2872
2873void netif_tx_stop_all_queues(struct net_device *dev);
2874
2875static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
2876{
2877 return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
2878}
2879
/**
 *	netif_queue_stopped - test if transmit queue is flowblocked
 *	@dev: network device
 *
 *	Test if transmit queue on device is currently unable to send.
 */
2886static inline bool netif_queue_stopped(const struct net_device *dev)
2887{
2888 return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
2889}
2890
2891static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
2892{
2893 return dev_queue->state & QUEUE_STATE_ANY_XOFF;
2894}
2895
2896static inline bool
2897netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
2898{
2899 return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
2900}
2901
2902static inline bool
2903netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
2904{
2905 return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
2906}
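
/* A minimal flow-control sketch for a single-queue driver (all my_*()
 * helpers and struct my_priv are hypothetical):
 *
 *	static netdev_tx_t my_start_xmit(struct sk_buff *skb,
 *					 struct net_device *dev)
 *	{
 *		struct my_priv *priv = netdev_priv(dev);
 *
 *		if (my_tx_ring_full(priv)) {
 *			netif_stop_queue(dev);
 *			return NETDEV_TX_BUSY;
 *		}
 *		my_post_descriptor(priv, skb);
 *		if (my_tx_ring_full(priv))
 *			netif_stop_queue(dev);
 *		return NETDEV_TX_OK;
 *	}
 *
 * and from the TX completion path, once descriptors have been reclaimed:
 *
 *	if (netif_queue_stopped(dev) && my_tx_ring_has_room(priv))
 *		netif_wake_queue(dev);
 */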
2907
/**
 *	netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
 *	@dev_queue: pointer to transmit queue
 *
 * BQL enabled drivers might use this helper in their ndo_start_xmit(),
 * to give appropriate hint to the CPU.
 */
2915static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
2916{
2917#ifdef CONFIG_BQL
2918 prefetchw(&dev_queue->dql.num_queued);
2919#endif
2920}
2921
/**
 *	netdev_txq_bql_complete_prefetchw - prefetch bql data for write
 *	@dev_queue: pointer to transmit queue
 *
 * BQL enabled drivers might use this helper in their TX completion path,
 * to give appropriate hint to the CPU.
 */
2929static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
2930{
2931#ifdef CONFIG_BQL
2932 prefetchw(&dev_queue->dql.limit);
2933#endif
2934}
2935
2936static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
2937 unsigned int bytes)
2938{
2939#ifdef CONFIG_BQL
2940 dql_queued(&dev_queue->dql, bytes);
2941
2942 if (likely(dql_avail(&dev_queue->dql) >= 0))
2943 return;
2944
2945 set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
2946
	/*
	 * The XOFF flag must be set before checking the dql_avail below,
	 * because in netdev_tx_completed_queue we update the dql_completed
	 * before checking the XOFF flag.
	 */
2952 smp_mb();
2953
	/* check again in case another CPU has just made room avail */
2955 if (unlikely(dql_avail(&dev_queue->dql) >= 0))
2956 clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
2957#endif
2958}
2959
/**
 *	netdev_sent_queue - report the number of bytes queued to hardware
 *	@dev: network device
 *	@bytes: number of bytes queued to the hardware device queue
 *
 *	Report the number of bytes queued for sending/completion to the network
 *	device hardware queue. @bytes should be a good approximation and should
 *	exactly match netdev_completed_queue() @bytes
 */
2969static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
2970{
2971 netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
2972}
2973
2974static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
2975 unsigned int pkts, unsigned int bytes)
2976{
2977#ifdef CONFIG_BQL
2978 if (unlikely(!bytes))
2979 return;
2980
2981 dql_completed(&dev_queue->dql, bytes);
2982
	/*
	 * Without the memory barrier there is a small possibility that
	 * netdev_tx_sent_queue will miss the update and cause the queue to
	 * be stopped forever
	 */
2988 smp_mb();
2989
2990 if (dql_avail(&dev_queue->dql) < 0)
2991 return;
2992
2993 if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
2994 netif_schedule_queue(dev_queue);
2995#endif
2996}
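
/* A minimal BQL accounting sketch (hypothetical driver): report bytes as
 * they are handed to the hardware and again as they complete, so the dql
 * machinery can size the in-flight queue:
 *
 *	// in ndo_start_xmit(), after posting skb to the ring behind txq
 *	netdev_tx_sent_queue(txq, skb->len);
 *
 *	// in the TX completion handler, after reclaiming descriptors
 *	netdev_tx_completed_queue(txq, pkts_completed, bytes_completed);
 *
 * Single-queue drivers can use netdev_sent_queue()/netdev_completed_queue()
 * below instead, which operate on TX queue 0.
 */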
2997
/**
 *	netdev_completed_queue - report bytes and packets completed by device
 *	@dev: network device
 *	@pkts: actual number of packets sent over the medium
 *	@bytes: actual number of bytes sent over the medium
 *
 *	Report the number of bytes and packets transmitted by the network device
 *	hardware queue over the physical medium, @bytes must exactly match the
 *	@bytes amount passed to netdev_sent_queue()
 */
3008static inline void netdev_completed_queue(struct net_device *dev,
3009 unsigned int pkts, unsigned int bytes)
3010{
3011 netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
3012}
3013
3014static inline void netdev_tx_reset_queue(struct netdev_queue *q)
3015{
3016#ifdef CONFIG_BQL
3017 clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
3018 dql_reset(&q->dql);
3019#endif
3020}
3021
/**
 *	netdev_reset_queue - reset the packets and bytes count of a network device
 *	@dev_queue: network device
 *
 *	Reset the bytes and packet count of a network device and clear the
 *	software flow control OFF bit for this network device
 */
3029static inline void netdev_reset_queue(struct net_device *dev_queue)
3030{
3031 netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
3032}
3033
/**
 *	netdev_cap_txqueue - check if selected tx queue exceeds device queues
 *	@dev: network device
 *	@queue_index: given tx queue index
 *
 *	Returns 0 if given tx queue index >= number of device tx queues,
 *	otherwise returns the originally passed tx queue index.
 */
3042static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
3043{
3044 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
3045 net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
3046 dev->name, queue_index,
3047 dev->real_num_tx_queues);
3048 return 0;
3049 }
3050
3051 return queue_index;
3052}
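
/* The core transmit path uses this helper to sanitize queue indices coming
 * back from ndo_select_queue() before they are dereferenced, roughly
 * (sketch):
 *
 *	queue_index = netdev_cap_txqueue(dev, queue_index);
 */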
3053
/**
 *	netif_running - test if up
 *	@dev: network device
 *
 *	Test if the device has been brought up.
 */
3060static inline bool netif_running(const struct net_device *dev)
3061{
3062 return test_bit(__LINK_STATE_START, &dev->state);
3063}
3064
/*
 * Routines to manage the subqueues on a device.  We only need start,
 * stop, and a check if it's stopped.  All other device management is
 * done at the overall netdevice level.
 * Also test the device if we're multiqueue.
 */

/**
 *	netif_start_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Start individual transmit queue of a device with multiple transmit queues.
 */
3079static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
3080{
3081 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3082
3083 netif_tx_start_queue(txq);
3084}
3085
/**
 *	netif_stop_subqueue - stop sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Stop individual transmit queue of a device with multiple transmit queues.
 */
3093static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
3094{
3095 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3096 netif_tx_stop_queue(txq);
3097}
3098
/**
 *	__netif_subqueue_stopped - test status of subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Check individual transmit queue of a device with multiple transmit queues.
 */
3106static inline bool __netif_subqueue_stopped(const struct net_device *dev,
3107 u16 queue_index)
3108{
3109 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3110
3111 return netif_tx_queue_stopped(txq);
3112}
3113
3114static inline bool netif_subqueue_stopped(const struct net_device *dev,
3115 struct sk_buff *skb)
3116{
3117 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
3118}
3119
/**
 *	netif_wake_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Resume individual transmit queue of a device with multiple transmit queues.
 */
3127static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
3128{
3129 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3130
3131 netif_tx_wake_queue(txq);
3132}
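
/* A minimal multiqueue sketch (hypothetical driver): per-ring flow control
 * keyed by the queue mapping the stack selected for the skb:
 *
 *	u16 q = skb_get_queue_mapping(skb);
 *
 *	if (my_ring_full(priv, q)) {
 *		netif_stop_subqueue(dev, q);
 *		return NETDEV_TX_BUSY;
 *	}
 *
 * and from the per-ring TX completion path:
 *
 *	if (__netif_subqueue_stopped(dev, q) && my_ring_has_room(priv, q))
 *		netif_wake_subqueue(dev, q);
 */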
3133
3134#ifdef CONFIG_XPS
3135int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
3136 u16 index);
3137#else
3138static inline int netif_set_xps_queue(struct net_device *dev,
3139 const struct cpumask *mask,
3140 u16 index)
3141{
3142 return 0;
3143}
3144#endif
3145
3146u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
3147 unsigned int num_tx_queues);
3148
/*
 * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
 * as a distribution range limit for the returned value.
 */
3153static inline u16 skb_tx_hash(const struct net_device *dev,
3154 struct sk_buff *skb)
3155{
3156 return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
3157}
3158
3159
3160
3161
3162
3163
3164
3165static inline bool netif_is_multiqueue(const struct net_device *dev)
3166{
3167 return dev->num_tx_queues > 1;
3168}
3169
3170int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
3171
3172#ifdef CONFIG_SYSFS
3173int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
3174#else
3175static inline int netif_set_real_num_rx_queues(struct net_device *dev,
3176 unsigned int rxq)
3177{
3178 return 0;
3179}
3180#endif
3181
3182#ifdef CONFIG_SYSFS
3183static inline unsigned int get_netdev_rx_queue_index(
3184 struct netdev_rx_queue *queue)
3185{
3186 struct net_device *dev = queue->dev;
3187 int index = queue - dev->_rx;
3188
3189 BUG_ON(index >= dev->num_rx_queues);
3190 return index;
3191}
3192#endif
3193
3194#define DEFAULT_MAX_NUM_RSS_QUEUES (8)
3195int netif_get_num_default_rss_queues(void);
3196
3197enum skb_free_reason {
3198 SKB_REASON_CONSUMED,
3199 SKB_REASON_DROPPED,
3200};
3201
3202void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
3203void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
3204
/* It is not allowed to call kfree_skb() or consume_skb() from hardware
 * interrupt context or with hardware interrupts being disabled.
 * (in_irq() || irqs_disabled())
 *
 * We provide four helpers that can be used in following contexts :
 *
 * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
 *  replacing kfree_skb(skb)
 *
 * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
 *  Typically used in place of consume_skb(skb) in TX completion path
 *
 * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
 *  replacing kfree_skb(skb)
 *
 * dev_consume_skb_any(skb) when caller doesn't know its current irq context,
 *  replacing consume_skb(skb)
 */
3224static inline void dev_kfree_skb_irq(struct sk_buff *skb)
3225{
3226 __dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
3227}
3228
3229static inline void dev_consume_skb_irq(struct sk_buff *skb)
3230{
3231 __dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
3232}
3233
3234static inline void dev_kfree_skb_any(struct sk_buff *skb)
3235{
3236 __dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
3237}
3238
3239static inline void dev_consume_skb_any(struct sk_buff *skb)
3240{
3241 __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
3242}
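
/* A minimal sketch: freeing skbs from a context that may be hard-IRQ.
 * dev_consume_skb_any() is the "transmitted fine" variant and
 * dev_kfree_skb_any() the "dropped" variant; both are safe whether or not
 * hardware interrupts are disabled (hypothetical reclaim loop):
 *
 *	while ((skb = my_reclaim_next_tx_skb(priv)) != NULL)
 *		dev_consume_skb_any(skb);
 */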
3243
3244void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog);
3245int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb);
3246int netif_rx(struct sk_buff *skb);
3247int netif_rx_ni(struct sk_buff *skb);
3248int netif_receive_skb(struct sk_buff *skb);
3249gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
3250void napi_gro_flush(struct napi_struct *napi, bool flush_old);
3251struct sk_buff *napi_get_frags(struct napi_struct *napi);
3252gro_result_t napi_gro_frags(struct napi_struct *napi);
3253struct packet_offload *gro_find_receive_by_type(__be16 type);
3254struct packet_offload *gro_find_complete_by_type(__be16 type);
3255
3256static inline void napi_free_frags(struct napi_struct *napi)
3257{
3258 kfree_skb(napi->skb);
3259 napi->skb = NULL;
3260}
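
/* A minimal NAPI poll sketch (hypothetical driver): received skbs are fed
 * to GRO, which either merges them into an existing flow or passes them up
 * the stack:
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work = 0;
 *		struct sk_buff *skb;
 *
 *		while (work < budget && (skb = my_rx_next(napi)) != NULL) {
 *			napi_gro_receive(napi, skb);
 *			work++;
 *		}
 *		if (work < budget)
 *			napi_complete_done(napi, work);
 *		return work;
 *	}
 */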
3261
3262bool netdev_is_rx_handler_busy(struct net_device *dev);
3263int netdev_rx_handler_register(struct net_device *dev,
3264 rx_handler_func_t *rx_handler,
3265 void *rx_handler_data);
3266void netdev_rx_handler_unregister(struct net_device *dev);
3267
3268bool dev_valid_name(const char *name);
3269int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
3270int dev_ethtool(struct net *net, struct ifreq *);
3271unsigned int dev_get_flags(const struct net_device *);
3272int __dev_change_flags(struct net_device *, unsigned int flags);
3273int dev_change_flags(struct net_device *, unsigned int);
3274void __dev_notify_flags(struct net_device *, unsigned int old_flags,
3275 unsigned int gchanges);
3276int dev_change_name(struct net_device *, const char *);
3277int dev_set_alias(struct net_device *, const char *, size_t);
3278int dev_change_net_namespace(struct net_device *, struct net *, const char *);
3279int __dev_set_mtu(struct net_device *, int);
3280int dev_set_mtu(struct net_device *, int);
3281void dev_set_group(struct net_device *, int);
3282int dev_set_mac_address(struct net_device *, struct sockaddr *);
3283int dev_change_carrier(struct net_device *, bool new_carrier);
3284int dev_get_phys_port_id(struct net_device *dev,
3285 struct netdev_phys_item_id *ppid);
3286int dev_get_phys_port_name(struct net_device *dev,
3287 char *name, size_t len);
3288int dev_change_proto_down(struct net_device *dev, bool proto_down);
3289struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
3290struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
3291 struct netdev_queue *txq, int *ret);
3292
3293typedef int (*xdp_op_t)(struct net_device *dev, struct netdev_xdp *xdp);
3294int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
3295 int fd, u32 flags);
3296u8 __dev_xdp_attached(struct net_device *dev, xdp_op_t xdp_op, u32 *prog_id);
3297
3298int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
3299int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
3300bool is_skb_forwardable(const struct net_device *dev,
3301 const struct sk_buff *skb);
3302
3303static __always_inline int ____dev_forward_skb(struct net_device *dev,
3304 struct sk_buff *skb)
3305{
3306 if (skb_orphan_frags(skb, GFP_ATOMIC) ||
3307 unlikely(!is_skb_forwardable(dev, skb))) {
3308 atomic_long_inc(&dev->rx_dropped);
3309 kfree_skb(skb);
3310 return NET_RX_DROP;
3311 }
3312
3313 skb_scrub_packet(skb, true);
3314 skb->priority = 0;
3315 return 0;
3316}
3317
3318void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
3319
3320extern int netdev_budget;
3321extern unsigned int netdev_budget_usecs;
3322
3323
3324void netdev_run_todo(void);
3325
/**
 *	dev_put - release reference to device
 *	@dev: network device
 *
 * Release reference to device to allow it to be freed.
 */
3332static inline void dev_put(struct net_device *dev)
3333{
3334 this_cpu_dec(*dev->pcpu_refcnt);
3335}
3336
/**
 *	dev_hold - get reference to device
 *	@dev: network device
 *
 * Hold reference to device to keep it from being freed.
 */
3343static inline void dev_hold(struct net_device *dev)
3344{
3345 this_cpu_inc(*dev->pcpu_refcnt);
3346}
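
/* A minimal sketch: take a reference while stashing a device pointer in a
 * longer-lived object and drop it when done (my_ctx is hypothetical):
 *
 *	my_ctx->dev = dev;
 *	dev_hold(dev);
 *	...
 *	dev_put(my_ctx->dev);
 *	my_ctx->dev = NULL;
 */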
3347
/* Carrier loss detection, dial on demand. The functions netif_carrier_on
 * and _off may be called from IRQ context, but it is the caller
 * who is responsible for serialization of these calls.
 *
 * The name carrier is inappropriate, these functions should really be
 * called netif_lowerlayer_*() because they represent the state of any
 * kind of lower layer not just hardware media.
 */
3357void linkwatch_init_dev(struct net_device *dev);
3358void linkwatch_fire_event(struct net_device *dev);
3359void linkwatch_forget_dev(struct net_device *dev);
3360
/**
 *	netif_carrier_ok - test if carrier present
 *	@dev: network device
 *
 * Check if carrier is present on device
 */
3367static inline bool netif_carrier_ok(const struct net_device *dev)
3368{
3369 return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
3370}
3371
3372unsigned long dev_trans_start(struct net_device *dev);
3373
3374void __netdev_watchdog_up(struct net_device *dev);
3375
3376void netif_carrier_on(struct net_device *dev);
3377
3378void netif_carrier_off(struct net_device *dev);
3379
/**
 *	netif_dormant_on - mark device as dormant.
 *	@dev: network device
 *
 * Mark device as dormant (as per RFC2863).
 *
 * The dormant state indicates that the relevant interface is not
 * actually in a condition to pass packets (i.e., it is not 'up') but is
 * in a "pending" state, waiting for some external event.  For "on-
 * demand" interfaces, this new state identifies the situation where the
 * interface is waiting for events to place it in the up state.
 */
3392static inline void netif_dormant_on(struct net_device *dev)
3393{
3394 if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
3395 linkwatch_fire_event(dev);
3396}
3397
3398
3399
3400
3401
3402
3403
3404static inline void netif_dormant_off(struct net_device *dev)
3405{
3406 if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
3407 linkwatch_fire_event(dev);
3408}
3409
3410
3411
3412
3413
3414
3415
3416static inline bool netif_dormant(const struct net_device *dev)
3417{
3418 return test_bit(__LINK_STATE_DORMANT, &dev->state);
3419}
3420
/**
 *	netif_oper_up - test if device is operational
 *	@dev: network device
 *
 * Check if carrier is operational
 */
3428static inline bool netif_oper_up(const struct net_device *dev)
3429{
	return (dev->operstate == IF_OPER_UP ||
		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
3432}
3433
/**
 *	netif_device_present - is device available or removed
 *	@dev: network device
 *
 * Check if device has not been removed from system.
 */
3440static inline bool netif_device_present(struct net_device *dev)
3441{
3442 return test_bit(__LINK_STATE_PRESENT, &dev->state);
3443}
3444
3445void netif_device_detach(struct net_device *dev);
3446
3447void netif_device_attach(struct net_device *dev);
3448
3449
3450
3451
3452
3453enum {
3454 NETIF_MSG_DRV = 0x0001,
3455 NETIF_MSG_PROBE = 0x0002,
3456 NETIF_MSG_LINK = 0x0004,
3457 NETIF_MSG_TIMER = 0x0008,
3458 NETIF_MSG_IFDOWN = 0x0010,
3459 NETIF_MSG_IFUP = 0x0020,
3460 NETIF_MSG_RX_ERR = 0x0040,
3461 NETIF_MSG_TX_ERR = 0x0080,
3462 NETIF_MSG_TX_QUEUED = 0x0100,
3463 NETIF_MSG_INTR = 0x0200,
3464 NETIF_MSG_TX_DONE = 0x0400,
3465 NETIF_MSG_RX_STATUS = 0x0800,
3466 NETIF_MSG_PKTDATA = 0x1000,
3467 NETIF_MSG_HW = 0x2000,
3468 NETIF_MSG_WOL = 0x4000,
3469};
3470
3471#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
3472#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
3473#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
3474#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
3475#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
3476#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
3477#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
3478#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
3479#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
3480#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
3481#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
3482#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
3483#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
3484#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
3485#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
3486
3487static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
3488{
3489
3490 if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
3491 return default_msg_enable_bits;
3492 if (debug_value == 0)
3493 return 0;
3494
3495 return (1 << debug_value) - 1;
3496}
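
/* A minimal sketch of the usual msg_enable setup (names hypothetical):
 * drivers take a "debug" module parameter, seed msg_enable from it in
 * probe, and gate their logging on the per-category test macros above:
 *
 *	static int debug = -1;		// -1 selects the driver defaults
 *	module_param(debug, int, 0);
 *
 *	priv->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV |
 *					  NETIF_MSG_PROBE | NETIF_MSG_LINK);
 *	...
 *	if (netif_msg_link(priv))
 *		netdev_info(dev, "link is up\n");
 */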
3497
3498static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
3499{
3500 spin_lock(&txq->_xmit_lock);
3501 txq->xmit_lock_owner = cpu;
3502}
3503
3504static inline bool __netif_tx_acquire(struct netdev_queue *txq)
3505{
3506 __acquire(&txq->_xmit_lock);
3507 return true;
3508}
3509
3510static inline void __netif_tx_release(struct netdev_queue *txq)
3511{
3512 __release(&txq->_xmit_lock);
3513}
3514
3515static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
3516{
3517 spin_lock_bh(&txq->_xmit_lock);
3518 txq->xmit_lock_owner = smp_processor_id();
3519}
3520
3521static inline bool __netif_tx_trylock(struct netdev_queue *txq)
3522{
3523 bool ok = spin_trylock(&txq->_xmit_lock);
3524 if (likely(ok))
3525 txq->xmit_lock_owner = smp_processor_id();
3526 return ok;
3527}
3528
3529static inline void __netif_tx_unlock(struct netdev_queue *txq)
3530{
3531 txq->xmit_lock_owner = -1;
3532 spin_unlock(&txq->_xmit_lock);
3533}
3534
3535static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
3536{
3537 txq->xmit_lock_owner = -1;
3538 spin_unlock_bh(&txq->_xmit_lock);
3539}
3540
3541static inline void txq_trans_update(struct netdev_queue *txq)
3542{
3543 if (txq->xmit_lock_owner != -1)
3544 txq->trans_start = jiffies;
3545}
3546
3547
3548static inline void netif_trans_update(struct net_device *dev)
3549{
3550 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
3551
3552 if (txq->trans_start != jiffies)
3553 txq->trans_start = jiffies;
3554}
3555
3556
3557
3558
3559
3560
3561
3562static inline void netif_tx_lock(struct net_device *dev)
3563{
3564 unsigned int i;
3565 int cpu;
3566
3567 spin_lock(&dev->tx_global_lock);
3568 cpu = smp_processor_id();
3569 for (i = 0; i < dev->num_tx_queues; i++) {
3570 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3571
		/* We are the only thread of execution doing a
		 * freeze, but we have to grab the _xmit_lock in
		 * order to synchronize with threads which are in
		 * the ->hard_start_xmit() handler and already
		 * checked the frozen bit.
		 */
3578 __netif_tx_lock(txq, cpu);
3579 set_bit(__QUEUE_STATE_FROZEN, &txq->state);
3580 __netif_tx_unlock(txq);
3581 }
3582}
3583
3584static inline void netif_tx_lock_bh(struct net_device *dev)
3585{
3586 local_bh_disable();
3587 netif_tx_lock(dev);
3588}
3589
3590static inline void netif_tx_unlock(struct net_device *dev)
3591{
3592 unsigned int i;
3593
3594 for (i = 0; i < dev->num_tx_queues; i++) {
3595 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3596
		/* No need to grab the _xmit_lock here.  If the
		 * queue is not stopped for another reason, we
		 * force a schedule.
		 */
3601 clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
3602 netif_schedule_queue(txq);
3603 }
3604 spin_unlock(&dev->tx_global_lock);
3605}
3606
3607static inline void netif_tx_unlock_bh(struct net_device *dev)
3608{
3609 netif_tx_unlock(dev);
3610 local_bh_enable();
3611}
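
/* A sketch of the intended use (assumption: the driver is not NETIF_F_LLTX):
 * netif_tx_lock_bh()/netif_tx_unlock_bh() freeze every TX queue so the
 * stack will not enter ndo_start_xmit() concurrently, e.g. around a
 * hypothetical ring reconfiguration:
 *
 *	netif_tx_lock_bh(dev);
 *	my_reprogram_tx_rings(priv);
 *	netif_tx_unlock_bh(dev);
 */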
3612
3613#define HARD_TX_LOCK(dev, txq, cpu) { \
3614 if ((dev->features & NETIF_F_LLTX) == 0) { \
3615 __netif_tx_lock(txq, cpu); \
3616 } else { \
3617 __netif_tx_acquire(txq); \
3618 } \
3619}
3620
3621#define HARD_TX_TRYLOCK(dev, txq) \
3622 (((dev->features & NETIF_F_LLTX) == 0) ? \
3623 __netif_tx_trylock(txq) : \
3624 __netif_tx_acquire(txq))
3625
3626#define HARD_TX_UNLOCK(dev, txq) { \
3627 if ((dev->features & NETIF_F_LLTX) == 0) { \
3628 __netif_tx_unlock(txq); \
3629 } else { \
3630 __netif_tx_release(txq); \
3631 } \
3632}
3633
3634static inline void netif_tx_disable(struct net_device *dev)
3635{
3636 unsigned int i;
3637 int cpu;
3638
3639 local_bh_disable();
3640 cpu = smp_processor_id();
3641 for (i = 0; i < dev->num_tx_queues; i++) {
3642 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3643
3644 __netif_tx_lock(txq, cpu);
3645 netif_tx_stop_queue(txq);
3646 __netif_tx_unlock(txq);
3647 }
3648 local_bh_enable();
3649}
3650
3651static inline void netif_addr_lock(struct net_device *dev)
3652{
3653 spin_lock(&dev->addr_list_lock);
3654}
3655
3656static inline void netif_addr_lock_nested(struct net_device *dev)
3657{
3658 int subclass = SINGLE_DEPTH_NESTING;
3659
3660 if (dev->netdev_ops->ndo_get_lock_subclass)
3661 subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);
3662
3663 spin_lock_nested(&dev->addr_list_lock, subclass);
3664}
3665
3666static inline void netif_addr_lock_bh(struct net_device *dev)
3667{
3668 spin_lock_bh(&dev->addr_list_lock);
3669}
3670
3671static inline void netif_addr_unlock(struct net_device *dev)
3672{
3673 spin_unlock(&dev->addr_list_lock);
3674}
3675
3676static inline void netif_addr_unlock_bh(struct net_device *dev)
3677{
3678 spin_unlock_bh(&dev->addr_list_lock);
3679}
3680
3681
3682
3683
3684
3685#define for_each_dev_addr(dev, ha) \
3686 list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
3687
3688
3689
3690void ether_setup(struct net_device *dev);
3691
3692
3693struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
3694 unsigned char name_assign_type,
3695 void (*setup)(struct net_device *),
3696 unsigned int txqs, unsigned int rxqs);
3697int dev_get_valid_name(struct net *net, struct net_device *dev,
3698 const char *name);
3699
3700#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
3701 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)
3702
3703#define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \
3704 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \
3705 count)
3706
3707int register_netdev(struct net_device *dev);
3708void unregister_netdev(struct net_device *dev);
3709
3710
3711int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
3712 struct netdev_hw_addr_list *from_list, int addr_len);
3713void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
3714 struct netdev_hw_addr_list *from_list, int addr_len);
3715int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
3716 struct net_device *dev,
3717 int (*sync)(struct net_device *, const unsigned char *),
3718 int (*unsync)(struct net_device *,
3719 const unsigned char *));
3720void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
3721 struct net_device *dev,
3722 int (*unsync)(struct net_device *,
3723 const unsigned char *));
3724void __hw_addr_init(struct netdev_hw_addr_list *list);
3725
3726
3727int dev_addr_add(struct net_device *dev, const unsigned char *addr,
3728 unsigned char addr_type);
3729int dev_addr_del(struct net_device *dev, const unsigned char *addr,
3730 unsigned char addr_type);
3731void dev_addr_flush(struct net_device *dev);
3732int dev_addr_init(struct net_device *dev);
3733
3734
3735int dev_uc_add(struct net_device *dev, const unsigned char *addr);
3736int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
3737int dev_uc_del(struct net_device *dev, const unsigned char *addr);
3738int dev_uc_sync(struct net_device *to, struct net_device *from);
3739int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
3740void dev_uc_unsync(struct net_device *to, struct net_device *from);
3741void dev_uc_flush(struct net_device *dev);
3742void dev_uc_init(struct net_device *dev);
3743
/**
 *  __dev_uc_sync - Synchronize device's unicast list
 *  @dev:  device to sync
 *  @sync: function to call if address should be added
 *  @unsync: function to call if address should be removed
 *
 *  Add newly added addresses to the interface, and release
 *  addresses that have been deleted.
 */
3753static inline int __dev_uc_sync(struct net_device *dev,
3754 int (*sync)(struct net_device *,
3755 const unsigned char *),
3756 int (*unsync)(struct net_device *,
3757 const unsigned char *))
3758{
3759 return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
3760}
3761
/**
 *  __dev_uc_unsync - Remove synchronized addresses from device
 *  @dev:  device to sync
 *  @unsync: function to call if address should be removed
 *
 *  Remove all addresses that were added to the device by dev_uc_sync().
 */
3769static inline void __dev_uc_unsync(struct net_device *dev,
3770 int (*unsync)(struct net_device *,
3771 const unsigned char *))
3772{
3773 __hw_addr_unsync_dev(&dev->uc, dev, unsync);
3774}
3775
3776
3777int dev_mc_add(struct net_device *dev, const unsigned char *addr);
3778int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
3779int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
3780int dev_mc_del(struct net_device *dev, const unsigned char *addr);
3781int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
3782int dev_mc_sync(struct net_device *to, struct net_device *from);
3783int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
3784void dev_mc_unsync(struct net_device *to, struct net_device *from);
3785void dev_mc_flush(struct net_device *dev);
3786void dev_mc_init(struct net_device *dev);
3787
/**
 *  __dev_mc_sync - Synchronize device's multicast list
 *  @dev:  device to sync
 *  @sync: function to call if address should be added
 *  @unsync: function to call if address should be removed
 *
 *  Add newly added addresses to the interface, and release
 *  addresses that have been deleted.
 */
3797static inline int __dev_mc_sync(struct net_device *dev,
3798 int (*sync)(struct net_device *,
3799 const unsigned char *),
3800 int (*unsync)(struct net_device *,
3801 const unsigned char *))
3802{
3803 return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
3804}
3805
/**
 *  __dev_mc_unsync - Remove synchronized addresses from device
 *  @dev:  device to sync
 *  @unsync: function to call if address should be removed
 *
 *  Remove all addresses that were added to the device by dev_mc_sync().
 */
3813static inline void __dev_mc_unsync(struct net_device *dev,
3814 int (*unsync)(struct net_device *,
3815 const unsigned char *))
3816{
3817 __hw_addr_unsync_dev(&dev->mc, dev, unsync);
3818}
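
/* A minimal ndo_set_rx_mode() sketch (my_add_filter()/my_del_filter() are
 * hypothetical hardware-filter helpers):
 *
 *	static int my_sync(struct net_device *dev, const unsigned char *addr)
 *	{
 *		return my_add_filter(netdev_priv(dev), addr);
 *	}
 *
 *	static int my_unsync(struct net_device *dev, const unsigned char *addr)
 *	{
 *		my_del_filter(netdev_priv(dev), addr);
 *		return 0;
 *	}
 *
 *	static void my_set_rx_mode(struct net_device *dev)
 *	{
 *		__dev_uc_sync(dev, my_sync, my_unsync);
 *		__dev_mc_sync(dev, my_sync, my_unsync);
 *	}
 */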
3819
3820
3821void dev_set_rx_mode(struct net_device *dev);
3822void __dev_set_rx_mode(struct net_device *dev);
3823int dev_set_promiscuity(struct net_device *dev, int inc);
3824int dev_set_allmulti(struct net_device *dev, int inc);
3825void netdev_state_change(struct net_device *dev);
3826void netdev_notify_peers(struct net_device *dev);
3827void netdev_features_change(struct net_device *dev);
3828
3829void dev_load(struct net *net, const char *name);
3830struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
3831 struct rtnl_link_stats64 *storage);
3832void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
3833 const struct net_device_stats *netdev_stats);
3834
3835extern int netdev_max_backlog;
3836extern int netdev_tstamp_prequeue;
3837extern int weight_p;
3838extern int dev_weight_rx_bias;
3839extern int dev_weight_tx_bias;
3840extern int dev_rx_weight;
3841extern int dev_tx_weight;
3842
3843bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
3844struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
3845 struct list_head **iter);
3846struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
3847 struct list_head **iter);
3848
3849
3850#define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
3851 for (iter = &(dev)->adj_list.upper, \
3852 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
3853 updev; \
3854 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
3855
3856int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
3857 int (*fn)(struct net_device *upper_dev,
3858 void *data),
3859 void *data);
3860
3861bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
3862 struct net_device *upper_dev);
3863
3864bool netdev_has_any_upper_dev(struct net_device *dev);
3865
3866void *netdev_lower_get_next_private(struct net_device *dev,
3867 struct list_head **iter);
3868void *netdev_lower_get_next_private_rcu(struct net_device *dev,
3869 struct list_head **iter);
3870
3871#define netdev_for_each_lower_private(dev, priv, iter) \
3872 for (iter = (dev)->adj_list.lower.next, \
3873 priv = netdev_lower_get_next_private(dev, &(iter)); \
3874 priv; \
3875 priv = netdev_lower_get_next_private(dev, &(iter)))
3876
3877#define netdev_for_each_lower_private_rcu(dev, priv, iter) \
3878 for (iter = &(dev)->adj_list.lower, \
3879 priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
3880 priv; \
3881 priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
3882
3883void *netdev_lower_get_next(struct net_device *dev,
3884 struct list_head **iter);
3885
3886#define netdev_for_each_lower_dev(dev, ldev, iter) \
3887 for (iter = (dev)->adj_list.lower.next, \
3888 ldev = netdev_lower_get_next(dev, &(iter)); \
3889 ldev; \
3890 ldev = netdev_lower_get_next(dev, &(iter)))
3891
3892struct net_device *netdev_all_lower_get_next(struct net_device *dev,
3893 struct list_head **iter);
3894struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
3895 struct list_head **iter);
3896
3897int netdev_walk_all_lower_dev(struct net_device *dev,
3898 int (*fn)(struct net_device *lower_dev,
3899 void *data),
3900 void *data);
3901int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
3902 int (*fn)(struct net_device *lower_dev,
3903 void *data),
3904 void *data);
3905
3906void *netdev_adjacent_get_private(struct list_head *adj_list);
3907void *netdev_lower_get_first_private_rcu(struct net_device *dev);
3908struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
3909struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
3910int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev);
3911int netdev_master_upper_dev_link(struct net_device *dev,
3912 struct net_device *upper_dev,
3913 void *upper_priv, void *upper_info);
3914void netdev_upper_dev_unlink(struct net_device *dev,
3915 struct net_device *upper_dev);
3916void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
3917void *netdev_lower_dev_get_private(struct net_device *dev,
3918 struct net_device *lower_dev);
3919void netdev_lower_state_changed(struct net_device *lower_dev,
3920 void *lower_state_info);
3921
3922
3923#define NETDEV_RSS_KEY_LEN 52
3924extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
3925void netdev_rss_key_fill(void *buffer, size_t len);
3926
3927int dev_get_nest_level(struct net_device *dev);
3928int skb_checksum_help(struct sk_buff *skb);
3929int skb_crc32c_csum_help(struct sk_buff *skb);
3930int skb_csum_hwoffload_help(struct sk_buff *skb,
3931 const netdev_features_t features);
3932
3933struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
3934 netdev_features_t features, bool tx_path);
3935struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
3936 netdev_features_t features);
3937
3938struct netdev_bonding_info {
3939 ifslave slave;
3940 ifbond master;
3941};
3942
3943struct netdev_notifier_bonding_info {
3944 struct netdev_notifier_info info;
3945 struct netdev_bonding_info bonding_info;
3946};
3947
3948void netdev_bonding_info_change(struct net_device *dev,
3949 struct netdev_bonding_info *bonding_info);
3950
3951static inline
3952struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
3953{
3954 return __skb_gso_segment(skb, features, true);
3955}
3956__be16 skb_network_protocol(struct sk_buff *skb, int *depth);
3957
3958static inline bool can_checksum_protocol(netdev_features_t features,
3959 __be16 protocol)
3960{
3961 if (protocol == htons(ETH_P_FCOE))
3962 return !!(features & NETIF_F_FCOE_CRC);
3963
	/* Assume this is an IP checksum (not SCTP CRC) */

3966 if (features & NETIF_F_HW_CSUM) {
		/* Can checksum everything */
3968 return true;
3969 }
3970
3971 switch (protocol) {
3972 case htons(ETH_P_IP):
3973 return !!(features & NETIF_F_IP_CSUM);
3974 case htons(ETH_P_IPV6):
3975 return !!(features & NETIF_F_IPV6_CSUM);
3976 default:
3977 return false;
3978 }
3979}
3980
3981#ifdef CONFIG_BUG
3982void netdev_rx_csum_fault(struct net_device *dev);
3983#else
3984static inline void netdev_rx_csum_fault(struct net_device *dev)
3985{
3986}
3987#endif
3988
3989void net_enable_timestamp(void);
3990void net_disable_timestamp(void);
3991
3992#ifdef CONFIG_PROC_FS
3993int __init dev_proc_init(void);
3994#else
3995#define dev_proc_init() 0
3996#endif
3997
3998static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
3999 struct sk_buff *skb, struct net_device *dev,
4000 bool more)
4001{
4002 skb->xmit_more = more ? 1 : 0;
4003 return ops->ndo_start_xmit(skb, dev);
4004}
4005
4006static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
4007 struct netdev_queue *txq, bool more)
4008{
4009 const struct net_device_ops *ops = dev->netdev_ops;
4010 int rc;
4011
4012 rc = __netdev_start_xmit(ops, skb, dev, more);
4013 if (rc == NETDEV_TX_OK)
4014 txq_trans_update(txq);
4015
4016 return rc;
4017}
4018
4019int netdev_class_create_file_ns(const struct class_attribute *class_attr,
4020 const void *ns);
4021void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
4022 const void *ns);
4023
4024static inline int netdev_class_create_file(const struct class_attribute *class_attr)
4025{
4026 return netdev_class_create_file_ns(class_attr, NULL);
4027}
4028
4029static inline void netdev_class_remove_file(const struct class_attribute *class_attr)
4030{
4031 netdev_class_remove_file_ns(class_attr, NULL);
4032}
4033
4034extern const struct kobj_ns_type_operations net_ns_type_operations;
4035
4036const char *netdev_drivername(const struct net_device *dev);
4037
4038void linkwatch_run_queue(void);
4039
4040static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
4041 netdev_features_t f2)
4042{
4043 if ((f1 ^ f2) & NETIF_F_HW_CSUM) {
4044 if (f1 & NETIF_F_HW_CSUM)
4045 f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4046 else
4047 f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4048 }
4049
4050 return f1 & f2;
4051}
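
/* Stacked devices (vlan, bonding, team, ...) typically combine their own
 * feature set with the lower device's when recomputing features, e.g.
 * (sketch):
 *
 *	features = netdev_intersect_features(features, lower_dev->features);
 *
 * The NETIF_F_HW_CSUM special case above exists because a device that can
 * checksum everything also satisfies NETIF_F_IP_CSUM/NETIF_F_IPV6_CSUM.
 */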
4052
4053static inline netdev_features_t netdev_get_wanted_features(
4054 struct net_device *dev)
4055{
4056 return (dev->features & ~dev->hw_features) | dev->wanted_features;
4057}
4058netdev_features_t netdev_increment_features(netdev_features_t all,
4059 netdev_features_t one, netdev_features_t mask);
4060
4061
4062
4063
4064
4065static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
4066 netdev_features_t mask)
4067{
4068 return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
4069}
4070
4071int __netdev_update_features(struct net_device *dev);
4072void netdev_update_features(struct net_device *dev);
4073void netdev_change_features(struct net_device *dev);
4074
4075void netif_stacked_transfer_operstate(const struct net_device *rootdev,
4076 struct net_device *dev);
4077
4078netdev_features_t passthru_features_check(struct sk_buff *skb,
4079 struct net_device *dev,
4080 netdev_features_t features);
4081netdev_features_t netif_skb_features(struct sk_buff *skb);
4082
4083static inline bool net_gso_ok(netdev_features_t features, int gso_type)
4084{
4085 netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT;
4086
4087
4088 BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
4089 BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
4090 BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
4091 BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
4092 BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
4093 BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
4094 BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
4095 BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
4096 BUILD_BUG_ON(SKB_GSO_IPXIP4 != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT));
4097 BUILD_BUG_ON(SKB_GSO_IPXIP6 != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT));
4098 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
4099 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
4100 BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT));
4101 BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
4102 BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
4103 BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT));
4104
4105 return (features & feature) == feature;
4106}
4107
4108static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
4109{
4110 return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
4111 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
4112}
4113
4114static inline bool netif_needs_gso(struct sk_buff *skb,
4115 netdev_features_t features)
4116{
4117 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
4118 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
4119 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
4120}
4121
4122static inline void netif_set_gso_max_size(struct net_device *dev,
4123 unsigned int size)
4124{
4125 dev->gso_max_size = size;
4126}
4127
4128static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
4129 int pulled_hlen, u16 mac_offset,
4130 int mac_len)
4131{
4132 skb->protocol = protocol;
4133 skb->encapsulation = 1;
4134 skb_push(skb, pulled_hlen);
4135 skb_reset_transport_header(skb);
4136 skb->mac_header = mac_offset;
4137 skb->network_header = skb->mac_header + mac_len;
4138 skb->mac_len = mac_len;
4139}
4140
4141static inline bool netif_is_macsec(const struct net_device *dev)
4142{
4143 return dev->priv_flags & IFF_MACSEC;
4144}
4145
4146static inline bool netif_is_macvlan(const struct net_device *dev)
4147{
4148 return dev->priv_flags & IFF_MACVLAN;
4149}
4150
4151static inline bool netif_is_macvlan_port(const struct net_device *dev)
4152{
4153 return dev->priv_flags & IFF_MACVLAN_PORT;
4154}
4155
4156static inline bool netif_is_ipvlan(const struct net_device *dev)
4157{
4158 return dev->priv_flags & IFF_IPVLAN_SLAVE;
4159}
4160
4161static inline bool netif_is_ipvlan_port(const struct net_device *dev)
4162{
4163 return dev->priv_flags & IFF_IPVLAN_MASTER;
4164}
4165
4166static inline bool netif_is_bond_master(const struct net_device *dev)
4167{
4168 return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
4169}
4170
4171static inline bool netif_is_bond_slave(const struct net_device *dev)
4172{
4173 return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
4174}
4175
4176static inline bool netif_supports_nofcs(struct net_device *dev)
4177{
4178 return dev->priv_flags & IFF_SUPP_NOFCS;
4179}
4180
4181static inline bool netif_is_l3_master(const struct net_device *dev)
4182{
4183 return dev->priv_flags & IFF_L3MDEV_MASTER;
4184}
4185
4186static inline bool netif_is_l3_slave(const struct net_device *dev)
4187{
4188 return dev->priv_flags & IFF_L3MDEV_SLAVE;
4189}
4190
4191static inline bool netif_is_bridge_master(const struct net_device *dev)
4192{
4193 return dev->priv_flags & IFF_EBRIDGE;
4194}
4195
4196static inline bool netif_is_bridge_port(const struct net_device *dev)
4197{
4198 return dev->priv_flags & IFF_BRIDGE_PORT;
4199}
4200
4201static inline bool netif_is_ovs_master(const struct net_device *dev)
4202{
4203 return dev->priv_flags & IFF_OPENVSWITCH;
4204}
4205
4206static inline bool netif_is_ovs_port(const struct net_device *dev)
4207{
4208 return dev->priv_flags & IFF_OVS_DATAPATH;
4209}
4210
4211static inline bool netif_is_team_master(const struct net_device *dev)
4212{
4213 return dev->priv_flags & IFF_TEAM;
4214}
4215
4216static inline bool netif_is_team_port(const struct net_device *dev)
4217{
4218 return dev->priv_flags & IFF_TEAM_PORT;
4219}
4220
4221static inline bool netif_is_lag_master(const struct net_device *dev)
4222{
4223 return netif_is_bond_master(dev) || netif_is_team_master(dev);
4224}
4225
4226static inline bool netif_is_lag_port(const struct net_device *dev)
4227{
4228 return netif_is_bond_slave(dev) || netif_is_team_port(dev);
4229}
4230
4231static inline bool netif_is_rxfh_configured(const struct net_device *dev)
4232{
4233 return dev->priv_flags & IFF_RXFH_CONFIGURED;
4234}
4235
/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
4237static inline void netif_keep_dst(struct net_device *dev)
4238{
4239 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
4240}
4241
/* return true if dev can't cope with mtu frames that need vlan tag insertion */
4243static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
4244{
	/* TODO: reserve and use an additional IFF bit, if we get more users */
4246 return dev->priv_flags & IFF_MACSEC;
4247}
4248
4249extern struct pernet_operations __net_initdata loopback_net_ops;
4250
4251
4252
4253
4254
4255static inline const char *netdev_name(const struct net_device *dev)
4256{
4257 if (!dev->name[0] || strchr(dev->name, '%'))
4258 return "(unnamed net_device)";
4259 return dev->name;
4260}
4261
4262static inline bool netdev_unregistering(const struct net_device *dev)
4263{
4264 return dev->reg_state == NETREG_UNREGISTERING;
4265}
4266
4267static inline const char *netdev_reg_state(const struct net_device *dev)
4268{
4269 switch (dev->reg_state) {
4270 case NETREG_UNINITIALIZED: return " (uninitialized)";
4271 case NETREG_REGISTERED: return "";
4272 case NETREG_UNREGISTERING: return " (unregistering)";
4273 case NETREG_UNREGISTERED: return " (unregistered)";
4274 case NETREG_RELEASED: return " (released)";
4275 case NETREG_DUMMY: return " (dummy)";
4276 }
4277
4278 WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state);
4279 return " (unknown)";
4280}
4281
4282__printf(3, 4)
4283void netdev_printk(const char *level, const struct net_device *dev,
4284 const char *format, ...);
4285__printf(2, 3)
4286void netdev_emerg(const struct net_device *dev, const char *format, ...);
4287__printf(2, 3)
4288void netdev_alert(const struct net_device *dev, const char *format, ...);
4289__printf(2, 3)
4290void netdev_crit(const struct net_device *dev, const char *format, ...);
4291__printf(2, 3)
4292void netdev_err(const struct net_device *dev, const char *format, ...);
4293__printf(2, 3)
4294void netdev_warn(const struct net_device *dev, const char *format, ...);
4295__printf(2, 3)
4296void netdev_notice(const struct net_device *dev, const char *format, ...);
4297__printf(2, 3)
4298void netdev_info(const struct net_device *dev, const char *format, ...);
4299
4300#define MODULE_ALIAS_NETDEV(device) \
4301 MODULE_ALIAS("netdev-" device)
4302
4303#if defined(CONFIG_DYNAMIC_DEBUG)
4304#define netdev_dbg(__dev, format, args...) \
4305do { \
4306 dynamic_netdev_dbg(__dev, format, ##args); \
4307} while (0)
4308#elif defined(DEBUG)
4309#define netdev_dbg(__dev, format, args...) \
4310 netdev_printk(KERN_DEBUG, __dev, format, ##args)
4311#else
4312#define netdev_dbg(__dev, format, args...) \
4313({ \
4314 if (0) \
4315 netdev_printk(KERN_DEBUG, __dev, format, ##args); \
4316})
4317#endif
4318
4319#if defined(VERBOSE_DEBUG)
4320#define netdev_vdbg netdev_dbg
4321#else
4322
4323#define netdev_vdbg(dev, format, args...) \
4324({ \
4325 if (0) \
4326 netdev_printk(KERN_DEBUG, dev, format, ##args); \
4327 0; \
4328})
4329#endif
4330
4331
4332
4333
4334
4335
4336#define netdev_WARN(dev, format, args...) \
4337 WARN(1, "netdevice: %s%s\n" format, netdev_name(dev), \
4338 netdev_reg_state(dev), ##args)
4339
4340
4341
4342#define netif_printk(priv, type, level, dev, fmt, args...) \
4343do { \
4344 if (netif_msg_##type(priv)) \
4345 netdev_printk(level, (dev), fmt, ##args); \
4346} while (0)
4347
4348#define netif_level(level, priv, type, dev, fmt, args...) \
4349do { \
4350 if (netif_msg_##type(priv)) \
4351 netdev_##level(dev, fmt, ##args); \
4352} while (0)
4353
4354#define netif_emerg(priv, type, dev, fmt, args...) \
4355 netif_level(emerg, priv, type, dev, fmt, ##args)
4356#define netif_alert(priv, type, dev, fmt, args...) \
4357 netif_level(alert, priv, type, dev, fmt, ##args)
4358#define netif_crit(priv, type, dev, fmt, args...) \
4359 netif_level(crit, priv, type, dev, fmt, ##args)
4360#define netif_err(priv, type, dev, fmt, args...) \
4361 netif_level(err, priv, type, dev, fmt, ##args)
4362#define netif_warn(priv, type, dev, fmt, args...) \
4363 netif_level(warn, priv, type, dev, fmt, ##args)
4364#define netif_notice(priv, type, dev, fmt, args...) \
4365 netif_level(notice, priv, type, dev, fmt, ##args)
4366#define netif_info(priv, type, dev, fmt, args...) \
4367 netif_level(info, priv, type, dev, fmt, ##args)
4368
4369#if defined(CONFIG_DYNAMIC_DEBUG)
4370#define netif_dbg(priv, type, netdev, format, args...) \
4371do { \
4372 if (netif_msg_##type(priv)) \
4373 dynamic_netdev_dbg(netdev, format, ##args); \
4374} while (0)
4375#elif defined(DEBUG)
4376#define netif_dbg(priv, type, dev, format, args...) \
4377 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
4378#else
4379#define netif_dbg(priv, type, dev, format, args...) \
4380({ \
4381 if (0) \
4382 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
4383 0; \
4384})
4385#endif
4386
4387
4388#define netif_cond_dbg(priv, type, netdev, cond, level, fmt, args...) \
4389 do { \
4390 if (cond) \
4391 netif_dbg(priv, type, netdev, fmt, ##args); \
4392 else \
4393 netif_ ## level(priv, type, netdev, fmt, ##args); \
4394 } while (0)
4395
4396#if defined(VERBOSE_DEBUG)
4397#define netif_vdbg netif_dbg
4398#else
4399#define netif_vdbg(priv, type, dev, format, args...) \
4400({ \
4401 if (0) \
4402 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
4403 0; \
4404})
4405#endif
/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16?  Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25 (and, since the
 *	addition of VLAN tags, 802.1Q as well).
 */
4434#define PTYPE_HASH_SIZE (16)
4435#define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
4436
#endif	/* _LINUX_NETDEVICE_H */
4438