#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/if_link.h>

#ifdef __KERNEL__
#include <linux/pm_qos.h>
#include <linux/timer.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>
#include <linux/dynamic_queue_limits.h>

#include <linux/ethtool.h>
#include <net/net_namespace.h>
#include <net/dsa.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif
#include <net/netprio_cgroup.h>

#include <linux/netdev_features.h>

struct netpoll_info;
struct device;
struct phy_device;
/* 802.11 specific */
struct wireless_dev;

#define SET_ETHTOOL_OPS(netdev, ops) \
	((netdev)->ethtool_ops = (ops))

/* hardware address assignment types */
#define NET_ADDR_PERM	0	/* address is permanent (default) */
#define NET_ADDR_RANDOM	1	/* address is generated randomly */
#define NET_ADDR_STOLEN	2	/* address is stolen from another device */

/* Backlog congestion levels */
#define NET_RX_SUCCESS	0	/* keep 'em coming, baby */
#define NET_RX_DROP	1	/* packet dropped */

/*
 * Transmit return codes: transmit return codes originate from three sources:
 *
 * - qdisc return codes
 * - driver transmit return codes
 * - errno values
 *
 * Drivers are allowed to return any one of those in their hard_start_xmit()
 * function. Real network devices commonly used with qdiscs should only return
 * the driver transmit return codes though - when qdiscs are used, the actual
 * transmission happens asynchronously, so the value is not propagated to
 * higher layers. Virtual network devices transmit synchronously; in this case
 * the driver transmit return codes are consumed by dev_queue_xmit(), and all
 * others are propagated to higher layers.
 */

/* qdisc ->enqueue() return codes. */
#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_DROP		0x01	/* skb dropped			*/
#define NET_XMIT_CN		0x02	/* congestion notification	*/
#define NET_XMIT_POLICED	0x03	/* skb is shot by police	*/
#define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority; prompting us to send less aggressively.
 */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)

/* Driver transmit return codes */
#define NETDEV_TX_MASK		0xf0

enum netdev_tx {
	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy */
	NETDEV_TX_LOCKED = 0x20,	/* driver tx lock was already taken */
};
typedef enum netdev_tx netdev_tx_t;

/*
 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
 * hard_start_xmit() must not return NET_XMIT_* or negative errno values.
 */
static inline bool dev_xmit_complete(int rc)
{
	/*
	 * Positive cases with an skb consumed by a driver:
	 * - successful, no skb requeued
	 * - error while skb is consumed is ok
	 * - error while skb is not consumed is not ok
	 */
	if (likely(rc < NET_XMIT_MASK))
		return true;

	return false;
}

#endif /* __KERNEL__ */

#define MAX_ADDR_LEN	32	/* Largest hardware address length */

/* Initial net device group. All devices belong to group 0 by default. */
#define INIT_NETDEV_GROUP	0

#ifdef __KERNEL__

/*
 * Compute the worst-case header length according to the protocols
 * used.
 */
#if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#elif IS_ENABLED(CONFIG_TR)
# define LL_MAX_HEADER 48
#else
# define LL_MAX_HEADER 32
#endif

#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
    !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

/*
 * Old network device statistics. Fields are native words
 * (unsigned long) so they can be read and updated atomically.
 */

struct net_device_stats {
	unsigned long	rx_packets;
	unsigned long	tx_packets;
	unsigned long	rx_bytes;
	unsigned long	tx_bytes;
	unsigned long	rx_errors;
	unsigned long	tx_errors;
	unsigned long	rx_dropped;
	unsigned long	tx_dropped;
	unsigned long	multicast;
	unsigned long	collisions;
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;
	unsigned long	rx_crc_errors;
	unsigned long	rx_frame_errors;
	unsigned long	rx_fifo_errors;
	unsigned long	rx_missed_errors;
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};

#endif /* __KERNEL__ */

/* Media selection options. */
enum {
	IF_PORT_UNKNOWN = 0,
	IF_PORT_10BASE2,
	IF_PORT_10BASET,
	IF_PORT_AUI,
	IF_PORT_100BASET,
	IF_PORT_100BASETX,
	IF_PORT_100BASEFX
};

#ifdef __KERNEL__

#include <linux/cache.h>
#include <linux/skbuff.h>

#ifdef CONFIG_RPS
#include <linux/static_key.h>
extern struct static_key rps_needed;
#endif

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netdev_hw_addr {
	struct list_head	list;
	unsigned char		addr[MAX_ADDR_LEN];
	unsigned char		type;
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_SLAVE		3
#define NETDEV_HW_ADDR_T_UNICAST	4
#define NETDEV_HW_ADDR_T_MULTICAST	5
	bool			synced;
	bool			global_use;
	int			refcount;
	struct rcu_head		rcu_head;
};

struct netdev_hw_addr_list {
	struct list_head list;
	int count;
};

#define netdev_hw_addr_list_count(l) ((l)->count)
#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
#define netdev_hw_addr_list_for_each(ha, l) \
	list_for_each_entry(ha, &(l)->list, list)

#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->uc)

#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->mc)
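
/*
 * Example: a driver's ndo_set_rx_mode() typically walks these lists to
 * program its hardware filters.  A minimal sketch, assuming a
 * hypothetical device "foo" with foo_hw_clear_filters() and
 * foo_hw_add_mc_filter() helpers; the core holds dev->addr_list_lock
 * around this call, so the lists are stable here:
 *
 *	static void foo_set_rx_mode(struct net_device *dev)
 *	{
 *		struct netdev_hw_addr *ha;
 *
 *		foo_hw_clear_filters(dev);
 *		netdev_for_each_mc_addr(ha, dev)
 *			foo_hw_add_mc_filter(dev, ha->addr);
 *	}
 */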

struct hh_cache {
	u16		hh_len;
	u16		__pad;
	seqlock_t	hh_lock;

	/* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len) + (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len + (dev)->needed_headroom) \
	  & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev, extra) \
	((((dev)->hard_header_len + (dev)->needed_headroom + (extra)) \
	  & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)

struct header_ops {
	int	(*create)(struct sk_buff *skb, struct net_device *dev,
			  unsigned short type, const void *daddr,
			  const void *saddr, unsigned len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*rebuild)(struct sk_buff *skb);
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh,
			 __be16 type);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
};

/* These flag bits are private to the generic network queueing
 * layer; they may not be explicitly referenced by any other
 * code.
 */
enum netdev_state_t {
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
};

/*
 * This structure holds boot-time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

extern int __init netdev_boot_setup(char *str);

/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit.  This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-cpu poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	spinlock_t		poll_lock;
	int			poll_owner;
#endif

	unsigned int		gro_count;

	struct net_device	*dev;
	struct list_head	dev_list;
	struct sk_buff		*gro_list;
	struct sk_buff		*skb;
};

enum {
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_DISABLE,	/* Disable pending */
	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
};

enum gro_result {
	GRO_MERGED,
	GRO_MERGED_FREE,
	GRO_HELD,
	GRO_NORMAL,
	GRO_DROP,
};
typedef enum gro_result gro_result_t;

/*
 * enum rx_handler_result - Possible return values for rx_handlers.
 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
 *	further.
 * @RX_HANDLER_ANOTHER: Do another round in the receive path. This is
 *	indicated in case skb->dev was changed by the rx_handler.
 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
 *
 * rx_handlers are functions called from inside __netif_receive_skb(), to do
 * special processing of the skb, prior to delivery to protocol handlers.
 * Upper devices like bonding, macvlan and the bridge register an rx_handler
 * on a device to have skbs received on that device "handed over" to them.
 *
 * An rx_handler may consume the skb entirely (RX_HANDLER_CONSUMED), change
 * skb->dev and have the receive path restarted with the new device
 * (RX_HANDLER_ANOTHER), or let processing continue as if it had not been
 * called (RX_HANDLER_PASS).  It may also change the skb itself, in which
 * case it must update *pskb.
 */
enum rx_handler_result {
	RX_HANDLER_CONSUMED,
	RX_HANDLER_ANOTHER,
	RX_HANDLER_EXACT,
	RX_HANDLER_PASS,
};
typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);

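/*
 * Example: a minimal rx_handler, loosely in the style bonding or
 * macvlan use.  An illustrative sketch only - the upper-device lookup
 * foo_upper_dev() is hypothetical:
 *
 *	static rx_handler_result_t foo_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *		struct net_device *upper = foo_upper_dev(skb->dev);
 *
 *		if (!upper)
 *			return RX_HANDLER_PASS;
 *
 *		skb->dev = upper;
 *		return RX_HANDLER_ANOTHER;
 *	}
 *
 * It would be attached with netdev_rx_handler_register(dev,
 * foo_handle_frame, priv) under RTNL.
 */
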
extern void __napi_schedule(struct napi_struct *n);

static inline bool napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_schedule_prep - check if napi can be scheduled
 *	@n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * insure only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
static inline bool napi_schedule_prep(struct napi_struct *n)
{
	return !napi_disable_pending(n) &&
		!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}

/**
 *	napi_schedule - schedule NAPI poll
 *	@n: napi context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}

/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
static inline bool napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return true;
	}
	return false;
}
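
/*
 * Example: the usual pattern is to call napi_schedule() from the
 * device's RX interrupt handler after masking further RX interrupts.
 * A minimal sketch for a hypothetical driver "foo" (struct foo_priv,
 * its ->napi member and foo_disable_rx_irq() are assumptions for
 * illustration):
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_id)
 *	{
 *		struct foo_priv *fp = dev_id;
 *
 *		foo_disable_rx_irq(fp);
 *		napi_schedule(&fp->napi);
 *		return IRQ_HANDLED;
 *	}
 *
 * __napi_schedule() raises NET_RX_SOFTIRQ, so the driver's poll
 * routine then runs in softirq context.
 */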

/**
 *	napi_complete - NAPI processing complete
 *	@n: napi context
 *
 * Mark NAPI processing as complete.
 */
extern void __napi_complete(struct napi_struct *n);
extern void napi_complete(struct napi_struct *n);

/**
 *	napi_disable - prevent NAPI from scheduling
 *	@n: napi context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
static inline void napi_disable(struct napi_struct *n)
{
	set_bit(NAPI_STATE_DISABLE, &n->state);
	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	clear_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_enable - enable NAPI scheduling
 *	@n: napi context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}

#ifdef CONFIG_SMP
/**
 *	napi_synchronize - wait until NAPI is not running
 *	@n: napi context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	while (test_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
}
#else
# define napi_synchronize(n)	barrier()
#endif

enum netdev_queue_state_t {
	__QUEUE_STATE_DRV_XOFF,
	__QUEUE_STATE_STACK_XOFF,
	__QUEUE_STATE_FROZEN,
#define QUEUE_STATE_ANY_XOFF ((1 << __QUEUE_STATE_DRV_XOFF) | \
			      (1 << __QUEUE_STATE_STACK_XOFF))
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
					(1 << __QUEUE_STATE_FROZEN))
};

/*
 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue.  The
 * netif_tx_* functions below are used to manipulate this flag.  The
 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
 * queue independently.  The netif_xmit_*stopped functions below are called
 * to check if the queue has been stopped by the driver or stack (either
 * of the XOFF bits are set in the state).  Drivers should not need to call
 * netif_xmit*stopped functions, they should only be using netif_tx_*.
 */

struct netdev_queue {
/*
 * read mostly part
 */
	struct net_device	*dev;
	struct Qdisc		*qdisc;
	struct Qdisc		*qdisc_sleeping;
#ifdef CONFIG_SYSFS
	struct kobject		kobj;
#endif
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	int			numa_node;
#endif
/*
 * write mostly part
 */
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	int			xmit_lock_owner;
	/*
	 * please use this field instead of dev->trans_start
	 */
	unsigned long		trans_start;

	/*
	 * Number of TX timeouts for this queue
	 * (/sys/class/net/DEV/Q/trans_timeout)
	 */
	unsigned long		trans_timeout;

	unsigned long		state;

#ifdef CONFIG_BQL
	struct dql		dql;
#endif
} ____cacheline_aligned_in_smp;

static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	return q->numa_node;
#else
	return NUMA_NO_NODE;
#endif
}

static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	q->numa_node = node;
#endif
}

#ifdef CONFIG_RPS
/*
 * This structure holds an RPS map which can be of variable length.  The
 * map is an array of CPUs.
 */
struct rps_map {
	unsigned int len;
	struct rcu_head rcu;
	u16 cpus[0];
};
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))

/*
 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
 * tail pointer for that CPU's input queue at the time of last enqueue, and
 * a hardware filter index.
 */
struct rps_dev_flow {
	u16 cpu;
	u16 filter;
	unsigned int last_qtail;
};
#define RPS_NO_FILTER 0xffff

/*
 * The rps_dev_flow_table structure contains a table of flow mappings.
 */
struct rps_dev_flow_table {
	unsigned int mask;
	struct rcu_head rcu;
	struct work_struct free_work;
	struct rps_dev_flow flows[0];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
	((_num) * sizeof(struct rps_dev_flow)))

/*
 * The rps_sock_flow_table contains mappings of flows to the last CPU
 * on which they were processed by the application (set in recvmsg).
 */
struct rps_sock_flow_table {
	unsigned int mask;
	u16 ents[0];
};
#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
	((_num) * sizeof(u16)))

#define RPS_NO_CPU 0xffff

static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
					u32 hash)
{
	if (table && hash) {
		unsigned int cpu, index = hash & table->mask;

		/* We only give a hint, preemption can change CPU under us */
		cpu = raw_smp_processor_id();

		if (table->ents[index] != cpu)
			table->ents[index] = cpu;
	}
}

static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
				       u32 hash)
{
	if (table && hash)
		table->ents[hash & table->mask] = RPS_NO_CPU;
}

extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;

#ifdef CONFIG_RFS_ACCEL
extern bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
				u32 flow_id, u16 filter_id);
#endif

/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
	struct rps_map __rcu		*rps_map;
	struct rps_dev_flow_table __rcu	*rps_flow_table;
	struct kobject			kobj;
	struct net_device		*dev;
} ____cacheline_aligned_in_smp;
#endif /* CONFIG_RPS */

#ifdef CONFIG_XPS
/*
 * This structure holds an XPS map which can be of variable length.  The
 * map is an array of queues.
 */
struct xps_map {
	unsigned int len;
	unsigned int alloc_len;
	struct rcu_head rcu;
	u16 queues[0];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map)) \
	/ sizeof(u16))

/*
 * This structure holds all XPS maps for device.  Maps are indexed by CPU.
 */
struct xps_dev_maps {
	struct rcu_head rcu;
	struct xps_map __rcu *cpu_map[0];
};
#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) + \
	(nr_cpu_ids * sizeof(struct xps_map *)))
#endif /* CONFIG_XPS */

#define TC_MAX_QUEUE	16
#define TC_BITMASK	15
/* HW offloaded queuing disciplines txq count and offset maps */
struct netdev_tc_txq {
	u16 count;
	u16 offset;
};

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
/*
 * This structure is to hold information about the device
 * configured to run FCoE protocol stack.
 */
struct netdev_fcoe_hbainfo {
	char	manufacturer[64];
	char	serial_number[64];
	char	hardware_version[64];
	char	driver_version[64];
	char	optionrom_version[64];
	char	firmware_version[64];
	char	model[256];
	char	model_description[256];
};
#endif

/*
 * This structure defines the management hooks for network devices.
 * The hooks are optional unless noted otherwise; a driver fills in the
 * ones it implements and leaves the rest NULL.
 *
 * int (*ndo_init)(struct net_device *dev);
 *	Called when a device is registered; device-private setup can be
 *	done here.
 * void (*ndo_uninit)(struct net_device *dev);
 *	Called when the device is being unregistered.
 * int (*ndo_open)(struct net_device *dev);
 *	Called when the device transitions to the up state.
 * int (*ndo_stop)(struct net_device *dev);
 *	Called when the device transitions to the down state.
 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb, struct net_device *dev);
 *	Called when a packet needs to be transmitted; must return one of
 *	the NETDEV_TX_* codes.  Required; cannot be NULL.
 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb);
 *	Called to decide which transmit queue to use on a multiqueue
 *	device.
 * void (*ndo_set_rx_mode)(struct net_device *dev);
 *	Called to update the unicast/multicast filters and promiscuity,
 *	with dev->addr_list_lock held.
 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
 *	Called when the MAC address needs to be changed.
 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *	Called for device-specific ioctls.
 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
 *	Called when the device MTU is changed.
 * void (*ndo_tx_timeout)(struct net_device *dev);
 *	Called when the transmit watchdog believes a transmit is stuck.
 * ndo_get_stats64()/ndo_get_stats report device statistics; the 64-bit
 *	variant is preferred when both are present.
 *
 * The remaining hooks cover VLAN filter offload, netpoll, SR-IOV VF
 * configuration, multiqueue traffic classes, FCoE offload, accelerated
 * RFS flow steering, slave management, neighbour setup and feature
 * fix-up/set-up; see the individual member declarations below.
 */
struct net_device_ops {
	int			(*ndo_init)(struct net_device *dev);
	void			(*ndo_uninit)(struct net_device *dev);
	int			(*ndo_open)(struct net_device *dev);
	int			(*ndo_stop)(struct net_device *dev);
	netdev_tx_t		(*ndo_start_xmit)(struct sk_buff *skb,
						  struct net_device *dev);
	u16			(*ndo_select_queue)(struct net_device *dev,
						    struct sk_buff *skb);
	void			(*ndo_change_rx_flags)(struct net_device *dev,
						       int flags);
	void			(*ndo_set_rx_mode)(struct net_device *dev);
	int			(*ndo_set_mac_address)(struct net_device *dev,
						       void *addr);
	int			(*ndo_validate_addr)(struct net_device *dev);
	int			(*ndo_do_ioctl)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
	int			(*ndo_set_config)(struct net_device *dev,
						  struct ifmap *map);
	int			(*ndo_change_mtu)(struct net_device *dev,
						  int new_mtu);
	int			(*ndo_neigh_setup)(struct net_device *dev,
						   struct neigh_parms *);
	void			(*ndo_tx_timeout)(struct net_device *dev);

	struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
						     struct rtnl_link_stats64 *storage);
	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);

	int			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
						       unsigned short vid);
	int			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
							unsigned short vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*ndo_poll_controller)(struct net_device *dev);
	int			(*ndo_netpoll_setup)(struct net_device *dev,
						     struct netpoll_info *info);
	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
	int			(*ndo_set_vf_mac)(struct net_device *dev,
						  int queue, u8 *mac);
	int			(*ndo_set_vf_vlan)(struct net_device *dev,
						   int queue, u16 vlan, u8 qos);
	int			(*ndo_set_vf_tx_rate)(struct net_device *dev,
						      int vf, int rate);
	int			(*ndo_set_vf_spoofchk)(struct net_device *dev,
						       int vf, bool setting);
	int			(*ndo_get_vf_config)(struct net_device *dev,
						     int vf,
						     struct ifla_vf_info *ivf);
	int			(*ndo_set_vf_port)(struct net_device *dev,
						   int vf,
						   struct nlattr *port[]);
	int			(*ndo_get_vf_port)(struct net_device *dev,
						   int vf, struct sk_buff *skb);
	int			(*ndo_setup_tc)(struct net_device *dev, u8 tc);
#if IS_ENABLED(CONFIG_FCOE)
	int			(*ndo_fcoe_enable)(struct net_device *dev);
	int			(*ndo_fcoe_disable)(struct net_device *dev);
	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
						      u16 xid,
						      struct scatterlist *sgl,
						      unsigned int sgc);
	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
						     u16 xid);
	int			(*ndo_fcoe_ddp_target)(struct net_device *dev,
						       u16 xid,
						       struct scatterlist *sgl,
						       unsigned int sgc);
	int			(*ndo_fcoe_get_hbainfo)(struct net_device *dev,
							struct netdev_fcoe_hbainfo *hbainfo);
#endif

#if IS_ENABLED(CONFIG_LIBFCOE)
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
						    u64 *wwn, int type);
#endif

#ifdef CONFIG_RFS_ACCEL
	int			(*ndo_rx_flow_steer)(struct net_device *dev,
						     const struct sk_buff *skb,
						     u16 rxq_index,
						     u32 flow_id);
#endif
	int			(*ndo_add_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	int			(*ndo_del_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	netdev_features_t	(*ndo_fix_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_set_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_neigh_construct)(struct neighbour *n);
	void			(*ndo_neigh_destroy)(struct neighbour *n);
};
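
/*
 * Example: a driver defines one static ops table and points
 * dev->netdev_ops at it before register_netdev().  A minimal sketch
 * for a hypothetical Ethernet driver "foo" (the foo_* handlers are
 * assumptions; eth_mac_addr, eth_validate_addr and eth_change_mtu are
 * the stock helpers from <linux/etherdevice.h>):
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		.ndo_open		= foo_open,
 *		.ndo_stop		= foo_stop,
 *		.ndo_start_xmit		= foo_start_xmit,
 *		.ndo_set_rx_mode	= foo_set_rx_mode,
 *		.ndo_set_mac_address	= eth_mac_addr,
 *		.ndo_validate_addr	= eth_validate_addr,
 *		.ndo_change_mtu		= eth_change_mtu,
 *	};
 */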

/*
 *	The DEVICE structure.
 *	Actually, this whole structure is a big mistake.  It mixes I/O
 *	data with strictly "high-level" data, and it has to know about
 *	almost every data structure used in the INET module.
 *
 *	FIXME: cleanup struct net_device such that network protocol info
 *	moves out.
 */

struct net_device {

	/*
	 * This is the first field of the "visible" part of this structure
	 * (i.e. as seen by users in the "Space.c" file).  It is the name
	 * of the interface.
	 */
	char			name[IFNAMSIZ];

	struct pm_qos_request	pm_qos_req;

	/* device name hash chain */
	struct hlist_node	name_hlist;
	/* snmp alias */
	char			*ifalias;

	/*
	 *	I/O specific fields
	 *	FIXME: Merge these and struct ifmap into one
	 */
	unsigned long		mem_end;	/* shared mem end	*/
	unsigned long		mem_start;	/* shared mem start	*/
	unsigned long		base_addr;	/* device I/O address	*/
	unsigned int		irq;		/* device IRQ number	*/

	/*
	 *	Some hardware also needs these fields, but they are not
	 *	part of the usual set specified in Space.c.
	 */

	unsigned long		state;

	struct list_head	dev_list;
	struct list_head	napi_list;
	struct list_head	unreg_list;

	/* currently active device features */
	netdev_features_t	features;
	/* user-changeable features */
	netdev_features_t	hw_features;
	/* user-requested features */
	netdev_features_t	wanted_features;
	/* mask of features inheritable by VLAN devices */
	netdev_features_t	vlan_features;

	/* Interface index. Unique device identifier	*/
	int			ifindex;
	int			iflink;

	struct net_device_stats	stats;
	atomic_long_t		rx_dropped; /* dropped packets by core network
					     * Do not use this in drivers.
					     */

#ifdef CONFIG_WIRELESS_EXT
	/* List of functions to handle Wireless Extensions (instead of ioctl).
	 * See <net/iw_handler.h> for details. */
	const struct iw_handler_def *	wireless_handlers;
	/* Instance data managed by the core of Wireless Extensions. */
	struct iw_public_data *	wireless_data;
#endif
	/* Management operations */
	const struct net_device_ops *netdev_ops;
	const struct ethtool_ops *ethtool_ops;

	/* Hardware header description */
	const struct header_ops *header_ops;

	unsigned int		flags;	/* interface flags (a la BSD)	*/
	unsigned int		priv_flags; /* Like 'flags' but invisible to
					     * userspace.
					     */
	unsigned short		gflags;
	unsigned short		padded;	/* How much padding added by alloc_netdev() */

	unsigned char		operstate; /* RFC2863 operstate */
	unsigned char		link_mode; /* mapping policy to operstate */

	unsigned char		if_port;	/* Selectable AUI, TP,..*/
	unsigned char		dma;		/* DMA channel		*/

	unsigned int		mtu;	/* interface MTU value		*/
	unsigned short		type;	/* interface hardware type	*/
	unsigned short		hard_header_len;	/* hardware hdr length	*/

	/* extra head- and tailroom the hardware may need, but not in all
	 * cases can this be guaranteed, especially tailroom. Some cases
	 * also use LL_MAX_HEADER instead to allocate the skb.
	 */
	unsigned short		needed_headroom;
	unsigned short		needed_tailroom;

	/* Interface address info. */
	unsigned char		perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
	unsigned char		addr_assign_type; /* hw address assignment type */
	unsigned char		addr_len;	/* hardware address length */
	unsigned char		neigh_priv_len;
	unsigned short		dev_id;		/* for shared network cards */

	spinlock_t		addr_list_lock;
	struct netdev_hw_addr_list	uc;	/* Unicast mac addresses */
	struct netdev_hw_addr_list	mc;	/* Multicast mac addresses */
	bool			uc_promisc;
	unsigned int		promiscuity;
	unsigned int		allmulti;


	/* Protocol specific pointers */

#if IS_ENABLED(CONFIG_VLAN_8021Q)
	struct vlan_info __rcu	*vlan_info;	/* VLAN info */
#endif
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_switch_tree	*dsa_ptr;	/* dsa specific data */
#endif
	void			*atalk_ptr;	/* AppleTalk link	*/
	struct in_device __rcu	*ip_ptr;	/* IPv4 specific data	*/
	struct dn_dev __rcu	*dn_ptr;	/* DECnet specific data */
	struct inet6_dev __rcu	*ip6_ptr;	/* IPv6 specific data */
	void			*ec_ptr;	/* Econet specific data	*/
	void			*ax25_ptr;	/* AX.25 specific data */
	struct wireless_dev	*ieee80211_ptr;	/* IEEE 802.11 specific data,
						   assign before registering */

/*
 * Cache lines mostly used on receive path (including eth_type_trans())
 */
	unsigned long		last_rx;	/* Time of last Rx */

	/* Interface address info used in eth_type_trans().
	 * A slave's master pointer is accessed under RCU.
	 */
	struct net_device	*master;

	unsigned char		*dev_addr;	/* hw address, (before bcast
						   because most packets are
						   unicast) */

	struct netdev_hw_addr_list	dev_addrs; /* list of device
						      hw addresses */

	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast add	*/

#ifdef CONFIG_SYSFS
	struct kset		*queues_kset;
#endif

#ifdef CONFIG_RPS
	struct netdev_rx_queue	*_rx;

	/* Number of RX queues allocated at register_netdev() time */
	unsigned int		num_rx_queues;

	/* Number of RX queues currently active in device */
	unsigned int		real_num_rx_queues;

#ifdef CONFIG_RFS_ACCEL
	/* CPU reverse-mapping for RX completion interrupts, indexed
	 * by RX queue number.  Assigned by driver.  This must only be
	 * set if the ndo_rx_flow_steer operation is defined. */
	struct cpu_rmap		*rx_cpu_rmap;
#endif
#endif

	rx_handler_func_t __rcu	*rx_handler;
	void __rcu		*rx_handler_data;

	struct netdev_queue __rcu *ingress_queue;

/*
 * Cache lines mostly used on transmit path
 */
	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;

	/* Number of TX queues allocated at alloc_netdev_mq() time */
	unsigned int		num_tx_queues;

	/* Number of TX queues currently active in device */
	unsigned int		real_num_tx_queues;

	/* root qdisc from userspace point of view */
	struct Qdisc		*qdisc;

	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
	spinlock_t		tx_global_lock;

#ifdef CONFIG_XPS
	struct xps_dev_maps __rcu *xps_maps;
#endif

	/* These may be needed for future network-power-down code. */

	/*
	 * trans_start here is expensive for high speed devices on SMP,
	 * please use netdev_queue->trans_start instead.
	 */
	unsigned long		trans_start;	/* Time (in jiffies) of last Tx	*/

	int			watchdog_timeo; /* used by dev_watchdog() */
	struct timer_list	watchdog_timer;

	/* Number of references to this device */
	int __percpu		*pcpu_refcnt;

	/* delayed register/unregister */
	struct list_head	todo_list;
	/* device index hash chain */
	struct hlist_node	index_hlist;

	struct list_head	link_watch_list;

	/* register/unregister state machine */
	enum { NETREG_UNINITIALIZED=0,
	       NETREG_REGISTERED,	/* completed register_netdevice */
	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
	       NETREG_UNREGISTERED,	/* completed unregister todo */
	       NETREG_RELEASED,		/* called free_netdev */
	       NETREG_DUMMY,		/* dummy device for NAPI poll */
	} reg_state:8;

	bool dismantle; /* device is going to be freed */

	enum {
		RTNL_LINK_INITIALIZED,
		RTNL_LINK_INITIALIZING,
	} rtnl_link_state:16;

	/* Called from unregister, can be used to call free_netdev */
	void (*destructor)(struct net_device *dev);

#ifdef CONFIG_NETPOLL
	struct netpoll_info	*npinfo;
#endif

#ifdef CONFIG_NET_NS
	/* Network namespace this network device is inside */
	struct net		*nd_net;
#endif

	/* mid-layer private */
	union {
		void				*ml_priv;
		struct pcpu_lstats __percpu	*lstats; /* loopback stats */
		struct pcpu_tstats __percpu	*tstats; /* tunnel stats */
		struct pcpu_dstats __percpu	*dstats; /* dummy stats */
	};
	/* GARP */
	struct garp_port __rcu	*garp_port;

	/* class/net/name entry */
	struct device		dev;
	/* space for optional device, statistics, and wireless sysfs groups */
	const struct attribute_group *sysfs_groups[4];

	/* rtnetlink link ops */
	const struct rtnl_link_ops *rtnl_link_ops;

	/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE		65536
	unsigned int		gso_max_size;

#ifdef CONFIG_DCB
	/* Data Center Bridging netlink ops */
	const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
	u8 num_tc;
	struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
	u8 prio_tc_map[TC_BITMASK + 1];

#if IS_ENABLED(CONFIG_FCOE)
	/* max exchange id for FCoE LRO by ddp */
	unsigned int		fcoe_ddp_xid;
#endif
#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
	struct netprio_map __rcu *priomap;
#endif
	/* phy device may attach itself for hardware timestamping */
	struct phy_device *phydev;

	/* group the device belongs to */
	int group;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)

#define NETDEV_ALIGN		32

static inline
int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
{
	return dev->prio_tc_map[prio & TC_BITMASK];
}

static inline
int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

	dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
	return 0;
}

static inline
void netdev_reset_tc(struct net_device *dev)
{
	dev->num_tc = 0;
	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
}

static inline
int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

	dev->tc_to_txq[tc].count = count;
	dev->tc_to_txq[tc].offset = offset;
	return 0;
}

static inline
int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
{
	if (num_tc > TC_MAX_QUEUE)
		return -EINVAL;

	dev->num_tc = num_tc;
	return 0;
}

static inline
int netdev_get_num_tc(struct net_device *dev)
{
	return dev->num_tc;
}
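
/*
 * Example: the expected calling order when a driver exposes hardware
 * traffic classes (typically from ndo_setup_tc).  A sketch, assuming
 * a device with 2 TCs of 4 queues each; the priority-to-TC mapping
 * shown is arbitrary:
 *
 *	netdev_set_num_tc(dev, 2);
 *	netdev_set_tc_queue(dev, 0, 4, 0);	(TC0: queues 0-3)
 *	netdev_set_tc_queue(dev, 1, 4, 4);	(TC1: queues 4-7)
 *	netdev_set_prio_tc_map(dev, 0, 0);
 *	netdev_set_prio_tc_map(dev, 5, 1);
 */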

static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
					 unsigned int index)
{
	return &dev->_tx[index];
}

static inline void netdev_for_each_tx_queue(struct net_device *dev,
					    void (*f)(struct net_device *,
						      struct netdev_queue *,
						      void *),
					    void *arg)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		f(dev, &dev->_tx[i], arg);
}

/*
 * Net namespace inlines
 */
static inline
struct net *dev_net(const struct net_device *dev)
{
	return read_pnet(&dev->nd_net);
}

static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
#ifdef CONFIG_NET_NS
	release_net(dev->nd_net);
	dev->nd_net = hold_net(net);
#endif
}

static inline bool netdev_uses_dsa_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_DSA
	if (dev->dsa_ptr != NULL)
		return dsa_uses_dsa_tags(dev->dsa_ptr);
#endif

	return false;
}

static inline bool netdev_uses_trailer_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_TRAILER
	if (dev->dsa_ptr != NULL)
		return dsa_uses_trailer_tags(dev->dsa_ptr);
#endif

	return false;
}

/**
 *	netdev_priv - access network device private data
 *	@dev: network device
 *
 * Get network device private data
 */
static inline void *netdev_priv(const struct net_device *dev)
{
	return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
}

/* Set the sysfs physical device reference for the network logical device
 * if set prior to registration will cause a symlink during initialization.
 */
#define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))

/* Set the sysfs device type for the network logical device to allow
 * fine-grained identification of different network device types. For
 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
 */
#define SET_NETDEV_DEVTYPE(net, devtype)	((net)->dev.type = (devtype))

/**
 *	netif_napi_add - initialize a napi context
 *	@dev:  network device
 *	@napi: napi context
 *	@poll: polling function
 *	@weight: default weight
 *
 * netif_napi_add() must be used to initialize a napi context prior to calling
 * *any* of the other napi related functions.
 */
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight);

/**
 *	netif_napi_del - remove a napi context
 *	@napi: napi context
 *
 * netif_napi_del() removes a napi context from the network device napi list
 */
void netif_napi_del(struct napi_struct *napi);
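
/*
 * Example: the standard NAPI pattern ties the pieces above together.
 * A minimal sketch for a hypothetical driver "foo" (struct foo_priv,
 * foo_clean_rx() and foo_enable_rx_irq() are illustrative only):
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_priv *fp = container_of(napi, struct foo_priv,
 *						   napi);
 *		int work_done = foo_clean_rx(fp, budget);
 *
 *		if (work_done < budget) {
 *			napi_complete(napi);
 *			foo_enable_rx_irq(fp);
 *		}
 *		return work_done;
 *	}
 *
 * and, at probe time:
 *
 *	netif_napi_add(dev, &fp->napi, foo_poll, 64);
 */
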
struct napi_gro_cb {
	/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
	void *frag0;

	/* Length of frag0. */
	unsigned int frag0_len;

	/* This indicates where we are processing relative to skb->data. */
	int data_offset;

	/* This is non-zero if the packet may be of the same flow. */
	int same_flow;

	/* This is non-zero if the packet cannot be merged with the new skb. */
	int flush;

	/* Number of segments aggregated. */
	int count;

	/* Free the skb? */
	int free;
};

#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)

struct packet_type {
	__be16			type;	/* This is really htons(ether_type). */
	struct net_device	*dev;	/* NULL is wildcarded here	     */
	int			(*func)(struct sk_buff *,
					struct net_device *,
					struct packet_type *,
					struct net_device *);
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						netdev_features_t features);
	int			(*gso_send_check)(struct sk_buff *skb);
	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
						 struct sk_buff *skb);
	int			(*gro_complete)(struct sk_buff *skb);
	void			*af_packet_priv;
	struct list_head	list;
};

#include <linux/notifier.h>

/* netdevice notifier chain. Please remember to update the rtnetlink
 * notification exclusion list in rtnetlink_event() when adding new
 * types.
 */
#define NETDEV_UP	0x0001	/* For now you can't veto a device up/down */
#define NETDEV_DOWN	0x0002
#define NETDEV_REBOOT	0x0003	/* Tell a protocol stack a network interface
				   detected a hardware crash and restarted
				   - we can use this eg to kick tcp sessions
				   once done */
#define NETDEV_CHANGE	0x0004	/* Notify device state change */
#define NETDEV_REGISTER 0x0005
#define NETDEV_UNREGISTER	0x0006
#define NETDEV_CHANGEMTU	0x0007
#define NETDEV_CHANGEADDR	0x0008
#define NETDEV_GOING_DOWN	0x0009
#define NETDEV_CHANGENAME	0x000A
#define NETDEV_FEAT_CHANGE	0x000B
#define NETDEV_BONDING_FAILOVER 0x000C
#define NETDEV_PRE_UP		0x000D
#define NETDEV_PRE_TYPE_CHANGE	0x000E
#define NETDEV_POST_TYPE_CHANGE	0x000F
#define NETDEV_POST_INIT	0x0010
#define NETDEV_UNREGISTER_BATCH 0x0011
#define NETDEV_RELEASE		0x0012
#define NETDEV_NOTIFY_PEERS	0x0013
#define NETDEV_JOIN		0x0014

extern int register_netdevice_notifier(struct notifier_block *nb);
extern int unregister_netdevice_notifier(struct notifier_block *nb);
extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);

/* Device list lock */
extern rwlock_t dev_base_lock;

#define for_each_netdev(net, d) \
	list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_reverse(net, d) \
	list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_rcu(net, d) \
	list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n) \
	list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d) \
	list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue_rcu(net, d) \
	list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
#define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)

static inline struct net_device *next_net_device(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = dev->dev_list.next;
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *next_net_device_rcu(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = rcu_dereference(list_next_rcu(&dev->dev_list));
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *first_net_device(struct net *net)
{
	return list_empty(&net->dev_base_head) ? NULL :
		net_device_entry(net->dev_base_head.next);
}

static inline struct net_device *first_net_device_rcu(struct net *net)
{
	struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));

	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

extern int netdev_boot_setup_check(struct net_device *dev);
extern unsigned long netdev_boot_base(const char *prefix, int unit);
extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
					      const char *hwaddr);
extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern void dev_add_pack(struct packet_type *pt);
extern void dev_remove_pack(struct packet_type *pt);
extern void __dev_remove_pack(struct packet_type *pt);

extern struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
					       unsigned short mask);
extern struct net_device *dev_get_by_name(struct net *net, const char *name);
extern struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
extern struct net_device *__dev_get_by_name(struct net *net, const char *name);
extern int dev_alloc_name(struct net_device *dev, const char *name);
extern int dev_open(struct net_device *dev);
extern int dev_close(struct net_device *dev);
extern void dev_disable_lro(struct net_device *dev);
extern int dev_queue_xmit(struct sk_buff *skb);
extern int register_netdevice(struct net_device *dev);
extern void unregister_netdevice_queue(struct net_device *dev,
				       struct list_head *head);
extern void unregister_netdevice_many(struct list_head *head);
static inline void unregister_netdevice(struct net_device *dev)
{
	unregister_netdevice_queue(dev, NULL);
}

extern int netdev_refcnt_read(const struct net_device *dev);
extern void free_netdev(struct net_device *dev);
extern void synchronize_net(void);
extern int init_dummy_netdev(struct net_device *dev);
extern void netdev_resync_ops(struct net_device *dev);

extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
extern struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
extern int dev_restart(struct net_device *dev);
#ifdef CONFIG_NETPOLL_TRAP
extern int netpoll_trap(void);
#endif
extern int skb_gro_receive(struct sk_buff **head,
			   struct sk_buff *skb);
extern void skb_gro_reset_offset(struct sk_buff *skb);

static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->data_offset;
}

static inline unsigned int skb_gro_len(const struct sk_buff *skb)
{
	return skb->len - NAPI_GRO_CB(skb)->data_offset;
}

static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
{
	NAPI_GRO_CB(skb)->data_offset += len;
}

static inline void *skb_gro_header_fast(struct sk_buff *skb,
					unsigned int offset)
{
	return NAPI_GRO_CB(skb)->frag0 + offset;
}

static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
{
	return NAPI_GRO_CB(skb)->frag0_len < hlen;
}

static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
					unsigned int offset)
{
	if (!pskb_may_pull(skb, hlen))
		return NULL;

	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;
	return skb->data + offset;
}

static inline void *skb_gro_mac_header(struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb);
}

static inline void *skb_gro_network_header(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
	       skb_network_offset(skb);
}

static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
				  unsigned short type,
				  const void *daddr, const void *saddr,
				  unsigned len)
{
	if (!dev->header_ops || !dev->header_ops->create)
		return 0;

	return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
}

static inline int dev_parse_header(const struct sk_buff *skb,
				   unsigned char *haddr)
{
	const struct net_device *dev = skb->dev;

	if (!dev->header_ops || !dev->header_ops->parse)
		return 0;
	return dev->header_ops->parse(skb, haddr);
}

typedef int gifconf_func_t(struct net_device *dev, char __user *bufptr, int len);
extern int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
static inline int unregister_gifconf(unsigned int family)
{
	return register_gifconf(family, NULL);
}

/*
 * Incoming packets are placed on per-cpu queues
 */
struct softnet_data {
	struct Qdisc		*output_queue;
	struct Qdisc		**output_queue_tailp;
	struct list_head	poll_list;
	struct sk_buff		*completion_queue;
	struct sk_buff_head	process_queue;

	/* stats */
	unsigned int		processed;
	unsigned int		time_squeeze;
	unsigned int		cpu_collision;
	unsigned int		received_rps;

#ifdef CONFIG_RPS
	struct softnet_data	*rps_ipi_list;

	/* Elements below can be accessed between CPUs for RPS */
	struct call_single_data	csd ____cacheline_aligned_in_smp;
	struct softnet_data	*rps_ipi_next;
	unsigned int		cpu;
	unsigned int		input_queue_head;
	unsigned int		input_queue_tail;
#endif
	unsigned		dropped;
	struct sk_buff_head	input_pkt_queue;
	struct napi_struct	backlog;
};

static inline void input_queue_head_incr(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	sd->input_queue_head++;
#endif
}

static inline void input_queue_tail_incr_save(struct softnet_data *sd,
					      unsigned int *qtail)
{
#ifdef CONFIG_RPS
	*qtail = ++sd->input_queue_tail;
#endif
}

DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);

extern void __netif_schedule(struct Qdisc *q);

static inline void netif_schedule_queue(struct netdev_queue *txq)
{
	if (!(txq->state & QUEUE_STATE_ANY_XOFF))
		__netif_schedule(txq->qdisc);
}

static inline void netif_tx_schedule_all(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		netif_schedule_queue(netdev_get_tx_queue(dev, i));
}

static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
	clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 *	netif_start_queue - allow transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 */
static inline void netif_start_queue(struct net_device *dev)
{
	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_start_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_start_queue(txq);
	}
}

static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap()) {
		netif_tx_start_queue(dev_queue);
		return;
	}
#endif
	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state))
		__netif_schedule(dev_queue->qdisc);
}

/**
 *	netif_wake_queue - restart transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are available.
 */
static inline void netif_wake_queue(struct net_device *dev)
{
	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_wake_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_wake_queue(txq);
	}
}

static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
	if (WARN_ON(!dev_queue)) {
		pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
		return;
	}
	set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 *	netif_stop_queue - stop transmitted packets
 *	@dev: network device
 *
 *	Stop upper layers calling the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are unavailable.
 */
static inline void netif_stop_queue(struct net_device *dev)
{
	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_stop_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_stop_queue(txq);
	}
}

static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
	return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 *	netif_queue_stopped - test if transmit queue is flowblocked
 *	@dev: network device
 *
 *	Test if transmit queue on device is currently unable to send.
 */
static inline bool netif_queue_stopped(const struct net_device *dev)
{
	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}

static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_ANY_XOFF;
}

static inline bool netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
}

static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
					unsigned int bytes)
{
#ifdef CONFIG_BQL
	dql_queued(&dev_queue->dql, bytes);

	if (likely(dql_avail(&dev_queue->dql) >= 0))
		return;

	set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);

	/*
	 * The XOFF flag must be set before checking the dql_avail below,
	 * because in netdev_tx_completed_queue we update the dql_completed
	 * before checking the XOFF flag.
	 */
	smp_mb();

	/* check again in case another CPU has just made room available */
	if (unlikely(dql_avail(&dev_queue->dql) >= 0))
		clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
#endif
}

static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
{
	netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
}

static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
					     unsigned pkts, unsigned bytes)
{
#ifdef CONFIG_BQL
	if (unlikely(!bytes))
		return;

	dql_completed(&dev_queue->dql, bytes);

	/*
	 * Without the memory barrier there is a small possibility that
	 * netdev_tx_sent_queue will miss the update and cause the queue to
	 * be stopped forever
	 */
	smp_mb();

	if (dql_avail(&dev_queue->dql) < 0)
		return;

	if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
		netif_schedule_queue(dev_queue);
#endif
}

static inline void netdev_completed_queue(struct net_device *dev,
					  unsigned pkts, unsigned bytes)
{
	netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
}

static inline void netdev_tx_reset_queue(struct netdev_queue *q)
{
#ifdef CONFIG_BQL
	clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
	dql_reset(&q->dql);
#endif
}

static inline void netdev_reset_queue(struct net_device *dev_queue)
{
	netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
}
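
/*
 * Example: byte queue limits pair one netdev_tx_sent_queue() in the
 * transmit path with one netdev_tx_completed_queue() in the TX
 * completion path.  A sketch, assuming a hypothetical single-queue
 * driver "foo":
 *
 *	in foo_start_xmit(), after queueing the descriptor:
 *		netdev_sent_queue(dev, skb->len);
 *
 *	in the TX completion interrupt/NAPI path, after reclaiming
 *	descriptors:
 *		netdev_completed_queue(dev, pkts_done, bytes_done);
 *
 *	in foo_stop() or after a hardware reset:
 *		netdev_reset_queue(dev);
 */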

/**
 *	netif_running - test if up
 *	@dev: network device
 *
 *	Test if the device has been brought up.
 */
static inline bool netif_running(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}

/*
 * Routines to manage the subqueues on a device.  We only need start,
 * stop, and a check if it's stopped.  All other device management is
 * done at the overall netdevice level.
 * Also test the device if we're multiqueue.
 */

/**
 *	netif_start_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Start individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	netif_tx_start_queue(txq);
}

/**
 *	netif_stop_subqueue - stop sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Stop individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	netif_tx_stop_queue(txq);
}

/**
 *	__netif_subqueue_stopped - test status of subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Check individual transmit queue of a device with multiple transmit queues.
 */
static inline bool __netif_subqueue_stopped(const struct net_device *dev,
					    u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	return netif_tx_queue_stopped(txq);
}

static inline bool netif_subqueue_stopped(const struct net_device *dev,
					  struct sk_buff *skb)
{
	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}

/**
 *	netif_wake_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Resume individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
		__netif_schedule(txq->qdisc);
}

/*
 * Returns a Tx hash for the given packet when dev->real_num_tx_queues
 * is used as a distribution range limit for the returned value.
 */
static inline u16 skb_tx_hash(const struct net_device *dev,
			      const struct sk_buff *skb)
{
	return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
}

/**
 *	netif_is_multiqueue - test if device has multiple transmit queues
 *	@dev: network device
 *
 * Check if device has multiple transmit queues
 */
static inline bool netif_is_multiqueue(const struct net_device *dev)
{
	return dev->num_tx_queues > 1;
}

extern int netif_set_real_num_tx_queues(struct net_device *dev,
					unsigned int txq);

#ifdef CONFIG_RPS
extern int netif_set_real_num_rx_queues(struct net_device *dev,
					unsigned int rxq);
#else
static inline int netif_set_real_num_rx_queues(struct net_device *dev,
					       unsigned int rxq)
{
	return 0;
}
#endif

static inline int netif_copy_real_num_queues(struct net_device *to_dev,
					     const struct net_device *from_dev)
{
	netif_set_real_num_tx_queues(to_dev, from_dev->real_num_tx_queues);
#ifdef CONFIG_RPS
	return netif_set_real_num_rx_queues(to_dev,
					    from_dev->real_num_rx_queues);
#else
	return 0;
#endif
}

/* Use this variant when it is known for sure that it
 * is executing from hardware interrupt context or with hardware interrupts
 * disabled.
 */
extern void dev_kfree_skb_irq(struct sk_buff *skb);

/* Use this variant in places where it could be invoked
 * from either hardware interrupt or other context, with hardware interrupts
 * either disabled or enabled.
 */
extern void dev_kfree_skb_any(struct sk_buff *skb);

extern int netif_rx(struct sk_buff *skb);
extern int netif_rx_ni(struct sk_buff *skb);
extern int netif_receive_skb(struct sk_buff *skb);
extern gro_result_t dev_gro_receive(struct napi_struct *napi,
				    struct sk_buff *skb);
extern gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb);
extern gro_result_t napi_gro_receive(struct napi_struct *napi,
				     struct sk_buff *skb);
extern void napi_gro_flush(struct napi_struct *napi);
extern struct sk_buff *napi_get_frags(struct napi_struct *napi);
extern gro_result_t napi_frags_finish(struct napi_struct *napi,
				      struct sk_buff *skb,
				      gro_result_t ret);
extern struct sk_buff *napi_frags_skb(struct napi_struct *napi);
extern gro_result_t napi_gro_frags(struct napi_struct *napi);

static inline void napi_free_frags(struct napi_struct *napi)
{
	kfree_skb(napi->skb);
	napi->skb = NULL;
}

extern int netdev_rx_handler_register(struct net_device *dev,
				      rx_handler_func_t *rx_handler,
				      void *rx_handler_data);
extern void netdev_rx_handler_unregister(struct net_device *dev);

extern bool dev_valid_name(const char *name);
extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
extern int dev_ethtool(struct net *net, struct ifreq *);
extern unsigned dev_get_flags(const struct net_device *);
extern int __dev_change_flags(struct net_device *, unsigned int flags);
extern int dev_change_flags(struct net_device *, unsigned);
extern void __dev_notify_flags(struct net_device *, unsigned int old_flags);
extern int dev_change_name(struct net_device *, const char *);
extern int dev_set_alias(struct net_device *, const char *, size_t);
extern int dev_change_net_namespace(struct net_device *,
				    struct net *, const char *);
extern int dev_set_mtu(struct net_device *, int);
extern void dev_set_group(struct net_device *, int);
extern int dev_set_mac_address(struct net_device *,
			       struct sockaddr *);
extern int dev_hard_start_xmit(struct sk_buff *skb,
			       struct net_device *dev,
			       struct netdev_queue *txq);
extern int dev_forward_skb(struct net_device *dev,
			   struct sk_buff *skb);

extern int netdev_budget;

/* Called by rtnetlink.c:rtnl_unlock() */
extern void netdev_run_todo(void);

/**
 *	dev_put - release reference to device
 *	@dev: network device
 *
 * Release reference to device to allow it to be freed.
 */
static inline void dev_put(struct net_device *dev)
{
	this_cpu_dec(*dev->pcpu_refcnt);
}

/**
 *	dev_hold - get reference to device
 *	@dev: network device
 *
 * Hold reference to device to keep it from being freed.
 */
static inline void dev_hold(struct net_device *dev)
{
	this_cpu_inc(*dev->pcpu_refcnt);
}

/* Carrier loss detection, dial on demand. The functions netif_carrier_on
 * and _off may be called from IRQ context, but it is the caller
 * who is responsible for serialization of these calls.
 *
 * The name carrier is inappropriate, these functions should really be
 * called netif_lowerlayer_*() because they represent the state of any
 * layer below link layer.
 */

extern void linkwatch_fire_event(struct net_device *dev);
extern void linkwatch_forget_dev(struct net_device *dev);

/**
 *	netif_carrier_ok - test if carrier present
 *	@dev: network device
 *
 * Check if carrier is present on device
 */
static inline bool netif_carrier_ok(const struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}

extern unsigned long dev_trans_start(struct net_device *dev);

extern void __netdev_watchdog_up(struct net_device *dev);

extern void netif_carrier_on(struct net_device *dev);

extern void netif_carrier_off(struct net_device *dev);

extern void netif_notify_peers(struct net_device *dev);

/**
 *	netif_dormant_on - mark device as dormant.
 *	@dev: network device
 *
 * Mark device as dormant (as per RFC2863).
 *
 * The dormant state indicates that the relevant interface is not
 * actually in a condition to pass packets (i.e., it is not 'up') but is
 * in a "pending" state, waiting for some external event.  For "on-
 * demand" interfaces, this new state identifies the situation where the
 * interface is waiting for events to place it in the up state.
 */
static inline void netif_dormant_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant_off - set device as not dormant.
 *	@dev: network device
 *
 * Device is not in dormant state.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant - test if device is dormant
 *	@dev: network device
 *
 * Check if the device is dormant.
 */
static inline bool netif_dormant(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_DORMANT, &dev->state);
}

/**
 *	netif_oper_up - test if device is operational
 *	@dev: network device
 *
 * Check if carrier is operational
 */
static inline bool netif_oper_up(const struct net_device *dev)
{
	return (dev->operstate == IF_OPER_UP ||
		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}

/**
 *	netif_device_present - is device available or removed
 *	@dev: network device
 *
 * Check if device has not been removed from system.
 */
static inline bool netif_device_present(struct net_device *dev)
{
	return test_bit(__LINK_STATE_PRESENT, &dev->state);
}

extern void netif_device_detach(struct net_device *dev);

extern void netif_device_attach(struct net_device *dev);

/*
 * Network interface message level settings
 */

enum {
	NETIF_MSG_DRV		= 0x0001,
	NETIF_MSG_PROBE		= 0x0002,
	NETIF_MSG_LINK		= 0x0004,
	NETIF_MSG_TIMER		= 0x0008,
	NETIF_MSG_IFDOWN	= 0x0010,
	NETIF_MSG_IFUP		= 0x0020,
	NETIF_MSG_RX_ERR	= 0x0040,
	NETIF_MSG_TX_ERR	= 0x0080,
	NETIF_MSG_TX_QUEUED	= 0x0100,
	NETIF_MSG_INTR		= 0x0200,
	NETIF_MSG_TX_DONE	= 0x0400,
	NETIF_MSG_RX_STATUS	= 0x0800,
	NETIF_MSG_PKTDATA	= 0x1000,
	NETIF_MSG_HW		= 0x2000,
	NETIF_MSG_WOL		= 0x4000,
};

#define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)

static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
	/* use default */
	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
		return default_msg_enable_bits;
	if (debug_value == 0)	/* no output */
		return 0;
	/* set low N bits */
	return (1 << debug_value) - 1;
}
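
/*
 * Example: drivers typically keep a msg_enable field in their private
 * struct, seed it from a module parameter at probe time, and gate
 * diagnostics on the netif_msg_*() tests.  A sketch - the "foo" names
 * and the "debug" module parameter are illustrative:
 *
 *	fp->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV |
 *					       NETIF_MSG_PROBE |
 *					       NETIF_MSG_LINK);
 *	...
 *	if (netif_msg_link(fp))
 *		netdev_info(dev, "link up\n");
 */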

static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
	spin_lock(&txq->_xmit_lock);
	txq->xmit_lock_owner = cpu;
}

static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
	spin_lock_bh(&txq->_xmit_lock);
	txq->xmit_lock_owner = smp_processor_id();
}

static inline bool __netif_tx_trylock(struct netdev_queue *txq)
{
	bool ok = spin_trylock(&txq->_xmit_lock);
	if (likely(ok))
		txq->xmit_lock_owner = smp_processor_id();
	return ok;
}

static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock(&txq->_xmit_lock);
}

static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock_bh(&txq->_xmit_lock);
}

static inline void txq_trans_update(struct netdev_queue *txq)
{
	if (txq->xmit_lock_owner != -1)
		txq->trans_start = jiffies;
}

/**
 *	netif_tx_lock - grab network device transmit lock
 *	@dev: network device
 *
 * Get network device transmit lock
 */
static inline void netif_tx_lock(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	spin_lock(&dev->tx_global_lock);
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* We are the only thread of execution doing a
		 * freeze, but we have to grab the _xmit_lock in
		 * order to synchronize with threads which are in
		 * the ->hard_start_xmit() handler and already
		 * checked the frozen bit.
		 */
		__netif_tx_lock(txq, cpu);
		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
		__netif_tx_unlock(txq);
	}
}

static inline void netif_tx_lock_bh(struct net_device *dev)
{
	local_bh_disable();
	netif_tx_lock(dev);
}

static inline void netif_tx_unlock(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* No need to grab the _xmit_lock here.  If the
		 * queue is not stopped for another reason, we
		 * force a schedule.
		 */
		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
		netif_schedule_queue(txq);
	}
	spin_unlock(&dev->tx_global_lock);
}

static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	netif_tx_unlock(dev);
	local_bh_enable();
}

#define HARD_TX_LOCK(dev, txq, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_lock(txq, cpu);		\
	}						\
}

#define HARD_TX_UNLOCK(dev, txq) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_unlock(txq);			\
	}						\
}

static inline void netif_tx_disable(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	local_bh_disable();
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		__netif_tx_lock(txq, cpu);
		netif_tx_stop_queue(txq);
		__netif_tx_unlock(txq);
	}
	local_bh_enable();
}

static inline void netif_addr_lock(struct net_device *dev)
{
	spin_lock(&dev->addr_list_lock);
}

static inline void netif_addr_lock_nested(struct net_device *dev)
{
	spin_lock_nested(&dev->addr_list_lock, SINGLE_DEPTH_NESTING);
}

static inline void netif_addr_lock_bh(struct net_device *dev)
{
	spin_lock_bh(&dev->addr_list_lock);
}

static inline void netif_addr_unlock(struct net_device *dev)
{
	spin_unlock(&dev->addr_list_lock);
}

static inline void netif_addr_unlock_bh(struct net_device *dev)
{
	spin_unlock_bh(&dev->addr_list_lock);
}

/*
 * dev_addrs walker. Should be used only for read access. Call with
 * rcu_read_lock held.
 */
#define for_each_dev_addr(dev, ha) \
	list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)

/* These functions live elsewhere (drivers/net/net_init.c, but related) */

extern void ether_setup(struct net_device *dev);

/* Support for loadable net-drivers */
extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
					   void (*setup)(struct net_device *),
					   unsigned int txqs, unsigned int rxqs);
#define alloc_netdev(sizeof_priv, name, setup) \
	alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)

#define alloc_netdev_mq(sizeof_priv, name, setup, count) \
	alloc_netdev_mqs(sizeof_priv, name, setup, count, count)
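
/*
 * Example: the usual driver lifecycle built from the above.  A sketch,
 * assuming a hypothetical Ethernet driver "foo" with private struct
 * foo_priv and the foo_netdev_ops table shown earlier:
 *
 *	struct net_device *dev;
 *	struct foo_priv *fp;
 *	int err;
 *
 *	dev = alloc_netdev(sizeof(struct foo_priv), "foo%d", ether_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	fp = netdev_priv(dev);
 *	dev->netdev_ops = &foo_netdev_ops;
 *	err = register_netdev(dev);
 *	if (err)
 *		free_netdev(dev);
 *
 * Teardown is unregister_netdev(dev) followed by free_netdev(dev).
 */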

extern int register_netdev(struct net_device *dev);
extern void unregister_netdev(struct net_device *dev);

/* General hardware address lists handling functions */
extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
				  struct netdev_hw_addr_list *from_list,
				  int addr_len, unsigned char addr_type);
extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
				   struct netdev_hw_addr_list *from_list,
				   int addr_len, unsigned char addr_type);
extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
			  struct netdev_hw_addr_list *from_list,
			  int addr_len);
extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
			     struct netdev_hw_addr_list *from_list,
			     int addr_len);
extern void __hw_addr_flush(struct netdev_hw_addr_list *list);
extern void __hw_addr_init(struct netdev_hw_addr_list *list);

/* Functions used for device addresses handling */
extern int dev_addr_add(struct net_device *dev, unsigned char *addr,
			unsigned char addr_type);
extern int dev_addr_del(struct net_device *dev, unsigned char *addr,
			unsigned char addr_type);
extern int dev_addr_add_multiple(struct net_device *to_dev,
				 struct net_device *from_dev,
				 unsigned char addr_type);
extern int dev_addr_del_multiple(struct net_device *to_dev,
				 struct net_device *from_dev,
				 unsigned char addr_type);
extern void dev_addr_flush(struct net_device *dev);
extern int dev_addr_init(struct net_device *dev);

/* Functions used for unicast addresses handling */
extern int dev_uc_add(struct net_device *dev, unsigned char *addr);
extern int dev_uc_del(struct net_device *dev, unsigned char *addr);
extern int dev_uc_sync(struct net_device *to, struct net_device *from);
extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
extern void dev_uc_flush(struct net_device *dev);
extern void dev_uc_init(struct net_device *dev);

/* Functions used for multicast addresses handling */
extern int dev_mc_add(struct net_device *dev, unsigned char *addr);
extern int dev_mc_add_global(struct net_device *dev, unsigned char *addr);
extern int dev_mc_del(struct net_device *dev, unsigned char *addr);
extern int dev_mc_del_global(struct net_device *dev, unsigned char *addr);
extern int dev_mc_sync(struct net_device *to, struct net_device *from);
extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
extern void dev_mc_flush(struct net_device *dev);
extern void dev_mc_init(struct net_device *dev);
2555
2556
2557extern void dev_set_rx_mode(struct net_device *dev);
2558extern void __dev_set_rx_mode(struct net_device *dev);
2559extern int dev_set_promiscuity(struct net_device *dev, int inc);
2560extern int dev_set_allmulti(struct net_device *dev, int inc);
2561extern void netdev_state_change(struct net_device *dev);
2562extern int netdev_bonding_change(struct net_device *dev,
2563 unsigned long event);
2564extern void netdev_features_change(struct net_device *dev);
2565
2566extern void dev_load(struct net *net, const char *name);
2567extern void dev_mcast_init(void);
2568extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
2569 struct rtnl_link_stats64 *storage);
2570extern void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
2571 const struct net_device_stats *netdev_stats);

extern int netdev_max_backlog;
extern int netdev_tstamp_prequeue;
extern int weight_p;
extern int bpf_jit_enable;
extern int netdev_set_master(struct net_device *dev, struct net_device *master);
extern int netdev_set_bond_master(struct net_device *dev,
				  struct net_device *master);
extern int skb_checksum_help(struct sk_buff *skb);
extern struct sk_buff *skb_gso_segment(struct sk_buff *skb,
				       netdev_features_t features);
#ifdef CONFIG_BUG
extern void netdev_rx_csum_fault(struct net_device *dev);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
	/* no-op when CONFIG_BUG is disabled */
}
#endif
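/*
 * Example (sketch): checksum fallback on transmit; when the device
 * cannot checksum this packet, resolve a CHECKSUM_PARTIAL skb in
 * software before handing it over:
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    !(features & NETIF_F_ALL_CSUM) &&
 *	    skb_checksum_help(skb))
 *		goto drop;
 */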

extern void net_enable_timestamp(void);
extern void net_disable_timestamp(void);

#ifdef CONFIG_PROC_FS
extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
extern void dev_seq_stop(struct seq_file *seq, void *v);
#endif

extern int netdev_class_create_file(struct class_attribute *class_attr);
extern void netdev_class_remove_file(struct class_attribute *class_attr);

extern struct kobj_ns_type_operations net_ns_type_operations;

extern const char *netdev_drivername(const struct net_device *dev);

extern void linkwatch_run_queue(void);

/* The feature set the device should end up with: keep every feature
 * that is not user-toggleable (outside hw_features) and overlay the
 * requested state of the toggleable bits.
 */
static inline netdev_features_t netdev_get_wanted_features(
	struct net_device *dev)
{
	return (dev->features & ~dev->hw_features) | dev->wanted_features;
}
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask);
int __netdev_update_features(struct net_device *dev);
void netdev_update_features(struct net_device *dev);
void netdev_change_features(struct net_device *dev);

void netif_stacked_transfer_operstate(const struct net_device *rootdev,
				      struct net_device *dev);

netdev_features_t netif_skb_features(struct sk_buff *skb);
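/*
 * Example (sketch): a driver whose offload capabilities changed asks
 * the core to recompute and propagate dev->features; callers hold
 * rtnl_lock:
 *
 *	rtnl_lock();
 *	netdev_update_features(dev);
 *	rtnl_unlock();
 */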

static inline bool net_gso_ok(netdev_features_t features, int gso_type)
{
	netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;

	/* check flags correspondence between SKB_GSO_* and NETIF_F_* */
	BUILD_BUG_ON(SKB_GSO_TCPV4   != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP     != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_DODGY   != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCPV6   != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_FCOE    != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));

	return (features & feature) == feature;
}

static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
{
	return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
	       (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
}

/* True if the skb must be segmented in software before this device
 * can transmit it.
 */
static inline bool netif_needs_gso(struct sk_buff *skb,
				   netdev_features_t features)
{
	return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
		unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
			 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
}
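/*
 * Example (sketch): the canonical transmit-path check, pairing
 * netif_skb_features() with netif_needs_gso() before falling back to
 * software segmentation; skb_gso_segment() returns the segment list
 * or an ERR_PTR:
 *
 *	netdev_features_t features = netif_skb_features(skb);
 *
 *	if (netif_needs_gso(skb, features)) {
 *		struct sk_buff *segs = skb_gso_segment(skb, features);
 *		if (IS_ERR(segs))
 *			goto drop;
 *	}
 */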

static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	dev->gso_max_size = size;
}

static inline bool netif_is_bond_slave(struct net_device *dev)
{
	return (dev->flags & IFF_SLAVE) && (dev->priv_flags & IFF_BONDING);
}

static inline bool netif_supports_nofcs(struct net_device *dev)
{
	return dev->priv_flags & IFF_SUPP_NOFCS;
}

extern struct pernet_operations __net_initdata loopback_net_ops;

/*
 * Logging, debugging and troubleshooting/diagnostic helpers.
 */

/* netdev_printk helpers, similar to dev_printk */

static inline const char *netdev_name(const struct net_device *dev)
{
	if (dev->reg_state != NETREG_REGISTERED)
		return "(unregistered net_device)";
	return dev->name;
}

extern int __netdev_printk(const char *level, const struct net_device *dev,
			   struct va_format *vaf);

extern __printf(3, 4)
int netdev_printk(const char *level, const struct net_device *dev,
		  const char *format, ...);
extern __printf(2, 3)
int netdev_emerg(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_alert(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_crit(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_err(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_warn(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_notice(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_info(const struct net_device *dev, const char *format, ...);
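/*
 * Example (sketch): these helpers prefix messages with the driver and
 * interface name, so call sites stay short; "ring" is a hypothetical
 * variable:
 *
 *	netdev_err(dev, "TX ring %d stalled\n", ring);
 *	netdev_info(dev, "link up\n");
 */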

#define MODULE_ALIAS_NETDEV(device) \
	MODULE_ALIAS("netdev-" device)

#if defined(CONFIG_DYNAMIC_DEBUG)
#define netdev_dbg(__dev, format, args...)			\
do {								\
	dynamic_netdev_dbg(__dev, format, ##args);		\
} while (0)
#elif defined(DEBUG)
#define netdev_dbg(__dev, format, args...)			\
	netdev_printk(KERN_DEBUG, __dev, format, ##args)
#else
/* Compiled out, but the dead "if (0)" still type-checks arguments */
#define netdev_dbg(__dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, __dev, format, ##args); \
	0;							\
})
#endif

#if defined(VERBOSE_DEBUG)
#define netdev_vdbg	netdev_dbg
#else

#define netdev_vdbg(dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, dev, format, ##args);	\
	0;							\
})
#endif

/*
 * netdev_WARN() acts like netdev_printk(), but emits a full WARN()
 * with backtrace and taint.  Deliberately no trailing semicolon, so
 * call sites can use it like a function call (the stray semicolon
 * previously here broke brace-less if/else call sites).
 */
#define netdev_WARN(dev, format, args...)			\
	WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args)

/* netif printk helpers, similar to netdev_printk */

#define netif_printk(priv, type, level, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_printk(level, (dev), fmt, ##args);	\
} while (0)

#define netif_level(level, priv, type, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_##level(dev, fmt, ##args);		\
} while (0)

#define netif_emerg(priv, type, dev, fmt, args...)		\
	netif_level(emerg, priv, type, dev, fmt, ##args)
#define netif_alert(priv, type, dev, fmt, args...)		\
	netif_level(alert, priv, type, dev, fmt, ##args)
#define netif_crit(priv, type, dev, fmt, args...)		\
	netif_level(crit, priv, type, dev, fmt, ##args)
#define netif_err(priv, type, dev, fmt, args...)		\
	netif_level(err, priv, type, dev, fmt, ##args)
#define netif_warn(priv, type, dev, fmt, args...)		\
	netif_level(warn, priv, type, dev, fmt, ##args)
#define netif_notice(priv, type, dev, fmt, args...)		\
	netif_level(notice, priv, type, dev, fmt, ##args)
#define netif_info(priv, type, dev, fmt, args...)		\
	netif_level(info, priv, type, dev, fmt, ##args)
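/*
 * Example (sketch): message-class gated logging; "priv" is the driver
 * private struct carrying an ethtool-controlled msg_enable bitmap,
 * queried through the netif_msg_##type() helpers:
 *
 *	netif_info(priv, link, dev, "link up, %u Mbps\n", speed);
 *	netif_warn(priv, rx_err, dev, "bad RX descriptor\n");
 */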

#if defined(DEBUG)
#define netif_dbg(priv, type, dev, format, args...)		\
	netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
#elif defined(CONFIG_DYNAMIC_DEBUG)
#define netif_dbg(priv, type, netdev, format, args...)		\
do {								\
	if (netif_msg_##type(priv))				\
		dynamic_netdev_dbg(netdev, format, ##args);	\
} while (0)
#else
#define netif_dbg(priv, type, dev, format, args...)			\
({									\
	if (0)								\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;								\
})
#endif

#if defined(VERBOSE_DEBUG)
#define netif_vdbg	netif_dbg
#else
#define netif_vdbg(priv, type, dev, format, args...)			\
({									\
	if (0)								\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;								\
})
#endif

#endif /* __KERNEL__ */

#endif	/* _LINUX_NETDEVICE_H */