/*
 * Definitions for the Interfaces handler.
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/if_link.h>

#ifdef __KERNEL__
#include <linux/pm_qos.h>
#include <linux/timer.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>
#include <linux/dynamic_queue_limits.h>

#include <linux/ethtool.h>
#include <net/net_namespace.h>
#include <net/dsa.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif
#include <net/netprio_cgroup.h>

#include <linux/netdev_features.h>
#include <linux/neighbour.h>

struct netpoll_info;
struct device;
struct phy_device;
/* 802.11 specific */
struct wireless_dev;

#define SET_ETHTOOL_OPS(netdev, ops) \
	((netdev)->ethtool_ops = (ops))

/* hardware address assignment types */
#define NET_ADDR_PERM		0	/* address is permanent (default) */
#define NET_ADDR_RANDOM		1	/* address is generated randomly */
#define NET_ADDR_STOLEN		2	/* address is stolen from another device */

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */

/*
 * Transmit return codes: transmit return codes originate from three different
 * namespaces:
 *
 * - qdisc return codes
 * - driver transmit return codes
 * - errno values
 *
 * Drivers are allowed to return any one of those in their hard_start_xmit()
 * function. Real network devices commonly used with qdiscs should only return
 * the driver transmit return codes though - when qdiscs are used, the actual
 * transmission happens asynchronously, so the value is not propagated to
 * higher layers. Virtual network devices transmit synchronously; in this case
 * the driver transmit return codes are consumed by dev_queue_xmit(), and all
 * others are propagated to higher layers.
 */

/* qdisc ->enqueue() return codes. */
#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_DROP		0x01	/* skb dropped			*/
#define NET_XMIT_CN		0x02	/* congestion notification	*/
#define NET_XMIT_POLICED	0x03	/* skb is shot by police	*/
#define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority, and prompts us to send less aggressively.
 */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)

/* Driver transmit return codes */
#define NETDEV_TX_MASK		0xf0

enum netdev_tx {
	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy */
	NETDEV_TX_LOCKED = 0x20,	/* driver tx lock was already taken */
};
typedef enum netdev_tx netdev_tx_t;

/*
 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
 * hard_start_xmit() must not return NET_XMIT_*.
 */
static inline bool dev_xmit_complete(int rc)
{
	/*
	 * A return value below NET_XMIT_MASK means the driver consumed
	 * the skb; anything else (NETDEV_TX_BUSY, NETDEV_TX_LOCKED)
	 * means the skb was not consumed and may be requeued.
	 */
	if (likely(rc < NET_XMIT_MASK))
		return true;

	return false;
}
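
/*
 * Example (a condensed sketch; the foo_* names are hypothetical): a
 * driver's ndo_start_xmit() returns NETDEV_TX_BUSY to ask the stack to
 * requeue the skb, and NETDEV_TX_OK once it has consumed it:
 *
 *	static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
 *					  struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		if (!foo_tx_ring_has_room(priv)) {
 *			netif_stop_queue(dev);
 *			return NETDEV_TX_BUSY;
 *		}
 *		foo_hw_queue_skb(priv, skb);
 *		return NETDEV_TX_OK;
 *	}
 */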

#endif /* __KERNEL__ */

#define MAX_ADDR_LEN	32		/* Largest hardware address length */

/* Initial net device group. All devices belong to group 0 by default. */
#define INIT_NETDEV_GROUP	0

#ifdef __KERNEL__

/*
 * Compute the worst-case header length according to the protocols
 * used.
 */

#if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#elif IS_ENABLED(CONFIG_TR)
# define LL_MAX_HEADER 48
#else
# define LL_MAX_HEADER 32
#endif

#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
    !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

/*
 * Old network device statistics. Fields are native words
 * (unsigned long) so they can be read and written atomically.
 */

struct net_device_stats {
	unsigned long	rx_packets;
	unsigned long	tx_packets;
	unsigned long	rx_bytes;
	unsigned long	tx_bytes;
	unsigned long	rx_errors;
	unsigned long	tx_errors;
	unsigned long	rx_dropped;
	unsigned long	tx_dropped;
	unsigned long	multicast;
	unsigned long	collisions;
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;
	unsigned long	rx_crc_errors;
	unsigned long	rx_frame_errors;
	unsigned long	rx_fifo_errors;
	unsigned long	rx_missed_errors;
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};

#endif /* __KERNEL__ */

/* Media selection options. */
enum {
	IF_PORT_UNKNOWN = 0,
	IF_PORT_10BASE2,
	IF_PORT_10BASET,
	IF_PORT_AUI,
	IF_PORT_100BASET,
	IF_PORT_100BASETX,
	IF_PORT_100BASEFX
};

#ifdef __KERNEL__

#include <linux/cache.h>
#include <linux/skbuff.h>

#ifdef CONFIG_RPS
#include <linux/static_key.h>
extern struct static_key rps_needed;
#endif

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netdev_hw_addr {
	struct list_head	list;
	unsigned char		addr[MAX_ADDR_LEN];
	unsigned char		type;
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_SLAVE		3
#define NETDEV_HW_ADDR_T_UNICAST	4
#define NETDEV_HW_ADDR_T_MULTICAST	5
	bool			synced;
	bool			global_use;
	int			refcount;
	struct rcu_head		rcu_head;
};

struct netdev_hw_addr_list {
	struct list_head	list;
	int			count;
};

#define netdev_hw_addr_list_count(l) ((l)->count)
#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
#define netdev_hw_addr_list_for_each(ha, l) \
	list_for_each_entry(ha, &(l)->list, list)

#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->uc)

#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->mc)
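
/*
 * Example (sketch; the foo_* filter helpers are hypothetical): walking
 * the multicast list from a driver's ndo_set_rx_mode() callback, which
 * runs with dev->addr_list_lock held:
 *
 *	static void foo_set_rx_mode(struct net_device *dev)
 *	{
 *		struct netdev_hw_addr *ha;
 *
 *		foo_clear_mc_filter(dev);
 *		netdev_for_each_mc_addr(ha, dev)
 *			foo_add_mc_filter(dev, ha->addr);
 *	}
 */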

struct hh_cache {
	u16		hh_len;
	u16		__pad;
	seqlock_t	hh_lock;

	/* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len) + (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)

struct header_ops {
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned int len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*rebuild)(struct sk_buff *skb);
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
};

/* These flag bits are private to the generic network queueing
 * layer; they may not be explicitly referenced by any other
 * code.
 */
enum netdev_state_t {
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
};

/*
 * This structure holds boot-time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

extern int __init netdev_boot_setup(char *str);

/*
 * Structure for NAPI scheduling, similar to tasklet but with weighting.
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit.  This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-cpu poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	spinlock_t		poll_lock;
	int			poll_owner;
#endif

	unsigned int		gro_count;

	struct net_device	*dev;
	struct list_head	dev_list;
	struct sk_buff		*gro_list;
	struct sk_buff		*skb;
};

enum {
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_DISABLE,	/* Disable pending */
	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
};

enum gro_result {
	GRO_MERGED,
	GRO_MERGED_FREE,
	GRO_HELD,
	GRO_NORMAL,
	GRO_DROP,
};
typedef enum gro_result gro_result_t;

/*
 * enum rx_handler_result - Possible return values for rx_handlers.
 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
 * further.
 * @RX_HANDLER_ANOTHER: Do another round in the receive path. This is
 * indicated in case skb->dev was changed by the rx_handler.
 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
 *
 * rx_handlers are functions called from inside __netif_receive_skb(), to do
 * special processing of the skb, prior to delivery to protocol handlers.
 * Upon return, the caller decides what to do with the skb based on the
 * result.
 */
enum rx_handler_result {
	RX_HANDLER_CONSUMED,
	RX_HANDLER_ANOTHER,
	RX_HANDLER_EXACT,
	RX_HANDLER_PASS,
};
typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);

extern void __napi_schedule(struct napi_struct *n);

static inline bool napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_schedule_prep - check if NAPI can be scheduled
 *	@n: NAPI context
 *
 * Test if the NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
static inline bool napi_schedule_prep(struct napi_struct *n)
{
	return !napi_disable_pending(n) &&
		!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}

/**
 *	napi_schedule - schedule NAPI poll
 *	@n: NAPI context
 *
 * Schedule the NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}

/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
static inline bool napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return true;
	}
	return false;
}

/**
 *	napi_complete - NAPI processing complete
 *	@n: NAPI context
 *
 * Mark NAPI processing as complete.
 */
extern void __napi_complete(struct napi_struct *n);
extern void napi_complete(struct napi_struct *n);
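
/*
 * Example (condensed sketch; the foo_* names are hypothetical): the
 * canonical NAPI flow disables device interrupts and schedules the poll
 * from the ISR, then re-enables interrupts from the poll function once
 * less than the full budget was consumed:
 *
 *	static irqreturn_t foo_isr(int irq, void *data)
 *	{
 *		struct foo_priv *priv = data;
 *
 *		if (napi_schedule_prep(&priv->napi)) {
 *			foo_disable_irqs(priv);
 *			__napi_schedule(&priv->napi);
 *		}
 *		return IRQ_HANDLED;
 *	}
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
 *		int done = foo_clean_rx(priv, budget);
 *
 *		if (done < budget) {
 *			napi_complete(napi);
 *			foo_enable_irqs(priv);
 *		}
 *		return done;
 *	}
 */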

/**
 *	napi_disable - prevent NAPI from scheduling
 *	@n: NAPI context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
static inline void napi_disable(struct napi_struct *n)
{
	set_bit(NAPI_STATE_DISABLE, &n->state);
	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	clear_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_enable - enable NAPI scheduling
 *	@n: NAPI context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}

#ifdef CONFIG_SMP
/**
 *	napi_synchronize - wait until NAPI is not running
 *	@n: NAPI context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	while (test_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
}
#else
# define napi_synchronize(n)	barrier()
#endif

enum netdev_queue_state_t {
	__QUEUE_STATE_DRV_XOFF,
	__QUEUE_STATE_STACK_XOFF,
	__QUEUE_STATE_FROZEN,
#define QUEUE_STATE_ANY_XOFF ((1 << __QUEUE_STATE_DRV_XOFF) | \
			      (1 << __QUEUE_STATE_STACK_XOFF))
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
					(1 << __QUEUE_STATE_FROZEN))
};

/*
 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue.  The
 * netif_tx_* functions below are used to manipulate this flag.  The
 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
 * queue independently.  The netif_xmit_*stopped functions below are called
 * to check if the queue has been stopped by the driver or stack (either
 * of the XOFF bits is set in the state).  Drivers should not write to the
 * QUEUE_STATE bits directly, as they may race with the stack (they should
 * call the netif_tx_* functions instead).
 */
struct netdev_queue {
/*
 * read mostly part
 */
	struct net_device	*dev;
	struct Qdisc		*qdisc;
	struct Qdisc		*qdisc_sleeping;
#ifdef CONFIG_SYSFS
	struct kobject		kobj;
#endif
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	int			numa_node;
#endif
/*
 * write mostly part
 */
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	int			xmit_lock_owner;
	/*
	 * please use this field instead of dev->trans_start
	 */
	unsigned long		trans_start;

	/*
	 * Number of TX timeouts for this queue
	 * (/sys/class/net/DEV/Q/trans_timeout)
	 */
	unsigned long		trans_timeout;

	unsigned long		state;

#ifdef CONFIG_BQL
	struct dql		dql;
#endif
} ____cacheline_aligned_in_smp;

static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	return q->numa_node;
#else
	return NUMA_NO_NODE;
#endif
}

static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	q->numa_node = node;
#endif
}

#ifdef CONFIG_RPS
/*
 * This structure holds an RPS map which can be of variable length.  The
 * map is an array of CPUs.
 */
struct rps_map {
	unsigned int len;
	struct rcu_head rcu;
	u16 cpus[0];
};
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))

/*
 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
 * tail pointer for that CPU's input queue at the time of last enqueue, and
 * a hardware filter index.
 */
struct rps_dev_flow {
	u16 cpu;
	u16 filter;
	unsigned int last_qtail;
};
#define RPS_NO_FILTER 0xffff

/*
 * The rps_dev_flow_table structure contains a table of flow mappings.
 */
struct rps_dev_flow_table {
	unsigned int mask;
	struct rcu_head rcu;
	struct work_struct free_work;
	struct rps_dev_flow flows[0];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
	((_num) * sizeof(struct rps_dev_flow)))

/*
 * The rps_sock_flow_table contains mappings of flows to the last CPU
 * on which they were processed by the application (set in recvmsg).
 */
struct rps_sock_flow_table {
	unsigned int mask;
	u16 ents[0];
};
#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
	((_num) * sizeof(u16)))

#define RPS_NO_CPU 0xffff

static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
					u32 hash)
{
	if (table && hash) {
		unsigned int cpu, index = hash & table->mask;

		/* We only give a hint, preemption can change CPU under us */
		cpu = raw_smp_processor_id();

		if (table->ents[index] != cpu)
			table->ents[index] = cpu;
	}
}

static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
				       u32 hash)
{
	if (table && hash)
		table->ents[hash & table->mask] = RPS_NO_CPU;
}

extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;

#ifdef CONFIG_RFS_ACCEL
extern bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
				u32 flow_id, u16 filter_id);
#endif

/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
	struct rps_map __rcu		*rps_map;
	struct rps_dev_flow_table __rcu	*rps_flow_table;
	struct kobject			kobj;
	struct net_device		*dev;
} ____cacheline_aligned_in_smp;
#endif /* CONFIG_RPS */

#ifdef CONFIG_XPS
/*
 * This structure holds an XPS map which can be of variable length.  The
 * map is an array of queues.
 */
struct xps_map {
	unsigned int len;
	unsigned int alloc_len;
	struct rcu_head rcu;
	u16 queues[0];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map)) \
	/ sizeof(u16))

/*
 * This structure holds all XPS maps for device.  Maps are indexed by CPU.
 */
struct xps_dev_maps {
	struct rcu_head rcu;
	struct xps_map __rcu *cpu_map[0];
};
#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) + \
	(nr_cpu_ids * sizeof(struct xps_map *)))
#endif /* CONFIG_XPS */

#define TC_MAX_QUEUE	16
#define TC_BITMASK	15

struct netdev_tc_txq {
	u16 count;
	u16 offset;
};

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
/*
 * This structure is to hold information about the device
 * configured to run FCoE protocol stack.
 */
struct netdev_fcoe_hbainfo {
	char	manufacturer[64];
	char	serial_number[64];
	char	hardware_version[64];
	char	driver_version[64];
	char	optionrom_version[64];
	char	firmware_version[64];
	char	model[256];
	char	model_description[256];
};
#endif

/*
 * This structure defines the management hooks for network devices.
 * Unless noted otherwise, the hooks are optional and can be filled with
 * a null pointer.
 *
 * int (*ndo_init)(struct net_device *dev);
 *	Called when a network device is registered.
 * void (*ndo_uninit)(struct net_device *dev);
 *	Called when the device is being unregistered.
 * int (*ndo_open)(struct net_device *dev);
 *	Called when the device transitions to the up state.
 * int (*ndo_stop)(struct net_device *dev);
 *	Called when the device transitions to the down state.
 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb, struct net_device *dev);
 *	Called when a packet needs to be transmitted; required.  Must return
 *	NETDEV_TX_OK, NETDEV_TX_BUSY or NETDEV_TX_LOCKED.
 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb);
 *	Called to decide which queue to use on a multi-queue device.
 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
 *	Called to notify changes of the IFF_PROMISC/IFF_ALLMULTI rx-mode flags.
 * void (*ndo_set_rx_mode)(struct net_device *dev);
 *	Called to sync the unicast/multicast address filters with hardware.
 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
 *	Called when the MAC address needs to be changed.
 * int (*ndo_validate_addr)(struct net_device *dev);
 *	Tests if the MAC address is valid.
 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *	Called for device-specific ioctls.
 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
 *	Called when the MTU is changed; return an error to veto the change.
 * void (*ndo_tx_timeout)(struct net_device *dev);
 *	Called when the transmit watchdog timer expires.
 *
 * The remaining hooks cover statistics (ndo_get_stats64/ndo_get_stats),
 * VLAN filter maintenance, netpoll, SR-IOV virtual function configuration
 * (ndo_set_vf_*), multiqueue traffic classes (ndo_setup_tc), FCoE offload,
 * accelerated RFS flow steering, bonding slave management, feature
 * negotiation (ndo_fix_features/ndo_set_features), neighbour table glue,
 * and FDB entries.
 */
struct net_device_ops {
	int			(*ndo_init)(struct net_device *dev);
	void			(*ndo_uninit)(struct net_device *dev);
	int			(*ndo_open)(struct net_device *dev);
	int			(*ndo_stop)(struct net_device *dev);
	netdev_tx_t		(*ndo_start_xmit) (struct sk_buff *skb,
						   struct net_device *dev);
	u16			(*ndo_select_queue)(struct net_device *dev,
						    struct sk_buff *skb);
	void			(*ndo_change_rx_flags)(struct net_device *dev,
						       int flags);
	void			(*ndo_set_rx_mode)(struct net_device *dev);
	int			(*ndo_set_mac_address)(struct net_device *dev,
						       void *addr);
	int			(*ndo_validate_addr)(struct net_device *dev);
	int			(*ndo_do_ioctl)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
	int			(*ndo_set_config)(struct net_device *dev,
						  struct ifmap *map);
	int			(*ndo_change_mtu)(struct net_device *dev,
						  int new_mtu);
	int			(*ndo_neigh_setup)(struct net_device *dev,
						   struct neigh_parms *);
	void			(*ndo_tx_timeout) (struct net_device *dev);

	struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
						     struct rtnl_link_stats64 *storage);
	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);

	int			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
						       unsigned short vid);
	int			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
							unsigned short vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*ndo_poll_controller)(struct net_device *dev);
	int			(*ndo_netpoll_setup)(struct net_device *dev,
						     struct netpoll_info *info);
	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
	int			(*ndo_set_vf_mac)(struct net_device *dev,
						  int queue, u8 *mac);
	int			(*ndo_set_vf_vlan)(struct net_device *dev,
						   int queue, u16 vlan, u8 qos);
	int			(*ndo_set_vf_tx_rate)(struct net_device *dev,
						      int vf, int rate);
	int			(*ndo_set_vf_spoofchk)(struct net_device *dev,
						       int vf, bool setting);
	int			(*ndo_get_vf_config)(struct net_device *dev,
						     int vf,
						     struct ifla_vf_info *ivf);
	int			(*ndo_set_vf_port)(struct net_device *dev,
						   int vf,
						   struct nlattr *port[]);
	int			(*ndo_get_vf_port)(struct net_device *dev,
						   int vf, struct sk_buff *skb);
	int			(*ndo_setup_tc)(struct net_device *dev, u8 tc);
#if IS_ENABLED(CONFIG_FCOE)
	int			(*ndo_fcoe_enable)(struct net_device *dev);
	int			(*ndo_fcoe_disable)(struct net_device *dev);
	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
						      u16 xid,
						      struct scatterlist *sgl,
						      unsigned int sgc);
	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
						     u16 xid);
	int			(*ndo_fcoe_ddp_target)(struct net_device *dev,
						       u16 xid,
						       struct scatterlist *sgl,
						       unsigned int sgc);
	int			(*ndo_fcoe_get_hbainfo)(struct net_device *dev,
							struct netdev_fcoe_hbainfo *hbainfo);
#endif

#if IS_ENABLED(CONFIG_LIBFCOE)
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
						    u64 *wwn, int type);
#endif

#ifdef CONFIG_RFS_ACCEL
	int			(*ndo_rx_flow_steer)(struct net_device *dev,
						     const struct sk_buff *skb,
						     u16 rxq_index,
						     u32 flow_id);
#endif
	int			(*ndo_add_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	int			(*ndo_del_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	netdev_features_t	(*ndo_fix_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_set_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_neigh_construct)(struct neighbour *n);
	void			(*ndo_neigh_destroy)(struct neighbour *n);

	int			(*ndo_fdb_add)(struct ndmsg *ndm,
					       struct net_device *dev,
					       unsigned char *addr,
					       u16 flags);
	int			(*ndo_fdb_del)(struct ndmsg *ndm,
					       struct net_device *dev,
					       unsigned char *addr);
	int			(*ndo_fdb_dump)(struct sk_buff *skb,
						struct netlink_callback *cb,
						struct net_device *dev,
						int idx);
};
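
/*
 * Example (sketch): a driver normally fills a static, const ops table
 * and points dev->netdev_ops at it before register_netdev().  The
 * foo_* handlers are hypothetical; eth_mac_addr/eth_validate_addr are
 * the stock Ethernet helpers:
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		.ndo_open		= foo_open,
 *		.ndo_stop		= foo_stop,
 *		.ndo_start_xmit		= foo_start_xmit,
 *		.ndo_set_rx_mode	= foo_set_rx_mode,
 *		.ndo_set_mac_address	= eth_mac_addr,
 *		.ndo_validate_addr	= eth_validate_addr,
 *	};
 */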

/*
 *	The DEVICE structure.
 *	Actually, this whole structure is a big mistake.  It mixes I/O
 *	data with strictly "high-level" data, and it has to know about
 *	almost every data structure used in the INET module.
 *
 *	FIXME: cleanup struct net_device such that network protocol info
 *	moves out.
 */
struct net_device {

	/*
	 * This is the first field of the "visible" part of this structure
	 * (i.e. as seen by users in the "Space.c" file).  It is the name
	 * of the interface.
	 */
	char			name[IFNAMSIZ];

	struct pm_qos_request	pm_qos_req;

	/* device name hash chain */
	struct hlist_node	name_hlist;
	/* snmp alias */
	char			*ifalias;

	/*
	 *	I/O specific fields
	 *	FIXME: Merge these and struct ifmap into one
	 */
	unsigned long		mem_end;	/* shared mem end	*/
	unsigned long		mem_start;	/* shared mem start	*/
	unsigned long		base_addr;	/* device I/O address	*/
	unsigned int		irq;		/* device IRQ number	*/

	/*
	 *	Some hardware also needs these fields, but they are not
	 *	part of the usual set specified in Space.c.
	 */
	unsigned long		state;

	struct list_head	dev_list;
	struct list_head	napi_list;
	struct list_head	unreg_list;

	/* currently active device features */
	netdev_features_t	features;
	/* user-changeable features */
	netdev_features_t	hw_features;
	/* user-requested features */
	netdev_features_t	wanted_features;
	/* mask of features inheritable by VLAN devices */
	netdev_features_t	vlan_features;

	/* Interface index. Unique device identifier	*/
	int			ifindex;
	int			iflink;

	struct net_device_stats	stats;
	atomic_long_t		rx_dropped; /* dropped packets by core network;
					     * do not use this in drivers
					     */

#ifdef CONFIG_WIRELESS_EXT
	/* List of functions to handle Wireless Extensions (instead of ioctl).
	 * See <net/iw_handler.h> for details. */
	const struct iw_handler_def *wireless_handlers;
	/* Instance data managed by the core of Wireless Extensions. */
	struct iw_public_data	*wireless_data;
#endif
	/* Management operations */
	const struct net_device_ops *netdev_ops;
	const struct ethtool_ops *ethtool_ops;

	/* Hardware header description */
	const struct header_ops *header_ops;

	unsigned int		flags;	/* interface flags (a la BSD)	*/
	unsigned int		priv_flags; /* Like 'flags' but invisible
					     * to userspace */
	unsigned short		gflags;
	unsigned short		padded;	/* How much padding added by alloc_netdev() */

	unsigned char		operstate; /* RFC2863 operstate */
	unsigned char		link_mode; /* mapping policy to operstate */

	unsigned char		if_port;	/* Selectable AUI, TP, ... */
	unsigned char		dma;		/* DMA channel		*/

	unsigned int		mtu;	/* interface MTU value		*/
	unsigned short		type;	/* interface hardware type	*/
	unsigned short		hard_header_len;	/* hardware hdr length	*/

	/* extra head- and tailroom the hardware may need, but not in all
	 * cases can this be guaranteed, especially tailroom. Some cases
	 * also use LL_MAX_HEADER instead to allocate the skb.
	 */
	unsigned short		needed_headroom;
	unsigned short		needed_tailroom;

	/* Interface address info. */
	unsigned char		perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
	unsigned char		addr_assign_type; /* hw address assignment type */
	unsigned char		addr_len;	/* hardware address length */
	unsigned char		neigh_priv_len;
	unsigned short		dev_id;		/* for shared network cards */

	spinlock_t		addr_list_lock;
	struct netdev_hw_addr_list	uc;	/* Unicast mac addresses */
	struct netdev_hw_addr_list	mc;	/* Multicast mac addresses */
	bool			uc_promisc;
	unsigned int		promiscuity;
	unsigned int		allmulti;


	/* Protocol specific pointers */

#if IS_ENABLED(CONFIG_VLAN_8021Q)
	struct vlan_info __rcu	*vlan_info;	/* VLAN info */
#endif
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_switch_tree	*dsa_ptr;	/* dsa specific data */
#endif
	void			*atalk_ptr;	/* AppleTalk link	*/
	struct in_device __rcu	*ip_ptr;	/* IPv4 specific data	*/
	struct dn_dev __rcu	*dn_ptr;	/* DECnet specific data */
	struct inet6_dev __rcu	*ip6_ptr;	/* IPv6 specific data */
	void			*ax25_ptr;	/* AX.25 specific data */
	struct wireless_dev	*ieee80211_ptr;	/* IEEE 802.11 specific data */

/*
 * Cache lines mostly used on receive path (including eth_type_trans())
 */
	unsigned long		last_rx;	/* Time of last Rx */

	struct net_device	*master; /* Pointer to master device of a
					  * group, which this device is a
					  * member of
					  */

	/* Interface address info used in eth_type_trans() */
	unsigned char		*dev_addr;	/* hw address (before bcast,
						   because most packets are
						   unicast) */

	struct netdev_hw_addr_list	dev_addrs; /* list of device
						      hw addresses */

	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast address */

#ifdef CONFIG_SYSFS
	struct kset		*queues_kset;
#endif

#ifdef CONFIG_RPS
	struct netdev_rx_queue	*_rx;

	/* Number of RX queues allocated at register_netdev() time */
	unsigned int		num_rx_queues;

	/* Number of RX queues currently active in device */
	unsigned int		real_num_rx_queues;

#ifdef CONFIG_RFS_ACCEL
	/* CPU reverse-mapping for RX completion interrupts, indexed
	 * by RX queue number.  Assigned by driver.  This must only be
	 * set if the ndo_rx_flow_steer operation is defined. */
	struct cpu_rmap		*rx_cpu_rmap;
#endif
#endif

	rx_handler_func_t __rcu	*rx_handler;
	void __rcu		*rx_handler_data;

	struct netdev_queue __rcu *ingress_queue;

/*
 * Cache lines mostly used on transmit path
 */
	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;

	/* Number of TX queues allocated at alloc_netdev_mq() time */
	unsigned int		num_tx_queues;

	/* Number of TX queues currently active in device */
	unsigned int		real_num_tx_queues;

	/* root qdisc from userspace point of view */
	struct Qdisc		*qdisc;

	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
	spinlock_t		tx_global_lock;

#ifdef CONFIG_XPS
	struct xps_dev_maps __rcu *xps_maps;
#endif

	/*
	 * trans_start here is expensive for high speed devices on SMP;
	 * please use netdev_queue->trans_start instead.
	 */
	unsigned long		trans_start;	/* Time (in jiffies) of last Tx	*/

	int			watchdog_timeo; /* used by dev_watchdog() */
	struct timer_list	watchdog_timer;

	/* Number of references to this device */
	int __percpu		*pcpu_refcnt;

	/* delayed register/unregister */
	struct list_head	todo_list;
	/* device index hash chain */
	struct hlist_node	index_hlist;

	struct list_head	link_watch_list;

	/* register/unregister state machine */
	enum { NETREG_UNINITIALIZED=0,
	       NETREG_REGISTERED,	/* completed register_netdevice */
	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
	       NETREG_UNREGISTERED,	/* completed unregister todo */
	       NETREG_RELEASED,		/* called free_netdev */
	       NETREG_DUMMY,		/* dummy device for NAPI poll */
	} reg_state:8;

	bool dismantle; /* device is going to be freed */

	enum {
		RTNL_LINK_INITIALIZED,
		RTNL_LINK_INITIALIZING,
	} rtnl_link_state:16;

	/* Called from unregister; can be used to call free_netdev */
	void (*destructor)(struct net_device *dev);

#ifdef CONFIG_NETPOLL
	struct netpoll_info	*npinfo;
#endif

#ifdef CONFIG_NET_NS
	/* Network namespace this network device is inside */
	struct net		*nd_net;
#endif

	/* mid-layer private */
	union {
		void				*ml_priv;
		struct pcpu_lstats __percpu	*lstats;
		struct pcpu_tstats __percpu	*tstats;
		struct pcpu_dstats __percpu	*dstats;
	};
	/* GARP */
	struct garp_port __rcu	*garp_port;

	/* class/net/name entry */
	struct device		dev;
	/* space for optional device, statistics, and wireless sysfs groups */
	const struct attribute_group *sysfs_groups[4];

	/* rtnetlink link ops */
	const struct rtnl_link_ops *rtnl_link_ops;

	/* upper bound on segment size accepted by GSO */
#define GSO_MAX_SIZE		65536
	unsigned int		gso_max_size;

#ifdef CONFIG_DCB
	/* Data Center Bridging netlink ops */
	const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
	u8 num_tc;
	struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
	u8 prio_tc_map[TC_BITMASK + 1];

#if IS_ENABLED(CONFIG_FCOE)
	/* max exchange id for FCoE LRO by ddp */
	unsigned int		fcoe_ddp_xid;
#endif
#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
	struct netprio_map __rcu *priomap;
#endif
	/* phy device may attach itself for hardware timestamping */
	struct phy_device *phydev;

	/* group the device belongs to */
	int group;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)

#define NETDEV_ALIGN		32

static inline
int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
{
	return dev->prio_tc_map[prio & TC_BITMASK];
}

static inline
int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

	dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
	return 0;
}

static inline
void netdev_reset_tc(struct net_device *dev)
{
	dev->num_tc = 0;
	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
}

static inline
int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

	dev->tc_to_txq[tc].count = count;
	dev->tc_to_txq[tc].offset = offset;
	return 0;
}

static inline
int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
{
	if (num_tc > TC_MAX_QUEUE)
		return -EINVAL;

	dev->num_tc = num_tc;
	return 0;
}

static inline
int netdev_get_num_tc(struct net_device *dev)
{
	return dev->num_tc;
}
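
/*
 * Example (sketch): a multiqueue driver carving eight TX queues into
 * two traffic classes and mapping the sixteen priorities onto them:
 *
 *	netdev_set_num_tc(dev, 2);
 *	netdev_set_tc_queue(dev, 0, 4, 0);	(TC0 = queues 0-3)
 *	netdev_set_tc_queue(dev, 1, 4, 4);	(TC1 = queues 4-7)
 *	for (prio = 0; prio <= TC_BITMASK; prio++)
 *		netdev_set_prio_tc_map(dev, prio, prio < 8 ? 0 : 1);
 */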

static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
					 unsigned int index)
{
	return &dev->_tx[index];
}

static inline void netdev_for_each_tx_queue(struct net_device *dev,
					    void (*f)(struct net_device *,
						      struct netdev_queue *,
						      void *),
					    void *arg)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		f(dev, &dev->_tx[i], arg);
}

/*
 * Net namespace inlines
 */
static inline
struct net *dev_net(const struct net_device *dev)
{
	return read_pnet(&dev->nd_net);
}

static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
#ifdef CONFIG_NET_NS
	release_net(dev->nd_net);
	dev->nd_net = hold_net(net);
#endif
}

static inline bool netdev_uses_dsa_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_DSA
	if (dev->dsa_ptr != NULL)
		return dsa_uses_dsa_tags(dev->dsa_ptr);
#endif

	return false;
}

static inline bool netdev_uses_trailer_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_TRAILER
	if (dev->dsa_ptr != NULL)
		return dsa_uses_trailer_tags(dev->dsa_ptr);
#endif

	return false;
}

/**
 *	netdev_priv - access network device private data
 *	@dev: network device
 *
 * Get network device private data
 */
static inline void *netdev_priv(const struct net_device *dev)
{
	return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
}
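
/*
 * Example (sketch; struct foo_priv is hypothetical): driver private
 * state is co-allocated with the device and reached via netdev_priv():
 *
 *	struct foo_priv {
 *		struct napi_struct napi;
 *		void __iomem *regs;
 *	};
 *
 *	dev = alloc_netdev(sizeof(struct foo_priv), "foo%d", ether_setup);
 *	priv = netdev_priv(dev);
 */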

/* Set the sysfs physical device reference for the network logical device
 * if set prior to registration it will remain.
 */
#define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))

/* Set the sysfs device type for the network logical device to allow
 * fine-grained identification of different network device types. For
 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
 */
#define SET_NETDEV_DEVTYPE(net, devtype)	((net)->dev.type = (devtype))

/**
 *	netif_napi_add - initialize a NAPI context
 *	@dev:  network device
 *	@napi: NAPI context
 *	@poll: polling function
 *	@weight: default weight
 *
 * netif_napi_add() must be used to initialize a NAPI context prior to calling
 * *any* of the other NAPI-related functions.
 */
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight);

/**
 *	netif_napi_del - remove a NAPI context
 *	@napi: NAPI context
 *
 * netif_napi_del() removes a NAPI context from the network device NAPI list
 */
void netif_napi_del(struct napi_struct *napi);

struct napi_gro_cb {
	/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
	void *frag0;

	/* Length of frag0. */
	unsigned int frag0_len;

	/* This indicates where we are processing relative to skb->data. */
	int data_offset;

	/* This is non-zero if the packet may be of the same flow. */
	int same_flow;

	/* This is non-zero if the packet cannot be merged with the new skb. */
	int flush;

	/* Number of segments aggregated. */
	int count;

	/* Free the skb? */
	int free;
#define NAPI_GRO_FREE		  1
#define NAPI_GRO_FREE_STOLEN_HEAD 2
};

#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)

struct packet_type {
	__be16			type;	/* This is really htons(ether_type). */
	struct net_device	*dev;	/* NULL is wildcarded here	     */
	int			(*func) (struct sk_buff *,
					 struct net_device *,
					 struct packet_type *,
					 struct net_device *);
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						netdev_features_t features);
	int			(*gso_send_check)(struct sk_buff *skb);
	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
						 struct sk_buff *skb);
	int			(*gro_complete)(struct sk_buff *skb);
	void			*af_packet_priv;
	struct list_head	list;
};
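
/*
 * Example (sketch; ETH_P_FOO and foo_rcv are hypothetical): a protocol
 * registers a receive hook for its ethertype with dev_add_pack():
 *
 *	static struct packet_type foo_packet_type __read_mostly = {
 *		.type	= cpu_to_be16(ETH_P_FOO),
 *		.func	= foo_rcv,
 *	};
 *
 *	dev_add_pack(&foo_packet_type);
 */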

#include <linux/notifier.h>

/* netdevice notifier chain. Please remember to update the rtnetlink
 * notification exclusion list in rtnetlink_event() when adding new
 * types.
 */
#define NETDEV_UP	0x0001	/* For now you can't veto a device up/down */
#define NETDEV_DOWN	0x0002
#define NETDEV_REBOOT	0x0003	/* Tell a protocol stack a network interface
				   detected a hardware crash and restarted
				   - we can use this eg to kick tcp sessions
				   once done */
#define NETDEV_CHANGE	0x0004	/* Notify device state change */
#define NETDEV_REGISTER	0x0005
#define NETDEV_UNREGISTER	0x0006
#define NETDEV_CHANGEMTU	0x0007
#define NETDEV_CHANGEADDR	0x0008
#define NETDEV_GOING_DOWN	0x0009
#define NETDEV_CHANGENAME	0x000A
#define NETDEV_FEAT_CHANGE	0x000B
#define NETDEV_BONDING_FAILOVER	0x000C
#define NETDEV_PRE_UP		0x000D
#define NETDEV_PRE_TYPE_CHANGE	0x000E
#define NETDEV_POST_TYPE_CHANGE	0x000F
#define NETDEV_POST_INIT	0x0010
#define NETDEV_UNREGISTER_BATCH	0x0011
#define NETDEV_RELEASE		0x0012
#define NETDEV_NOTIFY_PEERS	0x0013
#define NETDEV_JOIN		0x0014

extern int register_netdevice_notifier(struct notifier_block *nb);
extern int unregister_netdevice_notifier(struct notifier_block *nb);
extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
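
/*
 * Example (sketch; the foo_* names are hypothetical): reacting to device
 * events.  In this kernel the third notifier argument is the
 * struct net_device itself:
 *
 *	static int foo_netdev_event(struct notifier_block *nb,
 *				    unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UNREGISTER)
 *			foo_forget_dev(dev);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block foo_notifier = {
 *		.notifier_call = foo_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&foo_notifier);
 */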

extern rwlock_t				dev_base_lock;		/* Device list lock */

#define for_each_netdev(net, d)		\
		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_reverse(net, d)	\
		list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_rcu(net, d)		\
		list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n)	\
		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d)		\
		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue_rcu(net, d)		\
	list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
#define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)
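
/*
 * Example (sketch): the _rcu iterators may be used under
 * rcu_read_lock(); the plain ones require the RTNL or dev_base_lock:
 *
 *	struct net_device *dev;
 *
 *	rcu_read_lock();
 *	for_each_netdev_rcu(&init_net, dev)
 *		pr_info("%s\n", dev->name);
 *	rcu_read_unlock();
 */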

static inline struct net_device *next_net_device(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = dev->dev_list.next;
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *next_net_device_rcu(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = rcu_dereference(list_next_rcu(&dev->dev_list));
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *first_net_device(struct net *net)
{
	return list_empty(&net->dev_base_head) ? NULL :
		net_device_entry(net->dev_base_head.next);
}

static inline struct net_device *first_net_device_rcu(struct net *net)
{
	struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));

	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

extern int netdev_boot_setup_check(struct net_device *dev);
extern unsigned long netdev_boot_base(const char *prefix, int unit);
extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
					      const char *hwaddr);
extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern void dev_add_pack(struct packet_type *pt);
extern void dev_remove_pack(struct packet_type *pt);
extern void __dev_remove_pack(struct packet_type *pt);

extern struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
					       unsigned short mask);
extern struct net_device *dev_get_by_name(struct net *net, const char *name);
extern struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
extern struct net_device *__dev_get_by_name(struct net *net, const char *name);
extern int dev_alloc_name(struct net_device *dev, const char *name);
extern int dev_open(struct net_device *dev);
extern int dev_close(struct net_device *dev);
extern void dev_disable_lro(struct net_device *dev);
extern int dev_queue_xmit(struct sk_buff *skb);
extern int register_netdevice(struct net_device *dev);
extern void unregister_netdevice_queue(struct net_device *dev,
				       struct list_head *head);
extern void unregister_netdevice_many(struct list_head *head);
static inline void unregister_netdevice(struct net_device *dev)
{
	unregister_netdevice_queue(dev, NULL);
}

extern int netdev_refcnt_read(const struct net_device *dev);
extern void free_netdev(struct net_device *dev);
extern void synchronize_net(void);
extern int init_dummy_netdev(struct net_device *dev);
extern void netdev_resync_ops(struct net_device *dev);

extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
extern struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
extern int dev_restart(struct net_device *dev);
#ifdef CONFIG_NETPOLL_TRAP
extern int netpoll_trap(void);
#endif
extern int skb_gro_receive(struct sk_buff **head,
			   struct sk_buff *skb);
extern void skb_gro_reset_offset(struct sk_buff *skb);

static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->data_offset;
}

static inline unsigned int skb_gro_len(const struct sk_buff *skb)
{
	return skb->len - NAPI_GRO_CB(skb)->data_offset;
}

static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
{
	NAPI_GRO_CB(skb)->data_offset += len;
}

static inline void *skb_gro_header_fast(struct sk_buff *skb,
					unsigned int offset)
{
	return NAPI_GRO_CB(skb)->frag0 + offset;
}

static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
{
	return NAPI_GRO_CB(skb)->frag0_len < hlen;
}

static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
					unsigned int offset)
{
	if (!pskb_may_pull(skb, hlen))
		return NULL;

	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;
	return skb->data + offset;
}

static inline void *skb_gro_mac_header(struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb);
}

static inline void *skb_gro_network_header(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
	       skb_network_offset(skb);
}

static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
				  unsigned short type,
				  const void *daddr, const void *saddr,
				  unsigned int len)
{
	if (!dev->header_ops || !dev->header_ops->create)
		return 0;

	return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
}

static inline int dev_parse_header(const struct sk_buff *skb,
				   unsigned char *haddr)
{
	const struct net_device *dev = skb->dev;

	if (!dev->header_ops || !dev->header_ops->parse)
		return 0;
	return dev->header_ops->parse(skb, haddr);
}

typedef int gifconf_func_t(struct net_device *dev, char __user *bufptr, int len);
extern int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
static inline int unregister_gifconf(unsigned int family)
{
	return register_gifconf(family, NULL);
}

/*
 * Incoming packets are placed on per-cpu queues
 */
struct softnet_data {
	struct Qdisc		*output_queue;
	struct Qdisc		**output_queue_tailp;
	struct list_head	poll_list;
	struct sk_buff		*completion_queue;
	struct sk_buff_head	process_queue;

	/* stats */
	unsigned int		processed;
	unsigned int		time_squeeze;
	unsigned int		cpu_collision;
	unsigned int		received_rps;

#ifdef CONFIG_RPS
	struct softnet_data	*rps_ipi_list;

	/* Elements below can be accessed between CPUs for RPS */
	struct call_single_data	csd ____cacheline_aligned_in_smp;
	struct softnet_data	*rps_ipi_next;
	unsigned int		cpu;
	unsigned int		input_queue_head;
	unsigned int		input_queue_tail;
#endif
	unsigned int		dropped;
	struct sk_buff_head	input_pkt_queue;
	struct napi_struct	backlog;
};

static inline void input_queue_head_incr(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	sd->input_queue_head++;
#endif
}

static inline void input_queue_tail_incr_save(struct softnet_data *sd,
					      unsigned int *qtail)
{
#ifdef CONFIG_RPS
	*qtail = ++sd->input_queue_tail;
#endif
}

DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);

extern void __netif_schedule(struct Qdisc *q);

static inline void netif_schedule_queue(struct netdev_queue *txq)
{
	if (!(txq->state & QUEUE_STATE_ANY_XOFF))
		__netif_schedule(txq->qdisc);
}

static inline void netif_tx_schedule_all(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		netif_schedule_queue(netdev_get_tx_queue(dev, i));
}

static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
	clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 *	netif_start_queue - allow transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 */
static inline void netif_start_queue(struct net_device *dev)
{
	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_start_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_start_queue(txq);
	}
}

static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap()) {
		netif_tx_start_queue(dev_queue);
		return;
	}
#endif
	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state))
		__netif_schedule(dev_queue->qdisc);
}

/**
 *	netif_wake_queue - restart transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are available.
 */
static inline void netif_wake_queue(struct net_device *dev)
{
	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_wake_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_wake_queue(txq);
	}
}

static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
	if (WARN_ON(!dev_queue)) {
		pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
		return;
	}
	set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 *	netif_stop_queue - stop transmitted packets
 *	@dev: network device
 *
 *	Stop upper layers calling the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are unavailable.
 */
static inline void netif_stop_queue(struct net_device *dev)
{
	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_stop_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_stop_queue(txq);
	}
}

static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
	return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 *	netif_queue_stopped - test if transmit queue is flowblocked
 *	@dev: network device
 *
 *	Test if transmit queue on device is currently unable to send.
 */
static inline bool netif_queue_stopped(const struct net_device *dev)
{
	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}

static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_ANY_XOFF;
}

static inline bool netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
}

static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
					unsigned int bytes)
{
#ifdef CONFIG_BQL
	dql_queued(&dev_queue->dql, bytes);

	if (likely(dql_avail(&dev_queue->dql) >= 0))
		return;

	set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);

	/*
	 * The XOFF flag must be set before checking the dql_avail below,
	 * because in netdev_tx_completed_queue we update the dql_completed
	 * value to check if the queue has been stopped prematurely.
	 */
	smp_mb();

	/* check again in case another cpu has just made room available */
	if (unlikely(dql_avail(&dev_queue->dql) >= 0))
		clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
#endif
}

static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
{
	netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
}

static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
					     unsigned int pkts, unsigned int bytes)
{
#ifdef CONFIG_BQL
	if (unlikely(!bytes))
		return;

	dql_completed(&dev_queue->dql, bytes);

	/*
	 * Without the memory barrier there is a small possibility that
	 * netdev_tx_sent_queue will miss the update and cause the queue
	 * to be stopped forever.
	 */
	smp_mb();

	if (dql_avail(&dev_queue->dql) < 0)
		return;

	if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
		netif_schedule_queue(dev_queue);
#endif
}

static inline void netdev_completed_queue(struct net_device *dev,
					  unsigned int pkts, unsigned int bytes)
{
	netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
}
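
/*
 * Example (sketch): byte queue limits bracket the TX path; the driver
 * reports bytes handed to the hardware at xmit time and bytes reclaimed
 * at completion time, and the stack throttles the queue in between:
 *
 *	(in ndo_start_xmit)
 *	netdev_sent_queue(dev, skb->len);
 *
 *	(in the TX completion handler, after reclaiming descriptors)
 *	netdev_completed_queue(dev, pkts_done, bytes_done);
 */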

static inline void netdev_tx_reset_queue(struct netdev_queue *q)
{
#ifdef CONFIG_BQL
	clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
	dql_reset(&q->dql);
#endif
}

static inline void netdev_reset_queue(struct net_device *dev_queue)
{
	netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
}

/**
 *	netif_running - test if up
 *	@dev: network device
 *
 *	Test if the device has been brought up.
 */
static inline bool netif_running(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}

/*
 * Routines to manage the subqueues on a device.  We only need start,
 * stop, and a check if it's stopped.  All other device management is
 * done at the overall netdevice level.
 * Also test the device if we're multiqueue.
 */

/**
 *	netif_start_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Start individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	netif_tx_start_queue(txq);
}

/**
 *	netif_stop_subqueue - stop sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Stop individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	netif_tx_stop_queue(txq);
}

/**
 *	__netif_subqueue_stopped - test status of subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Check individual transmit queue of a device with multiple transmit queues.
 */
static inline bool __netif_subqueue_stopped(const struct net_device *dev,
					    u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	return netif_tx_queue_stopped(txq);
}

static inline bool netif_subqueue_stopped(const struct net_device *dev,
					  struct sk_buff *skb)
{
	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}

/**
 *	netif_wake_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Resume individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
		__netif_schedule(txq->qdisc);
}

/*
 * Returns a Tx hash for the given packet when dev->real_num_tx_queues is
 * used as a distribution range limit for the returned value.
 */
static inline u16 skb_tx_hash(const struct net_device *dev,
			      const struct sk_buff *skb)
{
	return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
}

/**
 *	netif_is_multiqueue - test if device has multiple transmit queues
 *	@dev: network device
 *
 * Check if device has multiple transmit queues
 */
static inline bool netif_is_multiqueue(const struct net_device *dev)
{
	return dev->num_tx_queues > 1;
}

extern int netif_set_real_num_tx_queues(struct net_device *dev,
					unsigned int txq);

#ifdef CONFIG_RPS
extern int netif_set_real_num_rx_queues(struct net_device *dev,
					unsigned int rxq);
#else
static inline int netif_set_real_num_rx_queues(struct net_device *dev,
					       unsigned int rxq)
{
	return 0;
}
#endif

static inline int netif_copy_real_num_queues(struct net_device *to_dev,
					     const struct net_device *from_dev)
{
	netif_set_real_num_tx_queues(to_dev, from_dev->real_num_tx_queues);
#ifdef CONFIG_RPS
	return netif_set_real_num_rx_queues(to_dev,
					    from_dev->real_num_rx_queues);
#else
	return 0;
#endif
}

/* Use this variant when it is known for sure that it
 * is executing from hardware interrupt context or with hardware interrupts
 * disabled.
 */
extern void dev_kfree_skb_irq(struct sk_buff *skb);

/* Use this variant in places where it could be invoked
 * from either hardware interrupt or other context, with hardware interrupts
 * either disabled or enabled.
 */
extern void dev_kfree_skb_any(struct sk_buff *skb);

extern int netif_rx(struct sk_buff *skb);
extern int netif_rx_ni(struct sk_buff *skb);
extern int netif_receive_skb(struct sk_buff *skb);
extern gro_result_t dev_gro_receive(struct napi_struct *napi,
				    struct sk_buff *skb);
extern gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb);
extern gro_result_t napi_gro_receive(struct napi_struct *napi,
				     struct sk_buff *skb);
extern void napi_gro_flush(struct napi_struct *napi);
extern struct sk_buff *napi_get_frags(struct napi_struct *napi);
extern gro_result_t napi_frags_finish(struct napi_struct *napi,
				      struct sk_buff *skb,
				      gro_result_t ret);
extern gro_result_t napi_gro_frags(struct napi_struct *napi);

static inline void napi_free_frags(struct napi_struct *napi)
{
	kfree_skb(napi->skb);
	napi->skb = NULL;
}

extern int netdev_rx_handler_register(struct net_device *dev,
				      rx_handler_func_t *rx_handler,
				      void *rx_handler_data);
extern void netdev_rx_handler_unregister(struct net_device *dev);

extern bool dev_valid_name(const char *name);
extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
extern int dev_ethtool(struct net *net, struct ifreq *);
extern unsigned int dev_get_flags(const struct net_device *);
extern int __dev_change_flags(struct net_device *, unsigned int flags);
extern int dev_change_flags(struct net_device *, unsigned int);
extern void __dev_notify_flags(struct net_device *, unsigned int old_flags);
extern int dev_change_name(struct net_device *, const char *);
extern int dev_set_alias(struct net_device *, const char *, size_t);
extern int dev_change_net_namespace(struct net_device *,
				    struct net *, const char *);
extern int dev_set_mtu(struct net_device *, int);
extern void dev_set_group(struct net_device *, int);
extern int dev_set_mac_address(struct net_device *,
			       struct sockaddr *);
extern int dev_hard_start_xmit(struct sk_buff *skb,
			       struct net_device *dev,
			       struct netdev_queue *txq);
extern int dev_forward_skb(struct net_device *dev,
			   struct sk_buff *skb);

extern int netdev_budget;

/* Called by rtnetlink.c:rtnl_unlock() */
extern void netdev_run_todo(void);

/**
 *	dev_put - release reference to device
 *	@dev: network device
 *
 * Release reference to device to allow it to be freed.
 */
static inline void dev_put(struct net_device *dev)
{
	this_cpu_dec(*dev->pcpu_refcnt);
}

/**
 *	dev_hold - get reference to device
 *	@dev: network device
 *
 * Hold reference to device to keep it from being freed.
 */
static inline void dev_hold(struct net_device *dev)
{
	this_cpu_inc(*dev->pcpu_refcnt);
}
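
/*
 * Example (sketch; priv->peer_dev is hypothetical): code that caches a
 * struct net_device pointer beyond the current RCU read-side critical
 * section must take a reference and release it later:
 *
 *	dev_hold(dev);
 *	priv->peer_dev = dev;
 *	...
 *	dev_put(priv->peer_dev);
 *	priv->peer_dev = NULL;
 */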

/* Carrier loss detection, dial on demand.  The functions netif_carrier_on
 * and netif_carrier_off may be called from IRQ context, but it is the
 * caller who is responsible for serialization of these calls.
 *
 * The name carrier is inappropriate; these functions should really be
 * called netif_lowerlayer_*() because they represent the state of any
 * kind of lower layer, not just hardware media.
 */
extern void linkwatch_fire_event(struct net_device *dev);
extern void linkwatch_forget_dev(struct net_device *dev);

/**
 *	netif_carrier_ok - test if carrier present
 *	@dev: network device
 *
 * Check if carrier is present on device
 */
static inline bool netif_carrier_ok(const struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}

extern unsigned long dev_trans_start(struct net_device *dev);

extern void __netdev_watchdog_up(struct net_device *dev);

extern void netif_carrier_on(struct net_device *dev);

extern void netif_carrier_off(struct net_device *dev);

extern void netif_notify_peers(struct net_device *dev);

/**
 *	netif_dormant_on - mark device as dormant.
 *	@dev: network device
 *
 * Mark device as dormant (as per RFC2863).
 *
 * The dormant state indicates that the relevant interface is not
 * actually in a condition to pass packets (i.e., it is not 'up') but is
 * in a "pending" state, waiting for some external event.  For "on-
 * demand" interfaces, this new state identifies the situation where the
 * interface is waiting for events to place it in the up state.
 */
static inline void netif_dormant_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant_off - set device as not dormant.
 *	@dev: network device
 *
 * Device is not in dormant state.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant - test if device is dormant
 *	@dev: network device
 *
 * Check if device is dormant.
 */
static inline bool netif_dormant(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_DORMANT, &dev->state);
}

/**
 *	netif_oper_up - test if device is operational
 *	@dev: network device
 *
 * Check if carrier is operational
 */
static inline bool netif_oper_up(const struct net_device *dev)
{
	return (dev->operstate == IF_OPER_UP ||
		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}

/**
 *	netif_device_present - is device available or removed
 *	@dev: network device
 *
 * Check if device has not been removed from system.
 */
static inline bool netif_device_present(struct net_device *dev)
{
	return test_bit(__LINK_STATE_PRESENT, &dev->state);
}

extern void netif_device_detach(struct net_device *dev);

extern void netif_device_attach(struct net_device *dev);

/*
 * Network interface message level settings
 */
enum {
	NETIF_MSG_DRV		= 0x0001,
	NETIF_MSG_PROBE		= 0x0002,
	NETIF_MSG_LINK		= 0x0004,
	NETIF_MSG_TIMER		= 0x0008,
	NETIF_MSG_IFDOWN	= 0x0010,
	NETIF_MSG_IFUP		= 0x0020,
	NETIF_MSG_RX_ERR	= 0x0040,
	NETIF_MSG_TX_ERR	= 0x0080,
	NETIF_MSG_TX_QUEUED	= 0x0100,
	NETIF_MSG_INTR		= 0x0200,
	NETIF_MSG_TX_DONE	= 0x0400,
	NETIF_MSG_RX_STATUS	= 0x0800,
	NETIF_MSG_PKTDATA	= 0x1000,
	NETIF_MSG_HW		= 0x2000,
	NETIF_MSG_WOL		= 0x4000,
};

#define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)

static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
	/* use default */
	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
		return default_msg_enable_bits;
	if (debug_value == 0)	/* no output */
		return 0;
	/* set low N bits */
	return (1 << debug_value) - 1;
}
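
/*
 * Example (sketch; debug and priv are hypothetical driver variables):
 * drivers typically feed a module parameter through netif_msg_init() at
 * probe time and gate their logging on the result:
 *
 *	priv->msg_enable = netif_msg_init(debug,
 *					  NETIF_MSG_DRV | NETIF_MSG_LINK);
 *	...
 *	if (netif_msg_link(priv))
 *		netdev_info(dev, "link is up\n");
 */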

static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
	spin_lock(&txq->_xmit_lock);
	txq->xmit_lock_owner = cpu;
}

static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
	spin_lock_bh(&txq->_xmit_lock);
	txq->xmit_lock_owner = smp_processor_id();
}

static inline bool __netif_tx_trylock(struct netdev_queue *txq)
{
	bool ok = spin_trylock(&txq->_xmit_lock);
	if (likely(ok))
		txq->xmit_lock_owner = smp_processor_id();
	return ok;
}

static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock(&txq->_xmit_lock);
}

static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock_bh(&txq->_xmit_lock);
}

static inline void txq_trans_update(struct netdev_queue *txq)
{
	if (txq->xmit_lock_owner != -1)
		txq->trans_start = jiffies;
}

/**
 *	netif_tx_lock - grab network device transmit lock
 *	@dev: network device
 *
 * Get network device transmit lock
 */
static inline void netif_tx_lock(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	spin_lock(&dev->tx_global_lock);
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* We are the only thread of execution doing a
		 * freeze, but we have to grab the _xmit_lock in
		 * order to synchronize with threads which are in
		 * the ->hard_start_xmit() handler and already
		 * checked the frozen bit.
		 */
		__netif_tx_lock(txq, cpu);
		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
		__netif_tx_unlock(txq);
	}
}

static inline void netif_tx_lock_bh(struct net_device *dev)
{
	local_bh_disable();
	netif_tx_lock(dev);
}

static inline void netif_tx_unlock(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* No need to grab the _xmit_lock here.  If the
		 * queue is not stopped for another reason, we
		 * force a schedule.
		 */
		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
		netif_schedule_queue(txq);
	}
	spin_unlock(&dev->tx_global_lock);
}

static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	netif_tx_unlock(dev);
	local_bh_enable();
}

#define HARD_TX_LOCK(dev, txq, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_lock(txq, cpu);		\
	}						\
}

#define HARD_TX_UNLOCK(dev, txq) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_unlock(txq);			\
	}						\
}

static inline void netif_tx_disable(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	local_bh_disable();
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		__netif_tx_lock(txq, cpu);
		netif_tx_stop_queue(txq);
		__netif_tx_unlock(txq);
	}
	local_bh_enable();
}

static inline void netif_addr_lock(struct net_device *dev)
{
	spin_lock(&dev->addr_list_lock);
}

static inline void netif_addr_lock_nested(struct net_device *dev)
{
	spin_lock_nested(&dev->addr_list_lock, SINGLE_DEPTH_NESTING);
}

static inline void netif_addr_lock_bh(struct net_device *dev)
{
	spin_lock_bh(&dev->addr_list_lock);
}

static inline void netif_addr_unlock(struct net_device *dev)
{
	spin_unlock(&dev->addr_list_lock);
}

static inline void netif_addr_unlock_bh(struct net_device *dev)
{
	spin_unlock_bh(&dev->addr_list_lock);
}

/*
 * dev_addrs walker. Should be used only for read access. Call with
 * rcu_read_lock held.
 */
#define for_each_dev_addr(dev, ha) \
		list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)

extern void		ether_setup(struct net_device *dev);

/* Support for loadable net-drivers */
extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
				       void (*setup)(struct net_device *),
				       unsigned int txqs, unsigned int rxqs);
#define alloc_netdev(sizeof_priv, name, setup) \
	alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)

#define alloc_netdev_mq(sizeof_priv, name, setup, count) \
	alloc_netdev_mqs(sizeof_priv, name, setup, count, count)

extern int		register_netdev(struct net_device *dev);
extern void		unregister_netdev(struct net_device *dev);
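
/*
 * Example (sketch; struct foo_priv is hypothetical): the usual lifetime
 * of a device built on these helpers:
 *
 *	dev = alloc_netdev(sizeof(struct foo_priv), "foo%d", ether_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 *	...
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 */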
2530
2531
2532extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
2533 struct netdev_hw_addr_list *from_list,
2534 int addr_len, unsigned char addr_type);
2535extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
2536 struct netdev_hw_addr_list *from_list,
2537 int addr_len, unsigned char addr_type);
2538extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
2539 struct netdev_hw_addr_list *from_list,
2540 int addr_len);
2541extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
2542 struct netdev_hw_addr_list *from_list,
2543 int addr_len);
2544extern void __hw_addr_flush(struct netdev_hw_addr_list *list);
2545extern void __hw_addr_init(struct netdev_hw_addr_list *list);
2546
2547
2548extern int dev_addr_add(struct net_device *dev, unsigned char *addr,
2549 unsigned char addr_type);
2550extern int dev_addr_del(struct net_device *dev, unsigned char *addr,
2551 unsigned char addr_type);
2552extern int dev_addr_add_multiple(struct net_device *to_dev,
2553 struct net_device *from_dev,
2554 unsigned char addr_type);
2555extern int dev_addr_del_multiple(struct net_device *to_dev,
2556 struct net_device *from_dev,
2557 unsigned char addr_type);
2558extern void dev_addr_flush(struct net_device *dev);
2559extern int dev_addr_init(struct net_device *dev);
2560
2561
2562extern int dev_uc_add(struct net_device *dev, unsigned char *addr);
2563extern int dev_uc_add_excl(struct net_device *dev, unsigned char *addr);
2564extern int dev_uc_del(struct net_device *dev, unsigned char *addr);
2565extern int dev_uc_sync(struct net_device *to, struct net_device *from);
2566extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
2567extern void dev_uc_flush(struct net_device *dev);
2568extern void dev_uc_init(struct net_device *dev);
2569
2570
2571extern int dev_mc_add(struct net_device *dev, unsigned char *addr);
2572extern int dev_mc_add_global(struct net_device *dev, unsigned char *addr);
2573extern int dev_mc_add_excl(struct net_device *dev, unsigned char *addr);
2574extern int dev_mc_del(struct net_device *dev, unsigned char *addr);
2575extern int dev_mc_del_global(struct net_device *dev, unsigned char *addr);
2576extern int dev_mc_sync(struct net_device *to, struct net_device *from);
2577extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
2578extern void dev_mc_flush(struct net_device *dev);
2579extern void dev_mc_init(struct net_device *dev);

/* Functions used for secondary unicast and multicast support */
extern void dev_set_rx_mode(struct net_device *dev);
extern void __dev_set_rx_mode(struct net_device *dev);
extern int dev_set_promiscuity(struct net_device *dev, int inc);
extern int dev_set_allmulti(struct net_device *dev, int inc);
extern void netdev_state_change(struct net_device *dev);
extern int netdev_bonding_change(struct net_device *dev,
				 unsigned long event);
extern void netdev_features_change(struct net_device *dev);
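
/*
 * dev_set_promiscuity() and dev_set_allmulti() are reference counted:
 * each positive increment must eventually be balanced by the matching
 * negative one.  Sketch (RTNL held):
 *
 *	err = dev_set_promiscuity(dev, 1);
 *	if (err)
 *		return err;
 *	...
 *	dev_set_promiscuity(dev, -1);
 */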

/* Load a device via the kmod */
extern void dev_load(struct net *net, const char *name);
extern void dev_mcast_init(void);
extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					       struct rtnl_link_stats64 *storage);
extern void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
				    const struct net_device_stats *netdev_stats);
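
/*
 * A driver that only maintains the legacy dev->stats counters can build
 * its 64-bit stats with netdev_stats_to_stats64().  Hedged sketch of an
 * ndo_get_stats64 implementation ("my_get_stats64" is hypothetical):
 *
 *	static struct rtnl_link_stats64 *
 *	my_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *s)
 *	{
 *		netdev_stats_to_stats64(s, &dev->stats);
 *		return s;
 *	}
 */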

extern int netdev_max_backlog;
extern int netdev_tstamp_prequeue;
extern int weight_p;
extern int bpf_jit_enable;
extern int netdev_set_master(struct net_device *dev, struct net_device *master);
extern int netdev_set_bond_master(struct net_device *dev,
				  struct net_device *master);
extern int skb_checksum_help(struct sk_buff *skb);
extern struct sk_buff *skb_gso_segment(struct sk_buff *skb,
				       netdev_features_t features);
#ifdef CONFIG_BUG
extern void netdev_rx_csum_fault(struct net_device *dev);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
}
#endif

/* rx skb timestamps */
extern void net_enable_timestamp(void);
extern void net_disable_timestamp(void);

#ifdef CONFIG_PROC_FS
extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
extern void dev_seq_stop(struct seq_file *seq, void *v);
#endif

extern int netdev_class_create_file(struct class_attribute *class_attr);
extern void netdev_class_remove_file(struct class_attribute *class_attr);

extern struct kobj_ns_type_operations net_ns_type_operations;

extern const char *netdev_drivername(const struct net_device *dev);

extern void linkwatch_run_queue(void);

static inline netdev_features_t netdev_get_wanted_features(
	struct net_device *dev)
{
	return (dev->features & ~dev->hw_features) | dev->wanted_features;
}
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask);
int __netdev_update_features(struct net_device *dev);
void netdev_update_features(struct net_device *dev);
void netdev_change_features(struct net_device *dev);
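
/*
 * Feature negotiation in a nutshell: a driver advertises togglable bits
 * in dev->hw_features (usually at probe time) and calls
 * netdev_update_features() whenever its constraints change, letting the
 * core recompute dev->features.  Minimal sketch, RTNL held:
 *
 *	dev->hw_features |= NETIF_F_RXCSUM;
 *	...
 *	netdev_update_features(dev);
 */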

void netif_stacked_transfer_operstate(const struct net_device *rootdev,
				      struct net_device *dev);

netdev_features_t netif_skb_features(struct sk_buff *skb);

static inline bool net_gso_ok(netdev_features_t features, int gso_type)
{
	netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;

	/* check flags correspondence */
	BUILD_BUG_ON(SKB_GSO_TCPV4   != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP     != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_DODGY   != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCPV6   != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_FCOE    != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));

	return (features & feature) == feature;
}

static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
{
	return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
	       (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
}

static inline bool netif_needs_gso(struct sk_buff *skb,
				   netdev_features_t features)
{
	return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
		unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
			 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
}
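
/*
 * Transmit paths use these helpers roughly the way the core does:
 * software-segment only when the device cannot take the GSO skb as-is.
 * Illustrative sketch (error handling abbreviated):
 *
 *	netdev_features_t features = netif_skb_features(skb);
 *
 *	if (netif_needs_gso(skb, features)) {
 *		struct sk_buff *segs = skb_gso_segment(skb, features);
 *
 *		if (IS_ERR(segs))
 *			goto drop;
 *		... queue each segment for transmission ...
 *	}
 */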

static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	dev->gso_max_size = size;
}

static inline bool netif_is_bond_slave(struct net_device *dev)
{
	return (dev->flags & IFF_SLAVE) && (dev->priv_flags & IFF_BONDING);
}

static inline bool netif_supports_nofcs(struct net_device *dev)
{
	return dev->priv_flags & IFF_SUPP_NOFCS;
}

extern struct pernet_operations __net_initdata loopback_net_ops;

/* Logging, debugging and troubleshooting/diagnostic helpers. */

/* netdev_printk helpers, similar to dev_printk */

static inline const char *netdev_name(const struct net_device *dev)
{
	if (dev->reg_state != NETREG_REGISTERED)
		return "(unregistered net_device)";
	return dev->name;
}

extern int __netdev_printk(const char *level, const struct net_device *dev,
			   struct va_format *vaf);

extern __printf(3, 4)
int netdev_printk(const char *level, const struct net_device *dev,
		  const char *format, ...);
extern __printf(2, 3)
int netdev_emerg(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_alert(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_crit(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_err(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_warn(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_notice(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_info(const struct net_device *dev, const char *format, ...);
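
/*
 * These behave like the dev_<level>() family but also print the driver
 * and interface name, e.g.:
 *
 *	netdev_err(dev, "TX timeout on queue %d\n", i);
 */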

#define MODULE_ALIAS_NETDEV(device) \
	MODULE_ALIAS("netdev-" device)
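
/*
 * dev_load() above resolves unknown interface names by requesting the
 * module "netdev-<name>", so a driver declaring, say,
 * MODULE_ALIAS_NETDEV("gre0") is auto-loaded on first reference to
 * "gre0".
 */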

#if defined(CONFIG_DYNAMIC_DEBUG)
#define netdev_dbg(__dev, format, args...)			\
do {								\
	dynamic_netdev_dbg(__dev, format, ##args);		\
} while (0)
#elif defined(DEBUG)
#define netdev_dbg(__dev, format, args...)			\
	netdev_printk(KERN_DEBUG, __dev, format, ##args)
#else
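/*
 * Compiled-out variant: the "if (0)" keeps the compiler's format-string
 * and argument type checking while generating no code.
 */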
#define netdev_dbg(__dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, __dev, format, ##args); \
	0;							\
})
#endif

#if defined(VERBOSE_DEBUG)
#define netdev_vdbg	netdev_dbg
#else

#define netdev_vdbg(dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, dev, format, ##args);	\
	0;							\
})
#endif

/*
 * netdev_WARN() acts like dev_printk(), but with the key difference
 * of using a WARN/WARN_ON to get the message out, including the
 * file/line information and a backtrace.
 */
#define netdev_WARN(dev, format, args...)			\
	WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args)

/* netif printk helpers, similar to netdev_printk */

#define netif_printk(priv, type, level, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_printk(level, (dev), fmt, ##args);	\
} while (0)

#define netif_level(level, priv, type, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_##level(dev, fmt, ##args);		\
} while (0)

#define netif_emerg(priv, type, dev, fmt, args...)		\
	netif_level(emerg, priv, type, dev, fmt, ##args)
#define netif_alert(priv, type, dev, fmt, args...)		\
	netif_level(alert, priv, type, dev, fmt, ##args)
#define netif_crit(priv, type, dev, fmt, args...)		\
	netif_level(crit, priv, type, dev, fmt, ##args)
#define netif_err(priv, type, dev, fmt, args...)		\
	netif_level(err, priv, type, dev, fmt, ##args)
#define netif_warn(priv, type, dev, fmt, args...)		\
	netif_level(warn, priv, type, dev, fmt, ##args)
#define netif_notice(priv, type, dev, fmt, args...)		\
	netif_level(notice, priv, type, dev, fmt, ##args)
#define netif_info(priv, type, dev, fmt, args...)		\
	netif_level(info, priv, type, dev, fmt, ##args)
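
/*
 * Drivers gate these on a msg_enable bitmap in their private struct,
 * conventionally seeded with netif_msg_init().  Hedged sketch ("priv"
 * and "speed" are assumed driver-local):
 *
 *	priv->msg_enable = netif_msg_init(debug,
 *					  NETIF_MSG_DRV | NETIF_MSG_LINK);
 *	...
 *	netif_info(priv, link, dev, "link up, %u Mb/s\n", speed);
 */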

#if defined(CONFIG_DYNAMIC_DEBUG)
#define netif_dbg(priv, type, netdev, format, args...)		\
do {								\
	if (netif_msg_##type(priv))				\
		dynamic_netdev_dbg(netdev, format, ##args);	\
} while (0)
#elif defined(DEBUG)
#define netif_dbg(priv, type, dev, format, args...)		\
	netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
#else
#define netif_dbg(priv, type, dev, format, args...)			\
({									\
	if (0)								\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;								\
})
#endif

#if defined(VERBOSE_DEBUG)
#define netif_vdbg	netif_dbg
#else
#define netif_vdbg(priv, type, dev, format, args...)		\
({								\
	if (0)							\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;							\
})
#endif

#endif /* __KERNEL__ */

#endif /* _LINUX_NETDEVICE_H */