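/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 */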
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/if_link.h>

#ifdef __KERNEL__
#include <linux/pm_qos.h>
#include <linux/timer.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>
#include <linux/dynamic_queue_limits.h>

#include <linux/ethtool.h>
#include <net/net_namespace.h>
#include <net/dsa.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif
#include <net/netprio_cgroup.h>

#include <linux/netdev_features.h>
#include <linux/neighbour.h>

struct netpoll_info;
struct device;
struct phy_device;

struct wireless_dev;

#define SET_ETHTOOL_OPS(netdev, ops) \
	((netdev)->ethtool_ops = (ops))

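/* hardware address assignment types */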
#define NET_ADDR_PERM		0	/* address is permanent (default) */
#define NET_ADDR_RANDOM		1	/* address is generated randomly */
#define NET_ADDR_STOLEN		2	/* address is stolen from another device */

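/* Backlog congestion levels */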
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */

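/*
 * Transmit return codes: transmit return codes originate from three different
 * namespaces:
 *
 * - qdisc return codes
 * - driver transmit return codes
 * - errno values
 *
 * Drivers are allowed to return any one of those in their hard_start_xmit()
 * function.  Real network devices commonly used with qdiscs should only
 * return the driver transmit return codes though - when qdiscs are used, the
 * actual transmission happens asynchronously, so the value is not propagated
 * to higher layers.  Virtual network devices transmit synchronously; in this
 * case the driver transmit return codes are consumed by dev_queue_xmit(),
 * and all others are propagated to higher layers.
 */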
/* qdisc ->enqueue() return codes. */
#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_DROP		0x01	/* skb dropped			*/
#define NET_XMIT_CN		0x02	/* congestion notification	*/
#define NET_XMIT_POLICED	0x03	/* skb is shot by police	*/
#define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */

/* NET_XMIT_CN is special.  It does not guarantee that this packet is lost.
 * It indicates that the device will soon be dropping packets, or already
 * drops some packets of the same priority, prompting us to send less
 * aggressively.
 */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)

/* Driver transmit return codes */
#define NETDEV_TX_MASK		0xf0

enum netdev_tx {
	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy */
	NETDEV_TX_LOCKED = 0x20,	/* driver tx lock was already taken */
};
typedef enum netdev_tx netdev_tx_t;

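/*
 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
 * hard_start_xmit() must guarantee this.
 */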
static inline bool dev_xmit_complete(int rc)
{
	/*
	 * Positive cases with an skb consumed by a driver:
	 * - successful transmission (rc == NETDEV_TX_OK)
	 * - error while transmitting (rc < 0)
	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
	 */
	if (likely(rc < NET_XMIT_MASK))
		return true;

	return false;
}

#endif

#define MAX_ADDR_LEN	32		/* Largest hardware address length */

/* Initial net device group.  All devices belong to group 0 by default. */
#define INIT_NETDEV_GROUP	0

#ifdef __KERNEL__

/*
 *	Compute the worst case header length according to the protocols
 *	used.
 */
#if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#elif IS_ENABLED(CONFIG_TR)
# define LL_MAX_HEADER 48
#else
# define LL_MAX_HEADER 32
#endif

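/* Leave room for one 48-byte encapsulating header on top of the
 * link-layer header when any tunnel protocol is enabled.
 */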
#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
    !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

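/*
 *	Old network device statistics.  Fields are native words
 *	(unsigned long) so they can be read and written atomically.
 */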
struct net_device_stats {
	unsigned long	rx_packets;
	unsigned long	tx_packets;
	unsigned long	rx_bytes;
	unsigned long	tx_bytes;
	unsigned long	rx_errors;
	unsigned long	tx_errors;
	unsigned long	rx_dropped;
	unsigned long	tx_dropped;
	unsigned long	multicast;
	unsigned long	collisions;
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;
	unsigned long	rx_crc_errors;
	unsigned long	rx_frame_errors;
	unsigned long	rx_fifo_errors;
	unsigned long	rx_missed_errors;
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};

#endif
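/* Media selection options. */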
enum {
	IF_PORT_UNKNOWN = 0,
	IF_PORT_10BASE2,
	IF_PORT_10BASET,
	IF_PORT_AUI,
	IF_PORT_100BASET,
	IF_PORT_100BASETX,
	IF_PORT_100BASEFX
};

#ifdef __KERNEL__

#include <linux/cache.h>
#include <linux/skbuff.h>

#ifdef CONFIG_RPS
#include <linux/static_key.h>
extern struct static_key rps_needed;
#endif

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netdev_hw_addr {
	struct list_head	list;
	unsigned char		addr[MAX_ADDR_LEN];
	unsigned char		type;
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_SLAVE		3
#define NETDEV_HW_ADDR_T_UNICAST	4
#define NETDEV_HW_ADDR_T_MULTICAST	5
	bool			synced;
	bool			global_use;
	int			refcount;
	struct rcu_head		rcu_head;
};

struct netdev_hw_addr_list {
	struct list_head	list;
	int			count;
};

#define netdev_hw_addr_list_count(l) ((l)->count)
#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
#define netdev_hw_addr_list_for_each(ha, l) \
	list_for_each_entry(ha, &(l)->list, list)

#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->uc)

#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->mc)

struct hh_cache {
	u16		hh_len;
	u16		__pad;
	seqlock_t	hh_lock;

	/* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len) + (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

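/* Reserve HH_DATA_MOD byte-aligned headroom for the hard header plus any
 * extra headroom the driver requested, rounded up to a multiple of
 * HH_DATA_MOD so the network-layer data starts on an aligned boundary.
 */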
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)

struct header_ops {
	int	(*create)(struct sk_buff *skb, struct net_device *dev,
			  unsigned short type, const void *daddr,
			  const void *saddr, unsigned int len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*rebuild)(struct sk_buff *skb);
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
};

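/* These flag bits are private to the generic network queueing
 * layer; they may not be explicitly referenced by any other code.
 */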
enum netdev_state_t {
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
};

/*
 * This structure holds at boot time configured netdevice settings.  They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

extern int __init netdev_boot_setup(char *str);

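/*
 * Structure for NAPI scheduling similar to tasklet but with weighting.
 *
 * The poll_list must only be managed by the entity which changes the
 * state of the NAPI_STATE_SCHED bit.  This means whoever atomically sets
 * that bit can add this napi_struct to the per-cpu poll_list, and whoever
 * clears that bit can remove from the list right before clearing the bit.
 */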
struct napi_struct {
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	spinlock_t		poll_lock;
	int			poll_owner;
#endif

	unsigned int		gro_count;

	struct net_device	*dev;
	struct list_head	dev_list;
	struct sk_buff		*gro_list;
	struct sk_buff		*skb;
};

enum {
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_DISABLE,	/* Disable pending */
	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
};

enum gro_result {
	GRO_MERGED,
	GRO_MERGED_FREE,
	GRO_HELD,
	GRO_NORMAL,
	GRO_DROP,
};
typedef enum gro_result gro_result_t;

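/*
 * enum rx_handler_result - possible return values for rx_handlers
 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
 *	further.
 * @RX_HANDLER_ANOTHER: do another round of receive processing; the skb's
 *	dev field has been modified to point to another device.
 * @RX_HANDLER_EXACT: force exact delivery, no wildcard protocol handlers.
 * @RX_HANDLER_PASS: process the skb as if no rx_handler was registered.
 *
 * rx_handlers are functions called from inside __netif_receive_skb(),
 * registered via netdev_rx_handler_register().
 */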
enum rx_handler_result {
	RX_HANDLER_CONSUMED,
	RX_HANDLER_ANOTHER,
	RX_HANDLER_EXACT,
	RX_HANDLER_PASS,
};
typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);

extern void __napi_schedule(struct napi_struct *n);

static inline bool napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

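/**
 *	napi_schedule_prep - check if napi can be scheduled
 *	@n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * insure only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */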
static inline bool napi_schedule_prep(struct napi_struct *n)
{
	return !napi_disable_pending(n) &&
		!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}

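/**
 *	napi_schedule - schedule NAPI poll
 *	@n: napi context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */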
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}

/* Try to reschedule poll.  Called by irq handler. */
static inline bool napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return true;
	}
	return false;
}

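/**
 *	napi_complete - NAPI processing complete
 *	@n: napi context
 *
 * Mark NAPI processing as complete.
 */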
extern void __napi_complete(struct napi_struct *n);
extern void napi_complete(struct napi_struct *n);

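/**
 *	napi_disable - prevent NAPI from scheduling
 *	@n: napi context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */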
static inline void napi_disable(struct napi_struct *n)
{
	set_bit(NAPI_STATE_DISABLE, &n->state);
	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	clear_bit(NAPI_STATE_DISABLE, &n->state);
}

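/**
 *	napi_enable - enable NAPI scheduling
 *	@n: napi context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable.
 */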
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}

#ifdef CONFIG_SMP

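/**
 *	napi_synchronize - wait until NAPI is not running
 *	@n: napi context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */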
static inline void napi_synchronize(const struct napi_struct *n)
{
	while (test_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
}
#else
# define napi_synchronize(n)	barrier()
#endif

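/*
 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue.  The
 * netif_tx_* functions below are used to manipulate this flag.  The
 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
 * queue independently.  The netif_xmit_*stopped functions below are called
 * to check if the queue has been stopped by the driver or stack (either of
 * the XOFF bits are set in the state).  Drivers should not use these for
 * stopping/waking queues.
 */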
enum netdev_queue_state_t {
	__QUEUE_STATE_DRV_XOFF,
	__QUEUE_STATE_STACK_XOFF,
	__QUEUE_STATE_FROZEN,
#define QUEUE_STATE_ANY_XOFF ((1 << __QUEUE_STATE_DRV_XOFF)		| \
			      (1 << __QUEUE_STATE_STACK_XOFF))
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF		| \
					(1 << __QUEUE_STATE_FROZEN))
};

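/*
 * This structure holds one instance of a transmit queue.  The first
 * cacheline holds the read-mostly fields; the _xmit_lock and the fields
 * after it are mostly written on the transmit path.
 */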
struct netdev_queue {
/*
 * read mostly part
 */
	struct net_device	*dev;
	struct Qdisc		*qdisc;
	struct Qdisc		*qdisc_sleeping;
#ifdef CONFIG_SYSFS
	struct kobject		kobj;
#endif
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	int			numa_node;
#endif
/*
 * write mostly part
 */
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	int			xmit_lock_owner;
	/*
	 * please use this field instead of dev->trans_start
	 */
	unsigned long		trans_start;

	/*
	 * Number of TX timeouts for this queue
	 * (/sys/class/net/DEV/Q/trans_timeout)
	 */
	unsigned long		trans_timeout;

	unsigned long		state;

#ifdef CONFIG_BQL
	struct dql		dql;
#endif
} ____cacheline_aligned_in_smp;

static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	return q->numa_node;
#else
	return NUMA_NO_NODE;
#endif
}

static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	q->numa_node = node;
#endif
}

#ifdef CONFIG_RPS

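/*
 * This structure holds an RPS map which can be of variable length.  The
 * map is an array of CPUs.
 */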
struct rps_map {
	unsigned int len;
	struct rcu_head rcu;
	u16 cpus[0];
};
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))

/*
 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
 * tail pointer for that CPU's input queue at the time of last enqueue, and
 * a hardware filter index.
 */
struct rps_dev_flow {
	u16 cpu;
	u16 filter;
	unsigned int last_qtail;
};
#define RPS_NO_FILTER 0xffff

/*
 * The rps_dev_flow_table structure contains a table of flow mappings.
 */
struct rps_dev_flow_table {
	unsigned int mask;
	struct rcu_head rcu;
	struct work_struct free_work;
	struct rps_dev_flow flows[0];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
	((_num) * sizeof(struct rps_dev_flow)))

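/*
 * The rps_sock_flow_table contains mappings of flows to the last CPU
 * on which they were processed by the application (set in recvmsg).
 */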
struct rps_sock_flow_table {
	unsigned int mask;
	u16 ents[0];
};
#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
	((_num) * sizeof(u16)))

#define RPS_NO_CPU 0xffff

static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
					u32 hash)
{
	if (table && hash) {
		unsigned int cpu, index = hash & table->mask;

		/* We only give a hint, preemption can change cpu under us */
		cpu = raw_smp_processor_id();

		if (table->ents[index] != cpu)
			table->ents[index] = cpu;
	}
}

static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
				       u32 hash)
{
	if (table && hash)
		table->ents[hash & table->mask] = RPS_NO_CPU;
}

extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;

#ifdef CONFIG_RFS_ACCEL
extern bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
				u32 flow_id, u16 filter_id);
#endif

/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
	struct rps_map __rcu		*rps_map;
	struct rps_dev_flow_table __rcu	*rps_flow_table;
	struct kobject			kobj;
	struct net_device		*dev;
} ____cacheline_aligned_in_smp;
#endif /* CONFIG_RPS */

#ifdef CONFIG_XPS

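/*
 * This structure holds an XPS map which can be of variable length.  The
 * map is an array of queues.
 */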
struct xps_map {
	unsigned int len;
	unsigned int alloc_len;
	struct rcu_head rcu;
	u16 queues[0];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map))	\
    / sizeof(u16))

/*
 * This structure holds all XPS maps for device.  Maps are indexed by CPU.
 */
struct xps_dev_maps {
	struct rcu_head rcu;
	struct xps_map __rcu *cpu_map[0];
};
#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) +		\
    (nr_cpu_ids * sizeof(struct xps_map *)))
#endif /* CONFIG_XPS */

#define TC_MAX_QUEUE	16
#define TC_BITMASK	15

/* HW offloaded queuing disciplines txq count and offset maps */
struct netdev_tc_txq {
	u16 count;
	u16 offset;
};

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
/*
 * Information about the device's FCoE Host Bus Adapter, reported to the
 * LLD via the ndo_fcoe_get_hbainfo() callback.
 */
struct netdev_fcoe_hbainfo {
	char	manufacturer[64];
	char	serial_number[64];
	char	hardware_version[64];
	char	driver_version[64];
	char	optionrom_version[64];
	char	firmware_version[64];
	char	model[256];
	char	model_description[256];
};
#endif

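/*
 * This structure defines the management hooks for network devices.
 * Unless noted otherwise, the hooks are optional and may be left NULL.
 * ndo_start_xmit is required: it is called when a packet needs to be
 * transmitted and must return one of the NETDEV_TX_* codes.
 */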
struct net_device_ops {
	int			(*ndo_init)(struct net_device *dev);
	void			(*ndo_uninit)(struct net_device *dev);
	int			(*ndo_open)(struct net_device *dev);
	int			(*ndo_stop)(struct net_device *dev);
	netdev_tx_t		(*ndo_start_xmit)(struct sk_buff *skb,
						  struct net_device *dev);
	u16			(*ndo_select_queue)(struct net_device *dev,
						    struct sk_buff *skb);
	void			(*ndo_change_rx_flags)(struct net_device *dev,
						       int flags);
	void			(*ndo_set_rx_mode)(struct net_device *dev);
	int			(*ndo_set_mac_address)(struct net_device *dev,
						       void *addr);
	int			(*ndo_validate_addr)(struct net_device *dev);
	int			(*ndo_do_ioctl)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
	int			(*ndo_set_config)(struct net_device *dev,
						  struct ifmap *map);
	int			(*ndo_change_mtu)(struct net_device *dev,
						  int new_mtu);
	int			(*ndo_neigh_setup)(struct net_device *dev,
						   struct neigh_parms *);
	void			(*ndo_tx_timeout)(struct net_device *dev);

	struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
						     struct rtnl_link_stats64 *storage);
	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);

	int			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
						       unsigned short vid);
	int			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
							unsigned short vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*ndo_poll_controller)(struct net_device *dev);
	int			(*ndo_netpoll_setup)(struct net_device *dev,
						     struct netpoll_info *info,
						     gfp_t gfp);
	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
	int			(*ndo_set_vf_mac)(struct net_device *dev,
						  int queue, u8 *mac);
	int			(*ndo_set_vf_vlan)(struct net_device *dev,
						   int queue, u16 vlan, u8 qos);
	int			(*ndo_set_vf_tx_rate)(struct net_device *dev,
						      int vf, int rate);
	int			(*ndo_set_vf_spoofchk)(struct net_device *dev,
						       int vf, bool setting);
	int			(*ndo_get_vf_config)(struct net_device *dev,
						     int vf,
						     struct ifla_vf_info *ivf);
	int			(*ndo_set_vf_port)(struct net_device *dev,
						   int vf,
						   struct nlattr *port[]);
	int			(*ndo_get_vf_port)(struct net_device *dev,
						   int vf, struct sk_buff *skb);
	int			(*ndo_setup_tc)(struct net_device *dev, u8 tc);
#if IS_ENABLED(CONFIG_FCOE)
	int			(*ndo_fcoe_enable)(struct net_device *dev);
	int			(*ndo_fcoe_disable)(struct net_device *dev);
	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
						      u16 xid,
						      struct scatterlist *sgl,
						      unsigned int sgc);
	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
						     u16 xid);
	int			(*ndo_fcoe_ddp_target)(struct net_device *dev,
						       u16 xid,
						       struct scatterlist *sgl,
						       unsigned int sgc);
	int			(*ndo_fcoe_get_hbainfo)(struct net_device *dev,
							struct netdev_fcoe_hbainfo *hbainfo);
#endif

#if IS_ENABLED(CONFIG_LIBFCOE)
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
						    u64 *wwn, int type);
#endif

#ifdef CONFIG_RFS_ACCEL
	int			(*ndo_rx_flow_steer)(struct net_device *dev,
						     const struct sk_buff *skb,
						     u16 rxq_index,
						     u32 flow_id);
#endif
	int			(*ndo_add_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	int			(*ndo_del_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	netdev_features_t	(*ndo_fix_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_set_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_neigh_construct)(struct neighbour *n);
	void			(*ndo_neigh_destroy)(struct neighbour *n);

	int			(*ndo_fdb_add)(struct ndmsg *ndm,
					       struct net_device *dev,
					       unsigned char *addr,
					       u16 flags);
	int			(*ndo_fdb_del)(struct ndmsg *ndm,
					       struct net_device *dev,
					       unsigned char *addr);
	int			(*ndo_fdb_dump)(struct sk_buff *skb,
						struct netlink_callback *cb,
						struct net_device *dev,
						int idx);
};

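/*
 *	The DEVICE structure.
 *	Actually, this whole structure is a big mistake.  It mixes I/O
 *	data with strictly "high-level" data, and it has to know about
 *	almost every data structure used in the INET module.
 */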
struct net_device {
	/*
	 * This is the first field of the "visible" part of this structure
	 * (i.e. as seen by users in the "Space.c" file).  It is the name
	 * of the interface.
	 */
	char			name[IFNAMSIZ];

	/* device name hash chain */
	struct hlist_node	name_hlist;

	/* snmp alias */
	char			*ifalias;

	/*
	 *	I/O specific fields
	 */
	unsigned long		mem_end;	/* shared mem end	*/
	unsigned long		mem_start;	/* shared mem start	*/
	unsigned long		base_addr;	/* device I/O address	*/
	unsigned int		irq;		/* device IRQ number	*/

	/*
	 *	Some hardware also needs these fields, but they are not
	 *	part of the usual set specified in Space.c.
	 */
	unsigned long		state;

	struct list_head	dev_list;
	struct list_head	napi_list;
	struct list_head	unreg_list;

	/* currently active device features */
	netdev_features_t	features;
	/* user-changeable features */
	netdev_features_t	hw_features;
	/* user-requested features */
	netdev_features_t	wanted_features;
	/* mask of features inheritable by VLAN devices */
	netdev_features_t	vlan_features;

	/* Interface index.  Unique device identifier. */
	int			ifindex;
	int			iflink;

	struct net_device_stats	stats;
	/* packets dropped by the core network; do not use this in drivers */
	atomic_long_t		rx_dropped;

#ifdef CONFIG_WIRELESS_EXT
	/* List of functions to handle Wireless Extensions (instead of
	 * ioctl).  See <net/iw_handler.h> for details. */
	const struct iw_handler_def *wireless_handlers;
	/* Instance data managed by the core of Wireless Extensions. */
	struct iw_public_data	*wireless_data;
#endif

	/* Management operations */
	const struct net_device_ops *netdev_ops;
	const struct ethtool_ops *ethtool_ops;

	/* Hardware header description */
	const struct header_ops *header_ops;

	unsigned int		flags;		/* interface flags (a la BSD) */
	unsigned int		priv_flags;	/* like 'flags' but invisible
						 * to userspace */
	unsigned short		gflags;
	unsigned short		padded;		/* padding added by alloc_netdev() */

	unsigned char		operstate;	/* RFC2863 operstate */
	unsigned char		link_mode;	/* mapping policy to operstate */

	unsigned char		if_port;	/* selectable AUI, TP, ... */
	unsigned char		dma;		/* DMA channel */

	unsigned int		mtu;		/* interface MTU value */
	unsigned short		type;		/* interface hardware type */
	unsigned short		hard_header_len; /* hardware hdr length */

	/* extra head- and tailroom the hardware may need, but not in all
	 * cases can this be guaranteed, especially tailroom.  Some cases
	 * also use LL_MAX_HEADER instead to allocate the skb.
	 */
	unsigned short		needed_headroom;
	unsigned short		needed_tailroom;

	/* Interface address info. */
	unsigned char		perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
	unsigned char		addr_assign_type; /* hw address assignment type */
	unsigned char		addr_len;	/* hardware address length */
	unsigned char		neigh_priv_len;
	unsigned short		dev_id;		/* for shared network cards */

	spinlock_t		addr_list_lock;
	struct netdev_hw_addr_list	uc;	/* Unicast mac addresses */
	struct netdev_hw_addr_list	mc;	/* Multicast mac addresses */
	bool			uc_promisc;
	unsigned int		promiscuity;
	unsigned int		allmulti;

	/* Protocol specific pointers */

#if IS_ENABLED(CONFIG_VLAN_8021Q)
	struct vlan_info __rcu	*vlan_info;	/* VLAN info */
#endif
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_switch_tree	*dsa_ptr;	/* dsa specific data */
#endif
	void			*atalk_ptr;	/* AppleTalk link */
	struct in_device __rcu	*ip_ptr;	/* IPv4 specific data */
	struct dn_dev __rcu	*dn_ptr;	/* DECnet specific data */
	struct inet6_dev __rcu	*ip6_ptr;	/* IPv6 specific data */
	void			*ax25_ptr;	/* AX.25 specific data */
	struct wireless_dev	*ieee80211_ptr;	/* IEEE 802.11 specific data,
						   assign before registering */

/*
 * Cache lines mostly used on receive path (including eth_type_trans())
 */
	unsigned long		last_rx;	/* Time of last Rx */

	struct net_device	*master;	/* Pointer to master device of a
						   group which this device is a
						   member of */

	/* Interface address info used in eth_type_trans() */
	unsigned char		*dev_addr;	/* hw address (before bcast
						   because most packets are
						   unicast) */

	struct netdev_hw_addr_list	dev_addrs; /* list of device hw addresses */

	unsigned char		broadcast[MAX_ADDR_LEN]; /* hw bcast address */

#ifdef CONFIG_SYSFS
	struct kset		*queues_kset;
#endif

#ifdef CONFIG_RPS
	struct netdev_rx_queue	*_rx;

	/* Number of RX queues allocated at register_netdev() time */
	unsigned int		num_rx_queues;

	/* Number of RX queues currently active in device */
	unsigned int		real_num_rx_queues;

#ifdef CONFIG_RFS_ACCEL
	/* CPU reverse-mapping for RX completion interrupts, indexed by RX
	 * queue number.  Assigned by driver.  This must only be set if the
	 * ndo_rx_flow_steer operation is defined. */
	struct cpu_rmap		*rx_cpu_rmap;
#endif
#endif

	rx_handler_func_t __rcu	*rx_handler;
	void __rcu		*rx_handler_data;

	struct netdev_queue __rcu *ingress_queue;

/*
 * Cache lines mostly used on transmit path
 */
	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;

	/* Number of TX queues allocated at alloc_netdev_mq() time */
	unsigned int		num_tx_queues;

	/* Number of TX queues currently active in device */
	unsigned int		real_num_tx_queues;

	/* root qdisc from userspace point of view */
	struct Qdisc		*qdisc;

	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
	spinlock_t		tx_global_lock;

#ifdef CONFIG_XPS
	struct xps_dev_maps __rcu *xps_maps;
#endif

	/*
	 * trans_start here is expensive for high speed devices on SMP;
	 * please use netdev_queue->trans_start instead.
	 */
	unsigned long		trans_start;	/* Time (in jiffies) of last Tx */

	int			watchdog_timeo;	/* used by dev_watchdog() */
	struct timer_list	watchdog_timer;

	/* Number of references to this device */
	int __percpu		*pcpu_refcnt;

	/* delayed register/unregister */
	struct list_head	todo_list;
	/* device index hash chain */
	struct hlist_node	index_hlist;

	struct list_head	link_watch_list;

	/* register/unregister state machine */
	enum { NETREG_UNINITIALIZED = 0,
	       NETREG_REGISTERED,	/* completed register_netdevice */
	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
	       NETREG_UNREGISTERED,	/* completed unregister todo */
	       NETREG_RELEASED,		/* called free_netdev */
	       NETREG_DUMMY,		/* dummy device for NAPI poll */
	} reg_state:8;

	bool dismantle;		/* device is going to be freed */

	enum {
		RTNL_LINK_INITIALIZED,
		RTNL_LINK_INITIALIZING,
	} rtnl_link_state:16;

	/* Called from unregister; can be used to call free_irq */
	void (*destructor)(struct net_device *dev);

#ifdef CONFIG_NETPOLL
	struct netpoll_info	*npinfo;
#endif

#ifdef CONFIG_NET_NS
	/* Network namespace this network device is inside */
	struct net		*nd_net;
#endif

	/* mid-layer private */
	union {
		void				*ml_priv;
		struct pcpu_lstats __percpu	*lstats;
		struct pcpu_tstats __percpu	*tstats;
		struct pcpu_dstats __percpu	*dstats;
	};
	/* GARP */
	struct garp_port __rcu	*garp_port;

	/* class/net/name entry */
	struct device		dev;
	/* space for optional device, statistics, and wireless sysfs groups */
	const struct attribute_group *sysfs_groups[4];

	/* rtnetlink link ops */
	const struct rtnl_link_ops *rtnl_link_ops;

	/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE		65536
	unsigned int		gso_max_size;
#define GSO_MAX_SEGS		65535
	u16			gso_max_segs;

#ifdef CONFIG_DCB
	/* Data Center Bridging netlink ops */
	const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
	u8 num_tc;
	struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
	u8 prio_tc_map[TC_BITMASK + 1];

#if IS_ENABLED(CONFIG_FCOE)
	/* max exchange id for FCoE LRO by ddp */
	unsigned int		fcoe_ddp_xid;
#endif
#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
	struct netprio_map __rcu *priomap;
#endif
	/* phy device may attach itself for hardware timestamping */
	struct phy_device *phydev;

	/* group the device belongs to */
	int group;

	struct pm_qos_request	pm_qos_req;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)

#define NETDEV_ALIGN		32

static inline
int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
{
	return dev->prio_tc_map[prio & TC_BITMASK];
}

static inline
int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

	dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
	return 0;
}

static inline
void netdev_reset_tc(struct net_device *dev)
{
	dev->num_tc = 0;
	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
}

static inline
int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

	dev->tc_to_txq[tc].count = count;
	dev->tc_to_txq[tc].offset = offset;
	return 0;
}

static inline
int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
{
	if (num_tc > TC_MAX_QUEUE)
		return -EINVAL;

	dev->num_tc = num_tc;
	return 0;
}

static inline
int netdev_get_num_tc(struct net_device *dev)
{
	return dev->num_tc;
}

static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
					 unsigned int index)
{
	return &dev->_tx[index];
}

static inline void netdev_for_each_tx_queue(struct net_device *dev,
					    void (*f)(struct net_device *,
						      struct netdev_queue *,
						      void *),
					    void *arg)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		f(dev, &dev->_tx[i], arg);
}

/*
 * Net namespace inlines
 */
static inline
struct net *dev_net(const struct net_device *dev)
{
	return read_pnet(&dev->nd_net);
}

static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
#ifdef CONFIG_NET_NS
	release_net(dev->nd_net);
	dev->nd_net = hold_net(net);
#endif
}

static inline bool netdev_uses_dsa_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_DSA
	if (dev->dsa_ptr != NULL)
		return dsa_uses_dsa_tags(dev->dsa_ptr);
#endif
	return false;
}

static inline bool netdev_uses_trailer_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_TRAILER
	if (dev->dsa_ptr != NULL)
		return dsa_uses_trailer_tags(dev->dsa_ptr);
#endif
	return false;
}

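/**
 *	netdev_priv - access network device private data
 *	@dev: network device
 *
 * Get network device private data.
 */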
static inline void *netdev_priv(const struct net_device *dev)
{
	return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
}

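/* Set the sysfs physical device reference for the network logical device
 * if set prior to registration will cause a symlink during initialization.
 */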
#define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))

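/* Set the sysfs device type for the network logical device to allow
 * fine-grained identification of different network device types.  For
 * example, Ethernet, Wireless LAN, Bluetooth, WiMAX, etc.
 */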
#define SET_NETDEV_DEVTYPE(net, devtype)	((net)->dev.type = (devtype))

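/**
 *	netif_napi_add - initialize a napi context
 *	@dev:  network device
 *	@napi: napi context
 *	@poll: polling function
 *	@weight: default weight
 *
 * netif_napi_add() must be used to initialize a napi context prior to
 * calling *any* of the other napi related functions.
 */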
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight);

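/**
 *	netif_napi_del - remove a napi context
 *	@napi: napi context
 *
 * netif_napi_del() removes a napi context from the network device napi list.
 */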
void netif_napi_del(struct napi_struct *napi);

struct napi_gro_cb {
	/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
	void *frag0;

	/* Length of frag0. */
	unsigned int frag0_len;

	/* This indicates where we are processing relative to skb->data. */
	int data_offset;

	/* This is non-zero if the packet may be of the same flow. */
	int same_flow;

	/* This is non-zero if the packet cannot be merged with the new skb. */
	int flush;

	/* Number of segments aggregated. */
	int count;

	/* Free the skb? */
	int free;
#define NAPI_GRO_FREE		  1
#define NAPI_GRO_FREE_STOLEN_HEAD 2
};

#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)

struct packet_type {
	__be16			type;	/* This is really htons(ether_type). */
	struct net_device	*dev;	/* NULL is wildcarded here	     */
	int			(*func)(struct sk_buff *,
					struct net_device *,
					struct packet_type *,
					struct net_device *);
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						netdev_features_t features);
	int			(*gso_send_check)(struct sk_buff *skb);
	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
						 struct sk_buff *skb);
	int			(*gro_complete)(struct sk_buff *skb);
	bool			(*id_match)(struct packet_type *ptype,
					    struct sock *sk);
	void			*af_packet_priv;
	struct list_head	list;
};

#include <linux/notifier.h>

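/* netdevice notifier chain.  Please remember to update the rtnetlink
 * notification exclusion list in rtnetlink_event() when adding new types.
 */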
#define NETDEV_UP	0x0001	/* For now you can't veto a device up/down */
#define NETDEV_DOWN	0x0002
#define NETDEV_REBOOT	0x0003	/* Tell a protocol stack a network interface
				   detected a hardware crash and restarted
				   - we can use this eg to kick tcp sessions
				   once done */
#define NETDEV_CHANGE	0x0004	/* Notify device state change */
#define NETDEV_REGISTER	0x0005
#define NETDEV_UNREGISTER	0x0006
#define NETDEV_CHANGEMTU	0x0007
#define NETDEV_CHANGEADDR	0x0008
#define NETDEV_GOING_DOWN	0x0009
#define NETDEV_CHANGENAME	0x000A
#define NETDEV_FEAT_CHANGE	0x000B
#define NETDEV_BONDING_FAILOVER	0x000C
#define NETDEV_PRE_UP		0x000D
#define NETDEV_PRE_TYPE_CHANGE	0x000E
#define NETDEV_POST_TYPE_CHANGE	0x000F
#define NETDEV_POST_INIT	0x0010
#define NETDEV_UNREGISTER_BATCH	0x0011
#define NETDEV_RELEASE		0x0012
#define NETDEV_NOTIFY_PEERS	0x0013
#define NETDEV_JOIN		0x0014

extern int register_netdevice_notifier(struct notifier_block *nb);
extern int unregister_netdevice_notifier(struct notifier_block *nb);
extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);

extern rwlock_t				dev_base_lock;	/* Device list lock */

#define for_each_netdev(net, d)		\
		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_reverse(net, d)	\
		list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_rcu(net, d)		\
		list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n)	\
		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d)		\
		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue_rcu(net, d)		\
	list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
#define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)

static inline struct net_device *next_net_device(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = dev->dev_list.next;
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *next_net_device_rcu(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = rcu_dereference(list_next_rcu(&dev->dev_list));
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *first_net_device(struct net *net)
{
	return list_empty(&net->dev_base_head) ? NULL :
		net_device_entry(net->dev_base_head.next);
}

static inline struct net_device *first_net_device_rcu(struct net *net)
{
	struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));

	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

extern int			netdev_boot_setup_check(struct net_device *dev);
extern unsigned long		netdev_boot_base(const char *prefix, int unit);
extern struct net_device	*dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
						     const char *hwaddr);
extern struct net_device	*dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern struct net_device	*__dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern void			dev_add_pack(struct packet_type *pt);
extern void			dev_remove_pack(struct packet_type *pt);
extern void			__dev_remove_pack(struct packet_type *pt);

extern struct net_device	*dev_get_by_flags_rcu(struct net *net, unsigned short flags,
						      unsigned short mask);
extern struct net_device	*dev_get_by_name(struct net *net, const char *name);
extern struct net_device	*dev_get_by_name_rcu(struct net *net, const char *name);
extern struct net_device	*__dev_get_by_name(struct net *net, const char *name);
extern int			dev_alloc_name(struct net_device *dev, const char *name);
extern int			dev_open(struct net_device *dev);
extern int			dev_close(struct net_device *dev);
extern void			dev_disable_lro(struct net_device *dev);
extern int			dev_loopback_xmit(struct sk_buff *newskb);
extern int			dev_queue_xmit(struct sk_buff *skb);
extern int			register_netdevice(struct net_device *dev);
extern void			unregister_netdevice_queue(struct net_device *dev,
							   struct list_head *head);
extern void			unregister_netdevice_many(struct list_head *head);
static inline void unregister_netdevice(struct net_device *dev)
{
	unregister_netdevice_queue(dev, NULL);
}

extern int			netdev_refcnt_read(const struct net_device *dev);
extern void			free_netdev(struct net_device *dev);
extern void			synchronize_net(void);
extern int			init_dummy_netdev(struct net_device *dev);
extern void			netdev_resync_ops(struct net_device *dev);

extern struct net_device	*dev_get_by_index(struct net *net, int ifindex);
extern struct net_device	*__dev_get_by_index(struct net *net, int ifindex);
extern struct net_device	*dev_get_by_index_rcu(struct net *net, int ifindex);
extern int			dev_restart(struct net_device *dev);
#ifdef CONFIG_NETPOLL_TRAP
extern int			netpoll_trap(void);
#endif
extern int			skb_gro_receive(struct sk_buff **head,
						struct sk_buff *skb);
extern void skb_gro_reset_offset(struct sk_buff *skb);

static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->data_offset;
}

static inline unsigned int skb_gro_len(const struct sk_buff *skb)
{
	return skb->len - NAPI_GRO_CB(skb)->data_offset;
}

static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
{
	NAPI_GRO_CB(skb)->data_offset += len;
}

static inline void *skb_gro_header_fast(struct sk_buff *skb,
					unsigned int offset)
{
	return NAPI_GRO_CB(skb)->frag0 + offset;
}

static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
{
	return NAPI_GRO_CB(skb)->frag0_len < hlen;
}

static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
					unsigned int offset)
{
	if (!pskb_may_pull(skb, hlen))
		return NULL;

	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;
	return skb->data + offset;
}

static inline void *skb_gro_mac_header(struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb);
}

static inline void *skb_gro_network_header(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
	       skb_network_offset(skb);
}

static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
				  unsigned short type,
				  const void *daddr, const void *saddr,
				  unsigned int len)
{
	if (!dev->header_ops || !dev->header_ops->create)
		return 0;

	return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
}

static inline int dev_parse_header(const struct sk_buff *skb,
				   unsigned char *haddr)
{
	const struct net_device *dev = skb->dev;

	if (!dev->header_ops || !dev->header_ops->parse)
		return 0;
	return dev->header_ops->parse(skb, haddr);
}

typedef int gifconf_func_t(struct net_device *dev, char __user *bufptr, int len);
extern int		register_gifconf(unsigned int family, gifconf_func_t *gifconf);
static inline int unregister_gifconf(unsigned int family)
{
	return register_gifconf(family, NULL);
}

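/*
 * Incoming packets are placed on per-cpu queues
 */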
struct softnet_data {
	struct Qdisc		*output_queue;
	struct Qdisc		**output_queue_tailp;
	struct list_head	poll_list;
	struct sk_buff		*completion_queue;
	struct sk_buff_head	process_queue;

	/* stats */
	unsigned int		processed;
	unsigned int		time_squeeze;
	unsigned int		cpu_collision;
	unsigned int		received_rps;

#ifdef CONFIG_RPS
	struct softnet_data	*rps_ipi_list;

	/* Elements below can be accessed between CPUs for RPS */
	struct call_single_data	csd ____cacheline_aligned_in_smp;
	struct softnet_data	*rps_ipi_next;
	unsigned int		cpu;
	unsigned int		input_queue_head;
	unsigned int		input_queue_tail;
#endif
	unsigned int		dropped;
	struct sk_buff_head	input_pkt_queue;
	struct napi_struct	backlog;
};

static inline void input_queue_head_incr(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	sd->input_queue_head++;
#endif
}

static inline void input_queue_tail_incr_save(struct softnet_data *sd,
					      unsigned int *qtail)
{
#ifdef CONFIG_RPS
	*qtail = ++sd->input_queue_tail;
#endif
}

DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);

extern void __netif_schedule(struct Qdisc *q);

static inline void netif_schedule_queue(struct netdev_queue *txq)
{
	if (!(txq->state & QUEUE_STATE_ANY_XOFF))
		__netif_schedule(txq->qdisc);
}

static inline void netif_tx_schedule_all(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		netif_schedule_queue(netdev_get_tx_queue(dev, i));
}

static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
	clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

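/**
 *	netif_start_queue - allow transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 */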
static inline void netif_start_queue(struct net_device *dev)
{
	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_start_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_start_queue(txq);
	}
}

static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap()) {
		netif_tx_start_queue(dev_queue);
		return;
	}
#endif
	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state))
		__netif_schedule(dev_queue->qdisc);
}

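/**
 *	netif_wake_queue - restart transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are available.
 */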
static inline void netif_wake_queue(struct net_device *dev)
{
	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_wake_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_wake_queue(txq);
	}
}

static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
	if (WARN_ON(!dev_queue)) {
		pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
		return;
	}
	set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

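/**
 *	netif_stop_queue - stop transmitted packets
 *	@dev: network device
 *
 *	Stop upper layers calling the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are unavailable.
 */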
static inline void netif_stop_queue(struct net_device *dev)
{
	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_stop_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_stop_queue(txq);
	}
}

static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
	return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

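/**
 *	netif_queue_stopped - test if transmit queue is flowblocked
 *	@dev: network device
 *
 *	Test if transmit queue on device is currently unable to send.
 */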
static inline bool netif_queue_stopped(const struct net_device *dev)
{
	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}

static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_ANY_XOFF;
}

static inline bool netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
}

static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
					unsigned int bytes)
{
#ifdef CONFIG_BQL
	dql_queued(&dev_queue->dql, bytes);

	if (likely(dql_avail(&dev_queue->dql) >= 0))
		return;

	set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);

	/*
	 * The XOFF flag must be set before checking the dql_avail below,
	 * because in netdev_tx_completed_queue we update the dql_completed
	 * value and check dql_avail == 0, thereby avoiding the race with
	 * netdev_tx_completed_queue().
	 */
	smp_mb();

	/* check again in case another cpu has just made room available */
	if (unlikely(dql_avail(&dev_queue->dql) >= 0))
		clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
#endif
}

static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
{
	netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
}

static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
					     unsigned int pkts, unsigned int bytes)
{
#ifdef CONFIG_BQL
	if (unlikely(!bytes))
		return;

	dql_completed(&dev_queue->dql, bytes);

	/*
	 * Without the memory barrier there is a small possibility that
	 * netdev_tx_sent_queue will miss the update and cause the queue to
	 * be stopped forever
	 */
	smp_mb();

	if (dql_avail(&dev_queue->dql) < 0)
		return;

	if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
		netif_schedule_queue(dev_queue);
#endif
}

static inline void netdev_completed_queue(struct net_device *dev,
					  unsigned int pkts, unsigned int bytes)
{
	netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
}

static inline void netdev_tx_reset_queue(struct netdev_queue *q)
{
#ifdef CONFIG_BQL
	clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
	dql_reset(&q->dql);
#endif
}

static inline void netdev_reset_queue(struct net_device *dev_queue)
{
	netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
}

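/**
 *	netif_running - test if up
 *	@dev: network device
 *
 *	Test if the device has been brought up.
 */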
static inline bool netif_running(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}

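/*
 * Routines to manage the subqueues on a device.  We only need start,
 * stop, and a check if it's stopped.  All other device management is
 * done at the overall netdevice level.
 * Also test the device if we're multiqueue.
 */

/**
 *	netif_start_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Start individual transmit queue of a device with multiple transmit
 * queues.
 */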
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	netif_tx_start_queue(txq);
}

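/**
 *	netif_stop_subqueue - stop sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Stop individual transmit queue of a device with multiple transmit
 * queues.
 */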
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	netif_tx_stop_queue(txq);
}

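/**
 *	__netif_subqueue_stopped - test status of subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Check individual transmit queue of a device with multiple transmit
 * queues.
 */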
static inline bool __netif_subqueue_stopped(const struct net_device *dev,
					    u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	return netif_tx_queue_stopped(txq);
}

static inline bool netif_subqueue_stopped(const struct net_device *dev,
					  struct sk_buff *skb)
{
	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}

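/**
 *	netif_wake_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Resume individual transmit queue of a device with multiple transmit
 * queues.
 */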
static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
		__netif_schedule(txq->qdisc);
}

/*
 * Returns a Tx hash for the given packet when dev->real_num_tx_queues is
 * used as a distribution range limit for the returned value.
 */
static inline u16 skb_tx_hash(const struct net_device *dev,
			      const struct sk_buff *skb)
{
	return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
}

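/**
 *	netif_is_multiqueue - test if device has multiple transmit queues
 *	@dev: network device
 *
 * Check if device has multiple transmit queues.
 */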
static inline bool netif_is_multiqueue(const struct net_device *dev)
{
	return dev->num_tx_queues > 1;
}

extern int netif_set_real_num_tx_queues(struct net_device *dev,
					unsigned int txq);

#ifdef CONFIG_RPS
extern int netif_set_real_num_rx_queues(struct net_device *dev,
					unsigned int rxq);
#else
static inline int netif_set_real_num_rx_queues(struct net_device *dev,
					       unsigned int rxq)
{
	return 0;
}
#endif

static inline int netif_copy_real_num_queues(struct net_device *to_dev,
					     const struct net_device *from_dev)
{
	int err;

	err = netif_set_real_num_tx_queues(to_dev,
					   from_dev->real_num_tx_queues);
	if (err)
		return err;
#ifdef CONFIG_RPS
	return netif_set_real_num_rx_queues(to_dev,
					    from_dev->real_num_rx_queues);
#else
	return 0;
#endif
}

#define DEFAULT_MAX_NUM_RSS_QUEUES	(8)
extern int netif_get_num_default_rss_queues(void);

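/* Use this variant when it is known for sure that it
 * is executing from hardware interrupt context or with hardware interrupts
 * disabled.
 */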
extern void dev_kfree_skb_irq(struct sk_buff *skb);

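/* Use this variant in places where it could be invoked
 * from either hardware interrupt or other context, with hardware interrupts
 * either disabled or enabled.
 */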
extern void dev_kfree_skb_any(struct sk_buff *skb);

extern int		netif_rx(struct sk_buff *skb);
extern int		netif_rx_ni(struct sk_buff *skb);
extern int		netif_receive_skb(struct sk_buff *skb);
extern gro_result_t	dev_gro_receive(struct napi_struct *napi,
					struct sk_buff *skb);
extern gro_result_t	napi_skb_finish(gro_result_t ret, struct sk_buff *skb);
extern gro_result_t	napi_gro_receive(struct napi_struct *napi,
					 struct sk_buff *skb);
extern void		napi_gro_flush(struct napi_struct *napi);
extern struct sk_buff *	napi_get_frags(struct napi_struct *napi);
extern gro_result_t	napi_frags_finish(struct napi_struct *napi,
					  struct sk_buff *skb,
					  gro_result_t ret);
extern gro_result_t	napi_gro_frags(struct napi_struct *napi);

static inline void napi_free_frags(struct napi_struct *napi)
{
	kfree_skb(napi->skb);
	napi->skb = NULL;
}

extern int netdev_rx_handler_register(struct net_device *dev,
				      rx_handler_func_t *rx_handler,
				      void *rx_handler_data);
extern void netdev_rx_handler_unregister(struct net_device *dev);

extern bool		dev_valid_name(const char *name);
extern int		dev_ioctl(struct net *net, unsigned int cmd, void __user *);
extern int		dev_ethtool(struct net *net, struct ifreq *);
extern unsigned int	dev_get_flags(const struct net_device *);
extern int		__dev_change_flags(struct net_device *, unsigned int flags);
extern int		dev_change_flags(struct net_device *, unsigned int);
extern void		__dev_notify_flags(struct net_device *, unsigned int old_flags);
extern int		dev_change_name(struct net_device *, const char *);
extern int		dev_set_alias(struct net_device *, const char *, size_t);
extern int		dev_change_net_namespace(struct net_device *,
						 struct net *, const char *);
extern int		dev_set_mtu(struct net_device *, int);
extern void		dev_set_group(struct net_device *, int);
extern int		dev_set_mac_address(struct net_device *,
					    struct sockaddr *);
extern int		dev_hard_start_xmit(struct sk_buff *skb,
					    struct net_device *dev,
					    struct netdev_queue *txq);
extern int		dev_forward_skb(struct net_device *dev,
					struct sk_buff *skb);

extern int		netdev_budget;

/* Called by rtnetlink.c:rtnl_unlock() */
extern void netdev_run_todo(void);

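/**
 *	dev_put - release reference to device
 *	@dev: network device
 *
 * Release reference to device to allow it to be freed.
 */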
static inline void dev_put(struct net_device *dev)
{
	this_cpu_dec(*dev->pcpu_refcnt);
}

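/**
 *	dev_hold - get reference to device
 *	@dev: network device
 *
 * Hold reference to device to keep it from being freed.
 */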
static inline void dev_hold(struct net_device *dev)
{
	this_cpu_inc(*dev->pcpu_refcnt);
}

/* Carrier loss detection, dial on demand.  The functions netif_carrier_on
 * and netif_carrier_off may be called from IRQ context, but it is the
 * caller who is responsible for serialization of these calls.
 *
 * The name carrier is inappropriate; these functions should really be
 * called netif_lowerlayer_*() because they represent the state of any
 * layer below the link layer.  Their purpose is to indicate when the
 * lower layers are down.
 */
extern void linkwatch_fire_event(struct net_device *dev);
extern void linkwatch_forget_dev(struct net_device *dev);

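/**
 *	netif_carrier_ok - test if carrier present
 *	@dev: network device
 *
 * Check if carrier is present on device.
 */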
static inline bool netif_carrier_ok(const struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}

extern unsigned long dev_trans_start(struct net_device *dev);

extern void __netdev_watchdog_up(struct net_device *dev);

extern void netif_carrier_on(struct net_device *dev);

extern void netif_carrier_off(struct net_device *dev);

extern void netif_notify_peers(struct net_device *dev);

/**
 *	netif_dormant_on - mark device as dormant.
 *	@dev: network device
 *
 * Mark device as dormant (as per RFC2863).
 *
 * The dormant state indicates that the relevant interface is not
 * actually in a condition to pass packets (i.e., it is not 'up') but is
 * in a "pending" state, waiting for some external event.  For "on-demand"
 * interfaces, this new state identifies the situation where the interface
 * is waiting for events to place it in the up state.
 */
static inline void netif_dormant_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant_off - set device as not dormant.
 *	@dev: network device
 *
 * Device is not in dormant state.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

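/**
 *	netif_dormant - test if device is dormant
 *	@dev: network device
 *
 * Check if the device is dormant.
 */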
static inline bool netif_dormant(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_DORMANT, &dev->state);
}

/**
 *	netif_oper_up - test if device is operational
 *	@dev: network device
 *
 * Check if carrier is operational.
 */
static inline bool netif_oper_up(const struct net_device *dev)
{
	return (dev->operstate == IF_OPER_UP ||
		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}

/**
 *	netif_device_present - is device available or removed
 *	@dev: network device
 *
 * Check if device has not been removed from system.
 */
static inline bool netif_device_present(struct net_device *dev)
{
	return test_bit(__LINK_STATE_PRESENT, &dev->state);
}

extern void netif_device_detach(struct net_device *dev);

extern void netif_device_attach(struct net_device *dev);

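/*
 * Network interface message level settings
 */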
enum {
	NETIF_MSG_DRV		= 0x0001,
	NETIF_MSG_PROBE		= 0x0002,
	NETIF_MSG_LINK		= 0x0004,
	NETIF_MSG_TIMER		= 0x0008,
	NETIF_MSG_IFDOWN	= 0x0010,
	NETIF_MSG_IFUP		= 0x0020,
	NETIF_MSG_RX_ERR	= 0x0040,
	NETIF_MSG_TX_ERR	= 0x0080,
	NETIF_MSG_TX_QUEUED	= 0x0100,
	NETIF_MSG_INTR		= 0x0200,
	NETIF_MSG_TX_DONE	= 0x0400,
	NETIF_MSG_RX_STATUS	= 0x0800,
	NETIF_MSG_PKTDATA	= 0x1000,
	NETIF_MSG_HW		= 0x2000,
	NETIF_MSG_WOL		= 0x4000,
};

#define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)

static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
	/* use default */
	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
		return default_msg_enable_bits;
	if (debug_value == 0)	/* no output */
		return 0;
	/* set low N bits */
	return (1 << debug_value) - 1;
}

static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
	spin_lock(&txq->_xmit_lock);
	txq->xmit_lock_owner = cpu;
}

static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
	spin_lock_bh(&txq->_xmit_lock);
	txq->xmit_lock_owner = smp_processor_id();
}

static inline bool __netif_tx_trylock(struct netdev_queue *txq)
{
	bool ok = spin_trylock(&txq->_xmit_lock);
	if (likely(ok))
		txq->xmit_lock_owner = smp_processor_id();
	return ok;
}

static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock(&txq->_xmit_lock);
}

static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock_bh(&txq->_xmit_lock);
}

static inline void txq_trans_update(struct netdev_queue *txq)
{
	if (txq->xmit_lock_owner != -1)
		txq->trans_start = jiffies;
}

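/**
 *	netif_tx_lock - grab network device transmit lock
 *	@dev: network device
 *
 * Get network device transmit lock.
 */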
static inline void netif_tx_lock(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	spin_lock(&dev->tx_global_lock);
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* We are the only thread of execution doing a
		 * freeze, but we have to grab the _xmit_lock in
		 * order to synchronize with threads which are in
		 * the ->hard_start_xmit() handler and already
		 * checked the frozen bit.
		 */
		__netif_tx_lock(txq, cpu);
		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
		__netif_tx_unlock(txq);
	}
}

static inline void netif_tx_lock_bh(struct net_device *dev)
{
	local_bh_disable();
	netif_tx_lock(dev);
}

static inline void netif_tx_unlock(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* No need to grab the _xmit_lock here.  If the
		 * queue is not stopped for another reason, we
		 * force a schedule.
		 */
		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
		netif_schedule_queue(txq);
	}
	spin_unlock(&dev->tx_global_lock);
}

static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	netif_tx_unlock(dev);
	local_bh_enable();
}

#define HARD_TX_LOCK(dev, txq, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_lock(txq, cpu);		\
	}						\
}

#define HARD_TX_UNLOCK(dev, txq) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_unlock(txq);			\
	}						\
}

static inline void netif_tx_disable(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	local_bh_disable();
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		__netif_tx_lock(txq, cpu);
		netif_tx_stop_queue(txq);
		__netif_tx_unlock(txq);
	}
	local_bh_enable();
}

static inline void netif_addr_lock(struct net_device *dev)
{
	spin_lock(&dev->addr_list_lock);
}

static inline void netif_addr_lock_nested(struct net_device *dev)
{
	spin_lock_nested(&dev->addr_list_lock, SINGLE_DEPTH_NESTING);
}

static inline void netif_addr_lock_bh(struct net_device *dev)
{
	spin_lock_bh(&dev->addr_list_lock);
}

static inline void netif_addr_unlock(struct net_device *dev)
{
	spin_unlock(&dev->addr_list_lock);
}

static inline void netif_addr_unlock_bh(struct net_device *dev)
{
	spin_unlock_bh(&dev->addr_list_lock);
}

/*
 * dev_addrs walker.  Should be used only for read access.  Call with
 * rcu_read_lock held.
 */
#define for_each_dev_addr(dev, ha) \
		list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)

extern void		ether_setup(struct net_device *dev);

/* Support for loadable net-drivers */
extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
				       void (*setup)(struct net_device *),
				       unsigned int txqs, unsigned int rxqs);
#define alloc_netdev(sizeof_priv, name, setup) \
	alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)

#define alloc_netdev_mq(sizeof_priv, name, setup, count) \
	alloc_netdev_mqs(sizeof_priv, name, setup, count, count)

extern int		register_netdev(struct net_device *dev);
extern void		unregister_netdev(struct net_device *dev);

/* General hardware address lists handling functions */
extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
				  struct netdev_hw_addr_list *from_list,
				  int addr_len, unsigned char addr_type);
extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
				   struct netdev_hw_addr_list *from_list,
				   int addr_len, unsigned char addr_type);
extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
			  struct netdev_hw_addr_list *from_list,
			  int addr_len);
extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
			     struct netdev_hw_addr_list *from_list,
			     int addr_len);
extern void __hw_addr_flush(struct netdev_hw_addr_list *list);
extern void __hw_addr_init(struct netdev_hw_addr_list *list);

2562
2563extern int dev_addr_add(struct net_device *dev, unsigned char *addr,
2564 unsigned char addr_type);
2565extern int dev_addr_del(struct net_device *dev, unsigned char *addr,
2566 unsigned char addr_type);
2567extern int dev_addr_add_multiple(struct net_device *to_dev,
2568 struct net_device *from_dev,
2569 unsigned char addr_type);
2570extern int dev_addr_del_multiple(struct net_device *to_dev,
2571 struct net_device *from_dev,
2572 unsigned char addr_type);
2573extern void dev_addr_flush(struct net_device *dev);
2574extern int dev_addr_init(struct net_device *dev);
2575
2576
2577extern int dev_uc_add(struct net_device *dev, unsigned char *addr);
2578extern int dev_uc_add_excl(struct net_device *dev, unsigned char *addr);
2579extern int dev_uc_del(struct net_device *dev, unsigned char *addr);
2580extern int dev_uc_sync(struct net_device *to, struct net_device *from);
2581extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
2582extern void dev_uc_flush(struct net_device *dev);
2583extern void dev_uc_init(struct net_device *dev);

/* Functions used for multicast addresses handling */
extern int dev_mc_add(struct net_device *dev, unsigned char *addr);
extern int dev_mc_add_global(struct net_device *dev, unsigned char *addr);
extern int dev_mc_add_excl(struct net_device *dev, unsigned char *addr);
extern int dev_mc_del(struct net_device *dev, unsigned char *addr);
extern int dev_mc_del_global(struct net_device *dev, unsigned char *addr);
extern int dev_mc_sync(struct net_device *to, struct net_device *from);
extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
extern void dev_mc_flush(struct net_device *dev);
extern void dev_mc_init(struct net_device *dev);
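/*
 * Example (illustrative sketch): subscribing a device to a link-layer
 * multicast group, here the IPv4 all-hosts group MAC.  The _global
 * variants keep separate, globally shared bookkeeping for the entry
 * (hedged summary; see net/core/dev_addr_lists.c):
 *
 *	unsigned char grp[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
 *
 *	dev_mc_add(dev, grp);
 */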

/* Functions used for secondary unicast and multicast support */
extern void dev_set_rx_mode(struct net_device *dev);
extern void __dev_set_rx_mode(struct net_device *dev);
extern int dev_set_promiscuity(struct net_device *dev, int inc);
extern int dev_set_allmulti(struct net_device *dev, int inc);
extern void netdev_state_change(struct net_device *dev);
extern int netdev_bonding_change(struct net_device *dev,
				 unsigned long event);
extern void netdev_features_change(struct net_device *dev);
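/*
 * Example (illustrative sketch): dev_set_promiscuity() and
 * dev_set_allmulti() take a signed increment rather than an on/off
 * flag, so independent users nest correctly.  To enter promiscuous
 * mode and later drop that reference again:
 *
 *	dev_set_promiscuity(dev, 1);
 *	...
 *	dev_set_promiscuity(dev, -1);
 */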

extern void dev_load(struct net *net, const char *name);
extern void dev_mcast_init(void);
extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					       struct rtnl_link_stats64 *storage);
extern void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
				    const struct net_device_stats *netdev_stats);
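/*
 * Example (illustrative sketch): taking a snapshot of a device's
 * counters.  The caller provides the storage, which dev_get_stats()
 * fills from the driver's ndo_get_stats64/ndo_get_stats callback or,
 * failing that, from dev->stats:
 *
 *	struct rtnl_link_stats64 stats;
 *
 *	dev_get_stats(dev, &stats);
 *	pr_info("%s: %llu packets received\n", dev->name,
 *		(unsigned long long)stats.rx_packets);
 */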

extern int netdev_max_backlog;
extern int netdev_tstamp_prequeue;
extern int weight_p;
extern int bpf_jit_enable;
extern int netdev_set_master(struct net_device *dev, struct net_device *master);
extern int netdev_set_bond_master(struct net_device *dev,
				  struct net_device *master);
extern int skb_checksum_help(struct sk_buff *skb);
extern struct sk_buff *skb_gso_segment(struct sk_buff *skb,
				       netdev_features_t features);
#ifdef CONFIG_BUG
extern void netdev_rx_csum_fault(struct net_device *dev);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
}
#endif
/* rx skb timestamps */
extern void net_enable_timestamp(void);
extern void net_disable_timestamp(void);

#ifdef CONFIG_PROC_FS
extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
extern void dev_seq_stop(struct seq_file *seq, void *v);
#endif

extern int netdev_class_create_file(struct class_attribute *class_attr);
extern void netdev_class_remove_file(struct class_attribute *class_attr);

extern struct kobj_ns_type_operations net_ns_type_operations;

extern const char *netdev_drivername(const struct net_device *dev);

extern void linkwatch_run_queue(void);

static inline netdev_features_t netdev_get_wanted_features(
	struct net_device *dev)
{
	return (dev->features & ~dev->hw_features) | dev->wanted_features;
}
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask);
int __netdev_update_features(struct net_device *dev);
void netdev_update_features(struct net_device *dev);
void netdev_change_features(struct net_device *dev);
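/*
 * Note: netdev_update_features() should be called (under rtnl) after a
 * driver changes dev->hw_features or dev->wanted_features so that the
 * core can recompute dev->features and notify if the result changed.
 * A hedged sketch for retracting an offload at runtime:
 *
 *	dev->wanted_features &= ~NETIF_F_TSO;
 *	dev->hw_features &= ~NETIF_F_TSO;
 *	netdev_update_features(dev);
 */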

void netif_stacked_transfer_operstate(const struct net_device *rootdev,
				      struct net_device *dev);

netdev_features_t netif_skb_features(struct sk_buff *skb);

static inline bool net_gso_ok(netdev_features_t features, int gso_type)
{
	netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;

	/* check flags correspondence */
	BUILD_BUG_ON(SKB_GSO_TCPV4   != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP     != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_DODGY   != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCPV6   != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_FCOE    != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));

	return (features & feature) == feature;
}

static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
{
	return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
	       (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
}

static inline bool netif_needs_gso(struct sk_buff *skb,
				   netdev_features_t features)
{
	return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
		unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
			 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
}
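/*
 * Example (illustrative sketch): the core transmit path uses these
 * helpers to decide whether a GSO skb must be segmented in software
 * before it reaches the device:
 *
 *	netdev_features_t features = netif_skb_features(skb);
 *
 *	if (netif_needs_gso(skb, features)) {
 *		struct sk_buff *segs = skb_gso_segment(skb, features);
 *
 *		if (IS_ERR(segs))
 *			goto drop;
 *		... transmit each segment on the segs->next chain ...
 *	}
 */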

static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	dev->gso_max_size = size;
}

static inline bool netif_is_bond_slave(struct net_device *dev)
{
	return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
}

static inline bool netif_supports_nofcs(struct net_device *dev)
{
	return dev->priv_flags & IFF_SUPP_NOFCS;
}

extern struct pernet_operations __net_initdata loopback_net_ops;

/* Logging, debugging and troubleshooting/diagnostic helpers. */

/* netdev_printk helpers, similar to dev_printk */

static inline const char *netdev_name(const struct net_device *dev)
{
	if (dev->reg_state != NETREG_REGISTERED)
		return "(unregistered net_device)";
	return dev->name;
}

extern int __netdev_printk(const char *level, const struct net_device *dev,
			   struct va_format *vaf);

extern __printf(3, 4)
int netdev_printk(const char *level, const struct net_device *dev,
		  const char *format, ...);
extern __printf(2, 3)
int netdev_emerg(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_alert(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_crit(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_err(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_warn(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_notice(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_info(const struct net_device *dev, const char *format, ...);
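/*
 * Example (illustrative sketch): these helpers prefix the message with
 * driver and device identification, so callers pass a bare format:
 *
 *	netdev_err(dev, "TX watchdog timeout on queue %d\n", txq);
 *
 * The __printf() annotations let the compiler type-check the format
 * string against its arguments.
 */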

#define MODULE_ALIAS_NETDEV(device) \
	MODULE_ALIAS("netdev-" device)

#if defined(CONFIG_DYNAMIC_DEBUG)
#define netdev_dbg(__dev, format, args...)			\
do {								\
	dynamic_netdev_dbg(__dev, format, ##args);		\
} while (0)
#elif defined(DEBUG)
#define netdev_dbg(__dev, format, args...)			\
	netdev_printk(KERN_DEBUG, __dev, format, ##args)
#else
#define netdev_dbg(__dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, __dev, format, ##args); \
	0;							\
})
#endif
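/*
 * Note: the "if (0)" body in the fallback branch above is deliberate:
 * it compiles the call away entirely while still letting the compiler
 * type-check the format string and its arguments.
 */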

#if defined(VERBOSE_DEBUG)
#define netdev_vdbg	netdev_dbg
#else

#define netdev_vdbg(dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, dev, format, ##args);	\
	0;							\
})
#endif

/*
 * netdev_WARN() acts like netdev_printk(), but with the key difference
 * of using a WARN/WARN_ON to get the message out, including the
 * file/line information and a backtrace.
 */
#define netdev_WARN(dev, format, args...)			\
	WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args)

/* netif printk helpers, similar to netdev_printk */

#define netif_printk(priv, type, level, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_printk(level, (dev), fmt, ##args);	\
} while (0)

#define netif_level(level, priv, type, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_##level(dev, fmt, ##args);		\
} while (0)

#define netif_emerg(priv, type, dev, fmt, args...)		\
	netif_level(emerg, priv, type, dev, fmt, ##args)
#define netif_alert(priv, type, dev, fmt, args...)		\
	netif_level(alert, priv, type, dev, fmt, ##args)
#define netif_crit(priv, type, dev, fmt, args...)		\
	netif_level(crit, priv, type, dev, fmt, ##args)
#define netif_err(priv, type, dev, fmt, args...)		\
	netif_level(err, priv, type, dev, fmt, ##args)
#define netif_warn(priv, type, dev, fmt, args...)		\
	netif_level(warn, priv, type, dev, fmt, ##args)
#define netif_notice(priv, type, dev, fmt, args...)		\
	netif_level(notice, priv, type, dev, fmt, ##args)
#define netif_info(priv, type, dev, fmt, args...)		\
	netif_level(info, priv, type, dev, fmt, ##args)
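/*
 * Example (illustrative sketch; struct my_priv is hypothetical): the
 * netif_* variants additionally gate the message on a per-driver
 * NETIF_MSG_* bitmask, usually exposed through ethtool's msglvl:
 *
 *	struct my_priv {
 *		u32 msg_enable;
 *	};
 *
 *	netif_info(priv, link, dev, "link up, %u Mb/s\n", speed);
 *
 * prints only when priv->msg_enable has NETIF_MSG_LINK set.
 */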

#if defined(CONFIG_DYNAMIC_DEBUG)
#define netif_dbg(priv, type, netdev, format, args...)		\
do {								\
	if (netif_msg_##type(priv))				\
		dynamic_netdev_dbg(netdev, format, ##args);	\
} while (0)
#elif defined(DEBUG)
#define netif_dbg(priv, type, dev, format, args...)		\
	netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
#else
#define netif_dbg(priv, type, dev, format, args...)			\
({									\
	if (0)								\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;								\
})
#endif

#if defined(VERBOSE_DEBUG)
#define netif_vdbg	netif_dbg
#else
#define netif_vdbg(priv, type, dev, format, args...)		\
({								\
	if (0)							\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;							\
})
#endif

#endif /* __KERNEL__ */

#endif /* _LINUX_NETDEVICE_H */