#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/pm_qos.h>
#include <linux/timer.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>
#include <linux/dynamic_queue_limits.h>

#include <linux/ethtool.h>
#include <net/net_namespace.h>
#include <net/dsa.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif
#include <net/netprio_cgroup.h>

#include <linux/netdev_features.h>
#include <linux/neighbour.h>
#include <uapi/linux/netdevice.h>

struct netpoll_info;
struct device;
struct phy_device;
/* 802.11 specific */
struct wireless_dev;

#define SET_ETHTOOL_OPS(netdev, ops) \
	((netdev)->ethtool_ops = (ops))

extern void netdev_set_default_ethtool_ops(struct net_device *dev,
					   const struct ethtool_ops *ops);

/* hardware address assignment types */
#define NET_ADDR_PERM		0	/* address is permanent (default) */
#define NET_ADDR_RANDOM		1	/* address is generated randomly */
#define NET_ADDR_STOLEN		2	/* address is stolen from another device */
#define NET_ADDR_SET		3	/* address is set via dev_set_mac_address() */

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */

/* qdisc ->enqueue() return codes */
#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_DROP		0x01	/* skb dropped			*/
#define NET_XMIT_CN		0x02	/* congestion notification	*/
#define NET_XMIT_POLICED	0x03	/* skb is shot by police	*/
#define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority; prompting us to send less aggressively.
 */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)

/* Driver transmit return codes */
#define NETDEV_TX_MASK		0xf0

enum netdev_tx {
	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy */
	NETDEV_TX_LOCKED = 0x20,	/* driver tx lock was already taken */
};
typedef enum netdev_tx netdev_tx_t;

/*
 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
 * hard_start_xmit() must not return NET_XMIT_* or plain errno values directly.
 */
static inline bool dev_xmit_complete(int rc)
{
	/*
	 * Positive cases with an skb consumed by a driver:
	 * - successful transmission (rc == NETDEV_TX_OK)
	 * - error while transmitting (rc < 0)
	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
	 */
	if (likely(rc < NET_XMIT_MASK))
		return true;

	return false;
}

/*
 * Compute the worst case header length according to the protocols
 * used.
 */
#if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#else
# define LL_MAX_HEADER 32
#endif

#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
    !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

/*
 * Old network device statistics. Fields are native words
 * (unsigned long) so they can be read and written atomically.
 */
struct net_device_stats {
	unsigned long	rx_packets;
	unsigned long	tx_packets;
	unsigned long	rx_bytes;
	unsigned long	tx_bytes;
	unsigned long	rx_errors;
	unsigned long	tx_errors;
	unsigned long	rx_dropped;
	unsigned long	tx_dropped;
	unsigned long	multicast;
	unsigned long	collisions;
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;
	unsigned long	rx_crc_errors;
	unsigned long	rx_frame_errors;
	unsigned long	rx_fifo_errors;
	unsigned long	rx_missed_errors;
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};

#include <linux/cache.h>
#include <linux/skbuff.h>

#ifdef CONFIG_RPS
#include <linux/static_key.h>
extern struct static_key rps_needed;
#endif

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netdev_hw_addr {
	struct list_head	list;
	unsigned char		addr[MAX_ADDR_LEN];
	unsigned char		type;
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_SLAVE		3
#define NETDEV_HW_ADDR_T_UNICAST	4
#define NETDEV_HW_ADDR_T_MULTICAST	5
	bool			global_use;
	int			sync_cnt;
	int			refcount;
	int			synced;
	struct rcu_head		rcu_head;
};

struct netdev_hw_addr_list {
	struct list_head	list;
	int			count;
};

#define netdev_hw_addr_list_count(l) ((l)->count)
#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
#define netdev_hw_addr_list_for_each(ha, l) \
	list_for_each_entry(ha, &(l)->list, list)

#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->uc)

#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->mc)

struct hh_cache {
	u16		hh_len;
	u16		__pad;
	seqlock_t	hh_lock;

	/* cached hardware header; allow for machine alignment needs.	*/
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
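
/*
 * Usage sketch (illustrative, not part of the original header): reserving
 * link-layer headroom when allocating an skb destined for "dev", so that
 * dev_hard_header() and the driver never have to reallocate the head:
 *
 *	skb = alloc_skb(LL_RESERVED_SPACE(dev) + payload_len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 *	...fill in payload, then call dev_hard_header(skb, dev, ...)...
 */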

struct header_ops {
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned int len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*rebuild)(struct sk_buff *skb);
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
};

/* These flag bits are private to the generic network queueing
 * layer; they may not be explicitly referenced by any other
 * code.
 */
enum netdev_state_t {
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
};

/*
 * This structure holds at boot time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

extern int __init netdev_boot_setup(char *str);

/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit.  This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-cpu poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	unsigned int		gro_count;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	spinlock_t		poll_lock;
	int			poll_owner;
#endif
	struct net_device	*dev;
	struct sk_buff		*gro_list;
	struct sk_buff		*skb;
	struct list_head	dev_list;
};

enum {
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_DISABLE,	/* Disable pending */
	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
};

enum gro_result {
	GRO_MERGED,
	GRO_MERGED_FREE,
	GRO_HELD,
	GRO_NORMAL,
	GRO_DROP,
};
typedef enum gro_result gro_result_t;

/*
 * rx_handler result codes: an rx_handler (registered with
 * netdev_rx_handler_register()) tells the core what to do next with a
 * received skb:
 *
 * RX_HANDLER_CONSUMED: the skb was consumed by the handler; do no further
 * processing.
 * RX_HANDLER_ANOTHER: skb->dev was changed by the handler; do another round
 * of delivery for the new device.
 * RX_HANDLER_EXACT: force delivery only to protocol handlers registered for
 * the exact device (ptype->dev == skb->dev).
 * RX_HANDLER_PASS: continue processing as if no rx_handler were registered.
 */
enum rx_handler_result {
	RX_HANDLER_CONSUMED,
	RX_HANDLER_ANOTHER,
	RX_HANDLER_EXACT,
	RX_HANDLER_PASS,
};
typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);

extern void __napi_schedule(struct napi_struct *n);

static inline bool napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_schedule_prep - check if napi can be scheduled
 *	@n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * insure only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
static inline bool napi_schedule_prep(struct napi_struct *n)
{
	return !napi_disable_pending(n) &&
		!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}

/**
 *	napi_schedule - schedule NAPI poll
 *	@n: napi context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}

/* Try to reschedule poll. Called by dev->poll() after napi_complete().  */
static inline bool napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return true;
	}
	return false;
}

/**
 *	napi_complete - NAPI processing complete
 *	@n: napi context
 *
 * Mark NAPI processing as complete.
 */
extern void __napi_complete(struct napi_struct *n);
extern void napi_complete(struct napi_struct *n);

/**
 *	napi_disable - prevent NAPI from scheduling
 *	@n: napi context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
static inline void napi_disable(struct napi_struct *n)
{
	set_bit(NAPI_STATE_DISABLE, &n->state);
	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	clear_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_enable - enable NAPI scheduling
 *	@n: napi context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}

#ifdef CONFIG_SMP
/**
 *	napi_synchronize - wait until NAPI is not running
 *	@n: napi context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	while (test_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
}
#else
# define napi_synchronize(n)	barrier()
#endif

enum netdev_queue_state_t {
	__QUEUE_STATE_DRV_XOFF,
	__QUEUE_STATE_STACK_XOFF,
	__QUEUE_STATE_FROZEN,
#define QUEUE_STATE_ANY_XOFF ((1 << __QUEUE_STATE_DRV_XOFF) | \
			      (1 << __QUEUE_STATE_STACK_XOFF))
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
					(1 << __QUEUE_STATE_FROZEN))
};

/*
 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue.  The
 * netif_tx_* functions below are used to manipulate this flag.  The
 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
 * queue independently.  The netif_xmit_*stopped functions below are called
 * to check if the queue has been stopped by the driver or stack (either
 * of the XOFF bits are set in the state).  Drivers should not write to the
 * stack state.
 */
struct netdev_queue {
/*
 * read mostly part
 */
	struct net_device	*dev;
	struct Qdisc		*qdisc;
	struct Qdisc		*qdisc_sleeping;
#ifdef CONFIG_SYSFS
	struct kobject		kobj;
#endif
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	int			numa_node;
#endif
/*
 * write mostly part
 */
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	int			xmit_lock_owner;
	/*
	 * please use this field instead of dev->trans_start
	 */
	unsigned long		trans_start;

	/*
	 * Number of TX timeouts for this queue
	 * (/sys/class/net/DEV/Q/trans_timeout)
	 */
	unsigned long		trans_timeout;

	unsigned long		state;

#ifdef CONFIG_BQL
	struct dql		dql;
#endif
} ____cacheline_aligned_in_smp;

static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	return q->numa_node;
#else
	return NUMA_NO_NODE;
#endif
}

static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	q->numa_node = node;
#endif
}

#ifdef CONFIG_RPS
/*
 * This structure holds an RPS map which can be of variable length.  The
 * map is an array of CPUs.
 */
struct rps_map {
	unsigned int len;
	struct rcu_head rcu;
	u16 cpus[0];
};
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))

/*
 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
 * tail pointer for that CPU's input queue at the time of last enqueue, and
 * a hardware filter index.
 */
struct rps_dev_flow {
	u16 cpu;
	u16 filter;
	unsigned int last_qtail;
};
#define RPS_NO_FILTER 0xffff

/*
 * The rps_dev_flow_table structure contains a table of flow mappings.
 */
struct rps_dev_flow_table {
	unsigned int mask;
	struct rcu_head rcu;
	struct rps_dev_flow flows[0];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
    ((_num) * sizeof(struct rps_dev_flow)))

/*
 * The rps_sock_flow_table contains mappings of flows to the last CPU
 * on which they were processed by the application (set in recvmsg).
 */
struct rps_sock_flow_table {
	unsigned int mask;
	u16 ents[0];
};
#define	RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
    ((_num) * sizeof(u16)))

#define RPS_NO_CPU 0xffff

static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
					u32 hash)
{
	if (table && hash) {
		unsigned int cpu, index = hash & table->mask;

		/* We only give a hint, preemption can change cpu under us */
		cpu = raw_smp_processor_id();

		if (table->ents[index] != cpu)
			table->ents[index] = cpu;
	}
}

static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
				       u32 hash)
{
	if (table && hash)
		table->ents[hash & table->mask] = RPS_NO_CPU;
}

extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;

#ifdef CONFIG_RFS_ACCEL
extern bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
				u32 flow_id, u16 filter_id);
#endif

/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
	struct rps_map __rcu		*rps_map;
	struct rps_dev_flow_table __rcu	*rps_flow_table;
	struct kobject			kobj;
	struct net_device		*dev;
} ____cacheline_aligned_in_smp;
#endif /* CONFIG_RPS */

#ifdef CONFIG_XPS
/*
 * This structure holds an XPS map which can be of variable length.  The
 * map is an array of queues.
 */
struct xps_map {
	unsigned int len;
	unsigned int alloc_len;
	struct rcu_head rcu;
	u16 queues[0];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map)) \
    / sizeof(u16))

/*
 * This structure holds all XPS maps for device.  Maps are indexed by CPU.
 */
struct xps_dev_maps {
	struct rcu_head rcu;
	struct xps_map __rcu *cpu_map[0];
};
#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) + \
    (nr_cpu_ids * sizeof(struct xps_map *)))
#endif /* CONFIG_XPS */

#define TC_MAX_QUEUE	16
#define TC_BITMASK	15
/* HW offloaded queuing disciplines txq count and offset maps */
struct netdev_tc_txq {
	u16 count;
	u16 offset;
};

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
/*
 * This structure is to hold information about the device
 * configured to run FCoE protocol stack.
 */
struct netdev_fcoe_hbainfo {
	char	manufacturer[64];
	char	serial_number[64];
	char	hardware_version[64];
	char	driver_version[64];
	char	optionrom_version[64];
	char	firmware_version[64];
	char	model[256];
	char	model_description[256];
};
#endif

/*
 * This structure defines the management hooks for network devices.
 * The following hooks can be defined; unless noted otherwise, they are
 * optional and can be filled with a null pointer.
 */
struct net_device_ops {
	int			(*ndo_init)(struct net_device *dev);
	void			(*ndo_uninit)(struct net_device *dev);
	int			(*ndo_open)(struct net_device *dev);
	int			(*ndo_stop)(struct net_device *dev);
	netdev_tx_t		(*ndo_start_xmit) (struct sk_buff *skb,
						   struct net_device *dev);
	u16			(*ndo_select_queue)(struct net_device *dev,
						    struct sk_buff *skb);
	void			(*ndo_change_rx_flags)(struct net_device *dev,
						       int flags);
	void			(*ndo_set_rx_mode)(struct net_device *dev);
	int			(*ndo_set_mac_address)(struct net_device *dev,
						       void *addr);
	int			(*ndo_validate_addr)(struct net_device *dev);
	int			(*ndo_do_ioctl)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
	int			(*ndo_set_config)(struct net_device *dev,
						  struct ifmap *map);
	int			(*ndo_change_mtu)(struct net_device *dev,
						  int new_mtu);
	int			(*ndo_neigh_setup)(struct net_device *dev,
						   struct neigh_parms *);
	void			(*ndo_tx_timeout) (struct net_device *dev);

	struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
						     struct rtnl_link_stats64 *storage);
	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);

	int			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
						       __be16 proto, u16 vid);
	int			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
							__be16 proto, u16 vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*ndo_poll_controller)(struct net_device *dev);
	int			(*ndo_netpoll_setup)(struct net_device *dev,
						     struct netpoll_info *info,
						     gfp_t gfp);
	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
	int			(*ndo_set_vf_mac)(struct net_device *dev,
						  int queue, u8 *mac);
	int			(*ndo_set_vf_vlan)(struct net_device *dev,
						   int queue, u16 vlan, u8 qos);
	int			(*ndo_set_vf_tx_rate)(struct net_device *dev,
						      int vf, int rate);
	int			(*ndo_set_vf_spoofchk)(struct net_device *dev,
						       int vf, bool setting);
	int			(*ndo_get_vf_config)(struct net_device *dev,
						     int vf,
						     struct ifla_vf_info *ivf);
	int			(*ndo_set_vf_port)(struct net_device *dev,
						   int vf,
						   struct nlattr *port[]);
	int			(*ndo_get_vf_port)(struct net_device *dev,
						   int vf, struct sk_buff *skb);
	int			(*ndo_setup_tc)(struct net_device *dev, u8 tc);
#if IS_ENABLED(CONFIG_FCOE)
	int			(*ndo_fcoe_enable)(struct net_device *dev);
	int			(*ndo_fcoe_disable)(struct net_device *dev);
	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
						      u16 xid,
						      struct scatterlist *sgl,
						      unsigned int sgc);
	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
						     u16 xid);
	int			(*ndo_fcoe_ddp_target)(struct net_device *dev,
						       u16 xid,
						       struct scatterlist *sgl,
						       unsigned int sgc);
	int			(*ndo_fcoe_get_hbainfo)(struct net_device *dev,
							struct netdev_fcoe_hbainfo *hbainfo);
#endif

#if IS_ENABLED(CONFIG_LIBFCOE)
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
						    u64 *wwn, int type);
#endif

#ifdef CONFIG_RFS_ACCEL
	int			(*ndo_rx_flow_steer)(struct net_device *dev,
						     const struct sk_buff *skb,
						     u16 rxq_index,
						     u32 flow_id);
#endif
	int			(*ndo_add_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	int			(*ndo_del_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	netdev_features_t	(*ndo_fix_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_set_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_neigh_construct)(struct neighbour *n);
	void			(*ndo_neigh_destroy)(struct neighbour *n);

	int			(*ndo_fdb_add)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 flags);
	int			(*ndo_fdb_del)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr);
	int			(*ndo_fdb_dump)(struct sk_buff *skb,
						struct netlink_callback *cb,
						struct net_device *dev,
						int idx);

	int			(*ndo_bridge_setlink)(struct net_device *dev,
						      struct nlmsghdr *nlh);
	int			(*ndo_bridge_getlink)(struct sk_buff *skb,
						      u32 pid, u32 seq,
						      struct net_device *dev,
						      u32 filter_mask);
	int			(*ndo_bridge_dellink)(struct net_device *dev,
						      struct nlmsghdr *nlh);
	int			(*ndo_change_carrier)(struct net_device *dev,
						      bool new_carrier);
};
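
/*
 * Usage sketch (illustrative, not part of the original header): a minimal
 * driver fills in only the hooks it implements and leaves the rest NULL.
 * The my_* functions are hypothetical driver callbacks; the eth_* helpers
 * are the stock Ethernet implementations.
 *
 *	static const struct net_device_ops my_netdev_ops = {
 *		.ndo_open		= my_open,
 *		.ndo_stop		= my_stop,
 *		.ndo_start_xmit		= my_start_xmit,
 *		.ndo_set_rx_mode	= my_set_rx_mode,
 *		.ndo_set_mac_address	= eth_mac_addr,
 *		.ndo_validate_addr	= eth_validate_addr,
 *		.ndo_change_mtu		= eth_change_mtu,
 *	};
 *
 * installed before registration with:
 *
 *	dev->netdev_ops = &my_netdev_ops;
 */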

/*
 *	The DEVICE structure.
 *	Actually, this whole structure is a big mistake.  It mixes I/O
 *	data with strictly "high-level" data, and it has to know about
 *	almost every data structure used in the INET module.
 */
struct net_device {
	/*
	 * This is the first field of the "visible" part of this structure
	 * (i.e. as seen by users in the "Space.c" file).  It is the name
	 * of the interface.
	 */
	char			name[IFNAMSIZ];

	/* device name hash chain, please keep it close to name[] */
	struct hlist_node	name_hlist;

	/* snmp alias */
	char			*ifalias;

	/*
	 *	I/O specific fields
	 *	FIXME: Merge these and struct ifmap into one
	 */
	unsigned long		mem_end;	/* shared mem end	*/
	unsigned long		mem_start;	/* shared mem start	*/
	unsigned long		base_addr;	/* device I/O address	*/
	unsigned int		irq;		/* device IRQ number	*/

	/*
	 *	Some hardware also needs these fields, but they are not
	 *	part of the usual set specified in Space.c.
	 */
	unsigned long		state;

	struct list_head	dev_list;
	struct list_head	napi_list;
	struct list_head	unreg_list;
	struct list_head	upper_dev_list; /* List of upper devices */

	/* currently active device features */
	netdev_features_t	features;
	/* user-changeable features */
	netdev_features_t	hw_features;
	/* user-requested features */
	netdev_features_t	wanted_features;
	/* mask of features inheritable by VLAN devices */
	netdev_features_t	vlan_features;
	/* mask of features inherited by encapsulating devices.
	 * This field indicates what encapsulation offloads
	 * the hardware is capable of doing, and drivers will
	 * need to set them appropriately.
	 */
	netdev_features_t	hw_enc_features;

	/* Interface index. Unique device identifier	*/
	int			ifindex;
	int			iflink;

	struct net_device_stats	stats;
	atomic_long_t		rx_dropped; /* dropped packets by core network
					     * Do not use this in drivers.
					     */

#ifdef CONFIG_WIRELESS_EXT
	/* List of functions to handle Wireless Extensions (instead of ioctl).
	 * See <net/iw_handler.h> for details. */
	const struct iw_handler_def *wireless_handlers;
	/* Instance data managed by the core of Wireless Extensions. */
	struct iw_public_data	*wireless_data;
#endif

	/* Management operations */
	const struct net_device_ops *netdev_ops;
	const struct ethtool_ops *ethtool_ops;

	/* Hardware header description */
	const struct header_ops *header_ops;

	unsigned int		flags;	/* interface flags (a la BSD)	*/
	unsigned int		priv_flags; /* Like 'flags' but invisible to userspace.
					     * See if.h for definitions. */
	unsigned short		gflags;
	unsigned short		padded;	/* How much padding added by alloc_netdev() */

	unsigned char		operstate; /* RFC2863 operstate */
	unsigned char		link_mode; /* mapping policy to operstate */

	unsigned char		if_port;	/* Selectable AUI, TP,..*/
	unsigned char		dma;		/* DMA channel		*/

	unsigned int		mtu;	/* interface MTU value		*/
	unsigned short		type;	/* interface hardware type	*/
	unsigned short		hard_header_len;	/* hardware hdr length	*/

	/* extra head- and tailroom the hardware may need, but not in all cases
	 * can this be guaranteed, especially tailroom. Some cases also use
	 * LL_MAX_HEADER instead to allocate the skb.
	 */
	unsigned short		needed_headroom;
	unsigned short		needed_tailroom;

	/* Interface address info. */
	unsigned char		perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
	unsigned char		addr_assign_type; /* hw address assignment type */
	unsigned char		addr_len;	/* hardware address length	*/
	unsigned char		neigh_priv_len;
	unsigned short		dev_id;		/* for shared network cards */

	spinlock_t		addr_list_lock;
	struct netdev_hw_addr_list	uc;	/* Unicast mac addresses */
	struct netdev_hw_addr_list	mc;	/* Multicast mac addresses */
	struct netdev_hw_addr_list	dev_addrs; /* list of device
						    * hw addresses
						    */
#ifdef CONFIG_SYSFS
	struct kset		*queues_kset;
#endif

	bool			uc_promisc;
	unsigned int		promiscuity;
	unsigned int		allmulti;


	/* Protocol specific pointers */

#if IS_ENABLED(CONFIG_VLAN_8021Q)
	struct vlan_info __rcu	*vlan_info;	/* VLAN info */
#endif
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_switch_tree	*dsa_ptr;	/* dsa specific data */
#endif
	void			*atalk_ptr;	/* AppleTalk link	*/
	struct in_device __rcu	*ip_ptr;	/* IPv4 specific data	*/
	struct dn_dev __rcu	*dn_ptr;	/* DECnet specific data */
	struct inet6_dev __rcu	*ip6_ptr;	/* IPv6 specific data */
	void			*ax25_ptr;	/* AX.25 specific data */
	struct wireless_dev	*ieee80211_ptr;	/* IEEE 802.11 specific data,
						   assign before registering */

/*
 * Cache lines mostly used on receive path (including eth_type_trans())
 */
	unsigned long		last_rx;	/* Time of last Rx */

	/* Interface address info used in eth_type_trans() */
	unsigned char		*dev_addr;	/* hw address, (before bcast
						   because most packets are
						   unicast) */

#ifdef CONFIG_RPS
	struct netdev_rx_queue	*_rx;

	/* Number of RX queues allocated at register_netdev() time */
	unsigned int		num_rx_queues;

	/* Number of RX queues currently active in device */
	unsigned int		real_num_rx_queues;
#endif

	rx_handler_func_t __rcu	*rx_handler;
	void __rcu		*rx_handler_data;

	struct netdev_queue __rcu *ingress_queue;
	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast add	*/

/*
 * Cache lines mostly used on transmit path
 */
	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;

	/* Number of TX queues allocated at alloc_netdev_mq() time  */
	unsigned int		num_tx_queues;

	/* Number of TX queues currently active in device  */
	unsigned int		real_num_tx_queues;

	/* root qdisc from userspace point of view */
	struct Qdisc		*qdisc;

	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
	spinlock_t		tx_global_lock;

#ifdef CONFIG_XPS
	struct xps_dev_maps __rcu *xps_maps;
#endif
#ifdef CONFIG_RFS_ACCEL
	/* CPU reverse-mapping for RX completion interrupts, indexed
	 * by RX queue number.  Assigned by driver.  This must only be
	 * set if the ndo_rx_flow_steer operation is defined. */
	struct cpu_rmap		*rx_cpu_rmap;
#endif

	/*
	 * trans_start here is expensive for high speed devices on SMP,
	 * please use netdev_queue->trans_start instead.
	 */
	unsigned long		trans_start;	/* Time (in jiffies) of last Tx	*/

	int			watchdog_timeo; /* used by dev_watchdog() */
	struct timer_list	watchdog_timer;

	/* Number of references to this device */
	int __percpu		*pcpu_refcnt;

	/* delayed register/unregister */
	struct list_head	todo_list;
	/* device index hash chain */
	struct hlist_node	index_hlist;

	struct list_head	link_watch_list;

	/* register/unregister state machine */
	enum { NETREG_UNINITIALIZED=0,
	       NETREG_REGISTERED,	/* completed register_netdevice */
	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
	       NETREG_UNREGISTERED,	/* completed unregister todo */
	       NETREG_RELEASED,		/* called free_netdev */
	       NETREG_DUMMY,		/* dummy device for NAPI poll */
	} reg_state:8;

	bool dismantle;		/* device is going to be freed */

	enum {
		RTNL_LINK_INITIALIZED,
		RTNL_LINK_INITIALIZING,
	} rtnl_link_state:16;

	/* Called from unregister, can be used to call free_netdev */
	void (*destructor)(struct net_device *dev);

#ifdef CONFIG_NETPOLL
	struct netpoll_info __rcu	*npinfo;
#endif

#ifdef CONFIG_NET_NS
	/* Network namespace this network device is inside */
	struct net		*nd_net;
#endif

	/* mid-layer private */
	union {
		void				*ml_priv;
		struct pcpu_lstats __percpu	*lstats; /* loopback stats */
		struct pcpu_tstats __percpu	*tstats; /* tunnel stats */
		struct pcpu_dstats __percpu	*dstats; /* dummy stats */
		struct pcpu_vstats __percpu	*vstats; /* veth stats */
	};
	/* GARP */
	struct garp_port __rcu	*garp_port;
	/* MRP */
	struct mrp_port __rcu	*mrp_port;

	/* class/net/name entry */
	struct device		dev;
	/* space for optional device, statistics, and wireless sysfs groups */
	const struct attribute_group *sysfs_groups[4];

	/* rtnetlink link ops */
	const struct rtnl_link_ops *rtnl_link_ops;

	/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE		65536
	unsigned int		gso_max_size;
#define GSO_MAX_SEGS		65535
	u16			gso_max_segs;

#ifdef CONFIG_DCB
	/* Data Center Bridging netlink ops */
	const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
	u8 num_tc;
	struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
	u8 prio_tc_map[TC_BITMASK + 1];

#if IS_ENABLED(CONFIG_FCOE)
	/* max exchange id for FCoE LRO by ddp */
	unsigned int		fcoe_ddp_xid;
#endif
#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
	struct netprio_map __rcu *priomap;
#endif
	/* phy device may attach itself for hardware timestamping */
	struct phy_device *phydev;

	struct lock_class_key *qdisc_tx_busylock;

	/* group the device belongs to */
	int group;

	struct pm_qos_request	pm_qos_req;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)

#define NETDEV_ALIGN		32

static inline
int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
{
	return dev->prio_tc_map[prio & TC_BITMASK];
}

static inline
int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

	dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
	return 0;
}

static inline
void netdev_reset_tc(struct net_device *dev)
{
	dev->num_tc = 0;
	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
}

static inline
int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

	dev->tc_to_txq[tc].count = count;
	dev->tc_to_txq[tc].offset = offset;
	return 0;
}

static inline
int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
{
	if (num_tc > TC_MAX_QUEUE)
		return -EINVAL;

	dev->num_tc = num_tc;
	return 0;
}

static inline
int netdev_get_num_tc(struct net_device *dev)
{
	return dev->num_tc;
}
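
/*
 * Usage sketch (illustrative, not part of the original header): a driver
 * advertising two hardware traffic classes over an 8-queue device, e.g.
 * from its ndo_setup_tc() callback. The queue counts/offsets are made up.
 *
 *	netdev_set_num_tc(dev, 2);
 *	netdev_set_tc_queue(dev, 0, 4, 0);	TC 0 -> txq 0..3
 *	netdev_set_tc_queue(dev, 1, 4, 4);	TC 1 -> txq 4..7
 *	netdev_set_prio_tc_map(dev, 0, 0);	prio 0 -> TC 0
 *	netdev_set_prio_tc_map(dev, 1, 1);	prio 1 -> TC 1
 */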

static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
					 unsigned int index)
{
	return &dev->_tx[index];
}

static inline void netdev_for_each_tx_queue(struct net_device *dev,
					    void (*f)(struct net_device *,
						      struct netdev_queue *,
						      void *),
					    void *arg)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		f(dev, &dev->_tx[i], arg);
}

extern struct netdev_queue *netdev_pick_tx(struct net_device *dev,
					   struct sk_buff *skb);
extern u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);

/*
 * Net namespace inlines
 */
static inline
struct net *dev_net(const struct net_device *dev)
{
	return read_pnet(&dev->nd_net);
}

static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
#ifdef CONFIG_NET_NS
	release_net(dev->nd_net);
	dev->nd_net = hold_net(net);
#endif
}

static inline bool netdev_uses_dsa_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_DSA
	if (dev->dsa_ptr != NULL)
		return dsa_uses_dsa_tags(dev->dsa_ptr);
#endif
	return false;
}

static inline bool netdev_uses_trailer_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_TRAILER
	if (dev->dsa_ptr != NULL)
		return dsa_uses_trailer_tags(dev->dsa_ptr);
#endif
	return false;
}

/**
 *	netdev_priv - access network device private data
 *	@dev: network device
 *
 * Get network device private data
 */
static inline void *netdev_priv(const struct net_device *dev)
{
	return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
}

/* Set the sysfs physical device reference for the network logical device
 * if set prior to registration will cause a symlink during initialization.
 */
#define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))

/* Set the sysfs device type for the network logical device to allow
 * fine-grained identification of different network device types. For
 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
 */
#define SET_NETDEV_DEVTYPE(net, devtype)	((net)->dev.type = (devtype))

/* Default NAPI poll() weight
 * Device drivers are strongly advised to not use bigger value
 */
#define NAPI_POLL_WEIGHT 64

/**
 *	netif_napi_add - initialize a napi context
 *	@dev:  network device
 *	@napi: napi context
 *	@poll: polling function
 *	@weight: default weight
 *
 * netif_napi_add() must be used to initialize a napi context prior to calling
 * *any* of the other napi related functions.
 */
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight);

/**
 *	netif_napi_del - remove a napi context
 *	@napi: napi context
 *
 * netif_napi_del() removes a napi context from the network device napi list
 */
void netif_napi_del(struct napi_struct *napi);
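
/*
 * Usage sketch (illustrative, not part of the original header): a typical
 * driver wires NAPI up in three places. struct my_priv, my_rx_clean() and
 * my_irq_enable() are hypothetical driver pieces.
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct my_priv *priv = container_of(napi, struct my_priv, napi);
 *		int done = my_rx_clean(priv, budget);	process up to "budget" packets
 *
 *		if (done < budget) {
 *			napi_complete(napi);
 *			my_irq_enable(priv);		re-arm the RX interrupt
 *		}
 *		return done;
 *	}
 *
 * probe:	netif_napi_add(dev, &priv->napi, my_poll, NAPI_POLL_WEIGHT);
 * ISR:		disable the RX interrupt, then napi_schedule(&priv->napi);
 * teardown:	napi_disable(&priv->napi); netif_napi_del(&priv->napi);
 */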

struct napi_gro_cb {
	/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
	void *frag0;

	/* Length of frag0. */
	unsigned int frag0_len;

	/* This indicates where we are processing relative to skb->data. */
	int data_offset;

	/* This is non-zero if the packet cannot be merged with the new skb. */
	int flush;

	/* Number of segments aggregated. */
	u16	count;

	/* This is non-zero if the packet may be of the same flow. */
	u8	same_flow;

	/* Free the skb? */
	u8	free;
#define NAPI_GRO_FREE		  1
#define NAPI_GRO_FREE_STOLEN_HEAD 2

	/* jiffies when first packet was created/queued */
	unsigned long age;

	/* Used in ipv6_gro_receive() */
	int	proto;

	/* used in skb_gro_receive() slow path */
	struct sk_buff *last;
};

#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)

struct packet_type {
	__be16			type;	/* This is really htons(ether_type). */
	struct net_device	*dev;	/* NULL is wildcarded here	     */
	int			(*func) (struct sk_buff *,
					 struct net_device *,
					 struct packet_type *,
					 struct net_device *);
	bool			(*id_match)(struct packet_type *ptype,
					    struct sock *sk);
	void			*af_packet_priv;
	struct list_head	list;
};

struct offload_callbacks {
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						netdev_features_t features);
	int			(*gso_send_check)(struct sk_buff *skb);
	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
						 struct sk_buff *skb);
	int			(*gro_complete)(struct sk_buff *skb);
};

struct packet_offload {
	__be16			 type;	/* This is really htons(ether_type). */
	struct offload_callbacks callbacks;
	struct list_head	 list;
};

#include <linux/notifier.h>

/* netdevice notifier chain. Please remember to update the rtnetlink
 * notification exclusion list in rtnetlink_event() when adding new types.
 */
#define NETDEV_UP	0x0001	/* For now you can't veto a device up/down */
#define NETDEV_DOWN	0x0002
#define NETDEV_REBOOT	0x0003	/* Tell a protocol stack a network interface
				   detected a hardware crash and restarted
				   - we can use this eg to kick tcp sessions
				   once done */
#define NETDEV_CHANGE	0x0004	/* Notify device state change */
#define NETDEV_REGISTER 0x0005
#define NETDEV_UNREGISTER	0x0006
#define NETDEV_CHANGEMTU	0x0007
#define NETDEV_CHANGEADDR	0x0008
#define NETDEV_GOING_DOWN	0x0009
#define NETDEV_CHANGENAME	0x000A
#define NETDEV_FEAT_CHANGE	0x000B
#define NETDEV_BONDING_FAILOVER 0x000C
#define NETDEV_PRE_UP		0x000D
#define NETDEV_PRE_TYPE_CHANGE	0x000E
#define NETDEV_POST_TYPE_CHANGE	0x000F
#define NETDEV_POST_INIT	0x0010
#define NETDEV_UNREGISTER_FINAL 0x0011
#define NETDEV_RELEASE		0x0012
#define NETDEV_NOTIFY_PEERS	0x0013
#define NETDEV_JOIN		0x0014

extern int register_netdevice_notifier(struct notifier_block *nb);
extern int unregister_netdevice_notifier(struct notifier_block *nb);
extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
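
/*
 * Usage sketch (illustrative, not part of the original header): subscribing
 * to netdevice events from a module. In this kernel the notifier's "ptr"
 * argument is the struct net_device itself.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		switch (event) {
 *		case NETDEV_UP:
 *			pr_info("%s is up\n", dev->name);
 *			break;
 *		case NETDEV_UNREGISTER:
 *			...drop any references held on dev...
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *	...
 *	register_netdevice_notifier(&my_nb);
 */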

extern rwlock_t				dev_base_lock;		/* Device list lock */

extern seqcount_t	devnet_rename_seq;	/* Device rename seq */

#define for_each_netdev(net, d)		\
		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_reverse(net, d)	\
		list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_rcu(net, d)		\
		list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n)	\
		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d)		\
		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue_rcu(net, d)		\
	list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_in_bond_rcu(bond, slave)	\
		for_each_netdev_rcu(&init_net, slave)	\
			if (netdev_master_upper_dev_get_rcu(slave) == bond)
#define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)

static inline struct net_device *next_net_device(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = dev->dev_list.next;
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *next_net_device_rcu(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = rcu_dereference(list_next_rcu(&dev->dev_list));
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *first_net_device(struct net *net)
{
	return list_empty(&net->dev_base_head) ? NULL :
		net_device_entry(net->dev_base_head.next);
}

static inline struct net_device *first_net_device_rcu(struct net *net)
{
	struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));

	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

extern int		netdev_boot_setup_check(struct net_device *dev);
extern unsigned long	netdev_boot_base(const char *prefix, int unit);
extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
					      const char *hwaddr);
extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern void		dev_add_pack(struct packet_type *pt);
extern void		dev_remove_pack(struct packet_type *pt);
extern void		__dev_remove_pack(struct packet_type *pt);
extern void		dev_add_offload(struct packet_offload *po);
extern void		dev_remove_offload(struct packet_offload *po);
extern void		__dev_remove_offload(struct packet_offload *po);

extern struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
					       unsigned short mask);
extern struct net_device *dev_get_by_name(struct net *net, const char *name);
extern struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
extern struct net_device *__dev_get_by_name(struct net *net, const char *name);
extern int		dev_alloc_name(struct net_device *dev, const char *name);
extern int		dev_open(struct net_device *dev);
extern int		dev_close(struct net_device *dev);
extern void		dev_disable_lro(struct net_device *dev);
extern int		dev_loopback_xmit(struct sk_buff *newskb);
extern int		dev_queue_xmit(struct sk_buff *skb);
extern int		register_netdevice(struct net_device *dev);
extern void		unregister_netdevice_queue(struct net_device *dev,
						   struct list_head *head);
extern void		unregister_netdevice_many(struct list_head *head);
static inline void unregister_netdevice(struct net_device *dev)
{
	unregister_netdevice_queue(dev, NULL);
}

extern int		netdev_refcnt_read(const struct net_device *dev);
extern void		free_netdev(struct net_device *dev);
extern void		synchronize_net(void);
extern int		init_dummy_netdev(struct net_device *dev);

extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
extern struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
extern int		netdev_get_name(struct net *net, char *name, int ifindex);
extern int		dev_restart(struct net_device *dev);
#ifdef CONFIG_NETPOLL_TRAP
extern int		netpoll_trap(void);
#endif
extern int		skb_gro_receive(struct sk_buff **head,
					struct sk_buff *skb);

static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->data_offset;
}

static inline unsigned int skb_gro_len(const struct sk_buff *skb)
{
	return skb->len - NAPI_GRO_CB(skb)->data_offset;
}

static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
{
	NAPI_GRO_CB(skb)->data_offset += len;
}

static inline void *skb_gro_header_fast(struct sk_buff *skb,
					unsigned int offset)
{
	return NAPI_GRO_CB(skb)->frag0 + offset;
}

static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
{
	return NAPI_GRO_CB(skb)->frag0_len < hlen;
}

static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
					unsigned int offset)
{
	if (!pskb_may_pull(skb, hlen))
		return NULL;

	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;
	return skb->data + offset;
}

static inline void *skb_gro_mac_header(struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb);
}

static inline void *skb_gro_network_header(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
	       skb_network_offset(skb);
}

static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
				  unsigned short type,
				  const void *daddr, const void *saddr,
				  unsigned int len)
{
	if (!dev->header_ops || !dev->header_ops->create)
		return 0;

	return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
}

static inline int dev_parse_header(const struct sk_buff *skb,
				   unsigned char *haddr)
{
	const struct net_device *dev = skb->dev;

	if (!dev->header_ops || !dev->header_ops->parse)
		return 0;
	return dev->header_ops->parse(skb, haddr);
}

typedef int gifconf_func_t(struct net_device *dev, char __user *bufptr, int len);
extern int		register_gifconf(unsigned int family, gifconf_func_t *gifconf);
static inline int unregister_gifconf(unsigned int family)
{
	return register_gifconf(family, NULL);
}

/*
 * Incoming packets are placed on per-cpu queues
 */
struct softnet_data {
	struct Qdisc		*output_queue;
	struct Qdisc		**output_queue_tailp;
	struct list_head	poll_list;
	struct sk_buff		*completion_queue;
	struct sk_buff_head	process_queue;

	/* stats */
	unsigned int		processed;
	unsigned int		time_squeeze;
	unsigned int		cpu_collision;
	unsigned int		received_rps;

#ifdef CONFIG_RPS
	struct softnet_data	*rps_ipi_list;

	/* Elements below can be accessed between CPUs for RPS */
	struct call_single_data	csd ____cacheline_aligned_in_smp;
	struct softnet_data	*rps_ipi_next;
	unsigned int		cpu;
	unsigned int		input_queue_head;
	unsigned int		input_queue_tail;
#endif
	unsigned int		dropped;
	struct sk_buff_head	input_pkt_queue;
	struct napi_struct	backlog;
};

static inline void input_queue_head_incr(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	sd->input_queue_head++;
#endif
}

static inline void input_queue_tail_incr_save(struct softnet_data *sd,
					      unsigned int *qtail)
{
#ifdef CONFIG_RPS
	*qtail = ++sd->input_queue_tail;
#endif
}

DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);

extern void __netif_schedule(struct Qdisc *q);

static inline void netif_schedule_queue(struct netdev_queue *txq)
{
	if (!(txq->state & QUEUE_STATE_ANY_XOFF))
		__netif_schedule(txq->qdisc);
}

static inline void netif_tx_schedule_all(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		netif_schedule_queue(netdev_get_tx_queue(dev, i));
}

static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
	clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 *	netif_start_queue - allow transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 */
static inline void netif_start_queue(struct net_device *dev)
{
	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_start_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_start_queue(txq);
	}
}

static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap()) {
		netif_tx_start_queue(dev_queue);
		return;
	}
#endif
	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state))
		__netif_schedule(dev_queue->qdisc);
}

/**
 *	netif_wake_queue - restart transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are available.
 */
static inline void netif_wake_queue(struct net_device *dev)
{
	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_wake_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_wake_queue(txq);
	}
}

static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
	if (WARN_ON(!dev_queue)) {
		pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
		return;
	}
	set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 *	netif_stop_queue - stop transmitted packets
 *	@dev: network device
 *
 *	Stop upper layers calling the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are unavailable.
 */
static inline void netif_stop_queue(struct net_device *dev)
{
	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}
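
/*
 * Usage sketch (illustrative, not part of the original header): classic TX
 * flow control in a driver's ndo_start_xmit(). my_tx_ring_full() and
 * my_tx_room() are hypothetical ring-state helpers.
 *
 *	static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		struct my_priv *priv = netdev_priv(dev);
 *
 *		...post skb to the hardware ring...
 *		if (my_tx_ring_full(priv))
 *			netif_stop_queue(dev);
 *		return NETDEV_TX_OK;
 *	}
 *
 * and in the TX-completion path, once descriptors have been reclaimed:
 *
 *	if (netif_queue_stopped(dev) && my_tx_room(priv) > MAX_SKB_FRAGS)
 *		netif_wake_queue(dev);
 */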

static inline void netif_tx_stop_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_stop_queue(txq);
	}
}

static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
	return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 *	netif_queue_stopped - test if transmit queue is flowblocked
 *	@dev: network device
 *
 *	Test if transmit queue on device is currently unable to send.
 */
static inline bool netif_queue_stopped(const struct net_device *dev)
{
	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}

static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_ANY_XOFF;
}

static inline bool netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
}

static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
					unsigned int bytes)
{
#ifdef CONFIG_BQL
	dql_queued(&dev_queue->dql, bytes);

	if (likely(dql_avail(&dev_queue->dql) >= 0))
		return;

	set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);

	/*
	 * The XOFF flag must be set before checking the dql_avail below,
	 * because in netdev_tx_completed_queue we update the dql_completed
	 * before checking the XOFF flag.
	 */
	smp_mb();

	/* check again in case another cpu has just made room avail */
	if (unlikely(dql_avail(&dev_queue->dql) >= 0))
		clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
#endif
}

static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
{
	netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
}

static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
					     unsigned int pkts, unsigned int bytes)
{
#ifdef CONFIG_BQL
	if (unlikely(!bytes))
		return;

	dql_completed(&dev_queue->dql, bytes);

	/*
	 * Without the memory barrier there is a small possiblity that
	 * netdev_tx_sent_queue will miss the update and cause the queue to
	 * be stopped forever
	 */
	smp_mb();

	if (dql_avail(&dev_queue->dql) < 0)
		return;

	if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
		netif_schedule_queue(dev_queue);
#endif
}

static inline void netdev_completed_queue(struct net_device *dev,
					  unsigned int pkts, unsigned int bytes)
{
	netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
}

static inline void netdev_tx_reset_queue(struct netdev_queue *q)
{
#ifdef CONFIG_BQL
	clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
	dql_reset(&q->dql);
#endif
}

static inline void netdev_reset_queue(struct net_device *dev_queue)
{
	netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
}
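
/*
 * Usage sketch (illustrative, not part of the original header): pairing the
 * byte queue limit (BQL) hooks. In ndo_start_xmit(), after posting an skb:
 *
 *	netdev_tx_sent_queue(txq, skb->len);
 *
 * In the TX-completion handler, after reclaiming descriptors:
 *
 *	netdev_tx_completed_queue(txq, pkts_done, bytes_done);
 *
 * And whenever the ring is flushed (e.g. in ndo_open() or after a reset):
 *
 *	netdev_tx_reset_queue(txq);
 *
 * Sent and completed byte counts must match exactly, or dql will throttle
 * the queue incorrectly.
 */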

/**
 *	netif_running - test if up
 *	@dev: network device
 *
 *	Test if the device has been brought up.
 */
static inline bool netif_running(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}

/*
 * Routines to manage the subqueues on a device.  We only need start
 * stop and a check if it's stopped.  All other device management is
 * done at the overall netdevice level.
 * Also test the device if we're multiqueue.
 */

/**
 *	netif_start_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Start individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	netif_tx_start_queue(txq);
}

/**
 *	netif_stop_subqueue - stop sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Stop individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	netif_tx_stop_queue(txq);
}

/**
 *	__netif_subqueue_stopped - test status of subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Check individual transmit queue of a device with multiple transmit queues.
 */
static inline bool __netif_subqueue_stopped(const struct net_device *dev,
					    u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	return netif_tx_queue_stopped(txq);
}

static inline bool netif_subqueue_stopped(const struct net_device *dev,
					  struct sk_buff *skb)
{
	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}

/**
 *	netif_wake_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Resume individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
		__netif_schedule(txq->qdisc);
}

#ifdef CONFIG_XPS
extern int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask,
			       u16 index);
#else
static inline int netif_set_xps_queue(struct net_device *dev,
				      struct cpumask *mask,
				      u16 index)
{
	return 0;
}
#endif

/*
 * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
 * as a distribution range limit for the returned value.
 */
static inline u16 skb_tx_hash(const struct net_device *dev,
			      const struct sk_buff *skb)
{
	return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
}

/**
 *	netif_is_multiqueue - test if device has multiple transmit queues
 *	@dev: network device
 *
 * Check if device has multiple transmit queues
 */
static inline bool netif_is_multiqueue(const struct net_device *dev)
{
	return dev->num_tx_queues > 1;
}

extern int netif_set_real_num_tx_queues(struct net_device *dev,
					unsigned int txq);

#ifdef CONFIG_RPS
extern int netif_set_real_num_rx_queues(struct net_device *dev,
					unsigned int rxq);
#else
static inline int netif_set_real_num_rx_queues(struct net_device *dev,
					       unsigned int rxq)
{
	return 0;
}
#endif

static inline int netif_copy_real_num_queues(struct net_device *to_dev,
					     const struct net_device *from_dev)
{
	int err;

	err = netif_set_real_num_tx_queues(to_dev,
					   from_dev->real_num_tx_queues);
	if (err)
		return err;
#ifdef CONFIG_RPS
	return netif_set_real_num_rx_queues(to_dev,
					    from_dev->real_num_rx_queues);
#else
	return 0;
#endif
}

#define DEFAULT_MAX_NUM_RSS_QUEUES	(8)
extern int netif_get_num_default_rss_queues(void);

/* Use this variant when it is known for sure that it
 * is executing from hardware interrupt context or with hardware interrupts
 * disabled.
 */
extern void dev_kfree_skb_irq(struct sk_buff *skb);

/* Use this variant in places where it could be invoked
 * from either hardware interrupt or other context, with hardware interrupts
 * either disabled or enabled.
 */
extern void dev_kfree_skb_any(struct sk_buff *skb);

extern int		netif_rx(struct sk_buff *skb);
extern int		netif_rx_ni(struct sk_buff *skb);
extern int		netif_receive_skb(struct sk_buff *skb);
extern gro_result_t	napi_gro_receive(struct napi_struct *napi,
					 struct sk_buff *skb);
extern void		napi_gro_flush(struct napi_struct *napi, bool flush_old);
extern struct sk_buff	*napi_get_frags(struct napi_struct *napi);
extern gro_result_t	napi_gro_frags(struct napi_struct *napi);

static inline void napi_free_frags(struct napi_struct *napi)
{
	kfree_skb(napi->skb);
	napi->skb = NULL;
}

extern int netdev_rx_handler_register(struct net_device *dev,
				      rx_handler_func_t *rx_handler,
				      void *rx_handler_data);
extern void netdev_rx_handler_unregister(struct net_device *dev);

extern bool		dev_valid_name(const char *name);
extern int		dev_ioctl(struct net *net, unsigned int cmd, void __user *);
extern int		dev_ethtool(struct net *net, struct ifreq *);
extern unsigned int	dev_get_flags(const struct net_device *);
extern int		__dev_change_flags(struct net_device *, unsigned int flags);
extern int		dev_change_flags(struct net_device *, unsigned int);
extern void		__dev_notify_flags(struct net_device *, unsigned int old_flags);
extern int		dev_change_name(struct net_device *, const char *);
extern int		dev_set_alias(struct net_device *, const char *, size_t);
extern int		dev_change_net_namespace(struct net_device *,
						 struct net *, const char *);
extern int		dev_set_mtu(struct net_device *, int);
extern void		dev_set_group(struct net_device *, int);
extern int		dev_set_mac_address(struct net_device *,
					    struct sockaddr *);
extern int		dev_change_carrier(struct net_device *,
					   bool new_carrier);
extern int		dev_hard_start_xmit(struct sk_buff *skb,
					    struct net_device *dev,
					    struct netdev_queue *txq);
extern int		dev_forward_skb(struct net_device *dev,
					struct sk_buff *skb);

extern int		netdev_budget;

/* Called by rtnetlink.c:rtnl_unlock() */
extern void netdev_run_todo(void);

/**
 *	dev_put - release reference to device
 *	@dev: network device
 *
 * Release reference to device to allow it to be freed.
 */
static inline void dev_put(struct net_device *dev)
{
	this_cpu_dec(*dev->pcpu_refcnt);
}

/**
 *	dev_hold - get reference to device
 *	@dev: network device
 *
 * Hold reference to device to keep it from being freed.
 */
static inline void dev_hold(struct net_device *dev)
{
	this_cpu_inc(*dev->pcpu_refcnt);
}

/* Carrier loss detection, dial on demand. The functions netif_carrier_on
 * and _off may be called from IRQ context, but it is caller
 * who is responsible for serialization of these calls.
 *
 * The name carrier is inappropriate, these functions should really be
 * called netif_lowerlayer_*() because they represent the state of any
 * kind of lower layer not just hardware media.
 */
extern void linkwatch_init_dev(struct net_device *dev);
extern void linkwatch_fire_event(struct net_device *dev);
extern void linkwatch_forget_dev(struct net_device *dev);

/**
 *	netif_carrier_ok - test if carrier present
 *	@dev: network device
 *
 * Check if carrier is present on device
 */
static inline bool netif_carrier_ok(const struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}

extern unsigned long dev_trans_start(struct net_device *dev);

extern void __netdev_watchdog_up(struct net_device *dev);

extern void netif_carrier_on(struct net_device *dev);

extern void netif_carrier_off(struct net_device *dev);

/**
 *	netif_dormant_on - mark device as dormant.
 *	@dev: network device
 *
 * Mark device as dormant (as per RFC2863).
 *
 * The dormant state indicates that the relevant interface is not
 * actually in a condition to pass packets (i.e., it is not 'up') but is
 * in a "pending" state, waiting for some external event.  For "on-
 * demand" interfaces, this new state identifies the situation where the
 * interface is waiting for events to place it in the up state.
 */
static inline void netif_dormant_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant_off - set device as not dormant.
 *	@dev: network device
 *
 * Device is not in dormant state.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant - test if device is dormant
 *	@dev: network device
 *
 * Check if device is dormant.
 */
static inline bool netif_dormant(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_DORMANT, &dev->state);
}

/**
 *	netif_oper_up - test if device is operational
 *	@dev: network device
 *
 * Check if carrier is operational
 */
static inline bool netif_oper_up(const struct net_device *dev)
{
	return (dev->operstate == IF_OPER_UP ||
		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}

/**
 *	netif_device_present - is device available or removed
 *	@dev: network device
 *
 * Check if device has not been removed from system.
 */
static inline bool netif_device_present(struct net_device *dev)
{
	return test_bit(__LINK_STATE_PRESENT, &dev->state);
}

extern void netif_device_detach(struct net_device *dev);

extern void netif_device_attach(struct net_device *dev);

/*
 * Network interface message level settings
 */
enum {
	NETIF_MSG_DRV		= 0x0001,
	NETIF_MSG_PROBE		= 0x0002,
	NETIF_MSG_LINK		= 0x0004,
	NETIF_MSG_TIMER		= 0x0008,
	NETIF_MSG_IFDOWN	= 0x0010,
	NETIF_MSG_IFUP		= 0x0020,
	NETIF_MSG_RX_ERR	= 0x0040,
	NETIF_MSG_TX_ERR	= 0x0080,
	NETIF_MSG_TX_QUEUED	= 0x0100,
	NETIF_MSG_INTR		= 0x0200,
	NETIF_MSG_TX_DONE	= 0x0400,
	NETIF_MSG_RX_STATUS	= 0x0800,
	NETIF_MSG_PKTDATA	= 0x1000,
	NETIF_MSG_HW		= 0x2000,
	NETIF_MSG_WOL		= 0x4000,
};

#define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)

static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
	/* use default */
	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
		return default_msg_enable_bits;
	if (debug_value == 0)	/* no output */
		return 0;
	/* set low N bits */
	return (1 << debug_value) - 1;
}
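
/*
 * Usage sketch (illustrative, not part of the original header): a driver
 * typically seeds msg_enable from a module parameter at probe time and then
 * gates its diagnostics on the netif_msg_*() tests. "debug" and "priv" are
 * hypothetical.
 *
 *	static int debug = -1;		-1 means "use driver defaults"
 *	module_param(debug, int, 0);
 *
 *	priv->msg_enable = netif_msg_init(debug,
 *					  NETIF_MSG_DRV | NETIF_MSG_PROBE |
 *					  NETIF_MSG_LINK);
 *	...
 *	if (netif_msg_link(priv))
 *		netdev_info(dev, "link is up\n");
 */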

static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
	spin_lock(&txq->_xmit_lock);
	txq->xmit_lock_owner = cpu;
}

static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
	spin_lock_bh(&txq->_xmit_lock);
	txq->xmit_lock_owner = smp_processor_id();
}

static inline bool __netif_tx_trylock(struct netdev_queue *txq)
{
	bool ok = spin_trylock(&txq->_xmit_lock);
	if (likely(ok))
		txq->xmit_lock_owner = smp_processor_id();
	return ok;
}

static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock(&txq->_xmit_lock);
}

static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock_bh(&txq->_xmit_lock);
}

static inline void txq_trans_update(struct netdev_queue *txq)
{
	if (txq->xmit_lock_owner != -1)
		txq->trans_start = jiffies;
}

/**
 *	netif_tx_lock - grab network device transmit lock
 *	@dev: network device
 *
 * Get network device transmit lock
 */
static inline void netif_tx_lock(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	spin_lock(&dev->tx_global_lock);
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* We are the only thread of execution doing a
		 * freeze, but we have to grab the _xmit_lock in
		 * order to synchronize with threads which are in
		 * the ->hard_start_xmit() handler and already
		 * checked the frozen bit.
		 */
		__netif_tx_lock(txq, cpu);
		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
		__netif_tx_unlock(txq);
	}
}

static inline void netif_tx_lock_bh(struct net_device *dev)
{
	local_bh_disable();
	netif_tx_lock(dev);
}

static inline void netif_tx_unlock(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* No need to grab the _xmit_lock here.  If the
		 * queue is not stopped for another reason, we
		 * force a schedule.
		 */
		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
		netif_schedule_queue(txq);
	}
	spin_unlock(&dev->tx_global_lock);
}

static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	netif_tx_unlock(dev);
	local_bh_enable();
}

#define HARD_TX_LOCK(dev, txq, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_lock(txq, cpu);		\
	}						\
}

#define HARD_TX_UNLOCK(dev, txq) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_unlock(txq);			\
	}						\
}

static inline void netif_tx_disable(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	local_bh_disable();
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		__netif_tx_lock(txq, cpu);
		netif_tx_stop_queue(txq);
		__netif_tx_unlock(txq);
	}
	local_bh_enable();
}

static inline void netif_addr_lock(struct net_device *dev)
{
	spin_lock(&dev->addr_list_lock);
}

static inline void netif_addr_lock_nested(struct net_device *dev)
{
	spin_lock_nested(&dev->addr_list_lock, SINGLE_DEPTH_NESTING);
}

static inline void netif_addr_lock_bh(struct net_device *dev)
{
	spin_lock_bh(&dev->addr_list_lock);
}

static inline void netif_addr_unlock(struct net_device *dev)
{
	spin_unlock(&dev->addr_list_lock);
}

static inline void netif_addr_unlock_bh(struct net_device *dev)
{
	spin_unlock_bh(&dev->addr_list_lock);
}

/*
 * dev_addrs walker. Should be used only for read access. Call with
 * rcu_read_lock held.
 */
#define for_each_dev_addr(dev, ha) \
		list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)

extern void		ether_setup(struct net_device *dev);

/* Support for loadable net-drivers */
extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
					   void (*setup)(struct net_device *),
					   unsigned int txqs, unsigned int rxqs);
#define alloc_netdev(sizeof_priv, name, setup) \
	alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)

#define alloc_netdev_mq(sizeof_priv, name, setup, count) \
	alloc_netdev_mqs(sizeof_priv, name, setup, count, count)
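
/*
 * Usage sketch (illustrative, not part of the original header): allocating an
 * Ethernet device with private state, then registering it. "struct my_priv"
 * is hypothetical; "%d" in the name is expanded by the core (eth0, eth1, ...)
 * and netdev_priv() returns the private area that follows struct net_device.
 *
 *	struct net_device *dev;
 *	struct my_priv *priv;
 *	int err;
 *
 *	dev = alloc_netdev(sizeof(struct my_priv), "eth%d", ether_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	priv = netdev_priv(dev);
 *	dev->netdev_ops = &my_netdev_ops;
 *
 *	err = register_netdev(dev);
 *	if (err)
 *		free_netdev(dev);
 */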
2592
2593extern int register_netdev(struct net_device *dev);
2594extern void unregister_netdev(struct net_device *dev);

/* General hardware address lists handling functions */
extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
				  struct netdev_hw_addr_list *from_list,
				  int addr_len, unsigned char addr_type);
extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
				   struct netdev_hw_addr_list *from_list,
				   int addr_len, unsigned char addr_type);
extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
			  struct netdev_hw_addr_list *from_list,
			  int addr_len);
extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
			     struct netdev_hw_addr_list *from_list,
			     int addr_len);
extern void __hw_addr_flush(struct netdev_hw_addr_list *list);
extern void __hw_addr_init(struct netdev_hw_addr_list *list);

/* Functions used for device addresses handling */
extern int dev_addr_add(struct net_device *dev, const unsigned char *addr,
			unsigned char addr_type);
extern int dev_addr_del(struct net_device *dev, const unsigned char *addr,
			unsigned char addr_type);
extern int dev_addr_add_multiple(struct net_device *to_dev,
				 struct net_device *from_dev,
				 unsigned char addr_type);
extern int dev_addr_del_multiple(struct net_device *to_dev,
				 struct net_device *from_dev,
				 unsigned char addr_type);
extern void dev_addr_flush(struct net_device *dev);
extern int dev_addr_init(struct net_device *dev);

/* Functions used for unicast addresses handling */
extern int dev_uc_add(struct net_device *dev, const unsigned char *addr);
extern int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
extern int dev_uc_del(struct net_device *dev, const unsigned char *addr);
extern int dev_uc_sync(struct net_device *to, struct net_device *from);
extern int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
extern void dev_uc_flush(struct net_device *dev);
extern void dev_uc_init(struct net_device *dev);
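/*
 * Usage sketch: a stacked device (e.g. a VLAN or macvlan upper) keeps the
 * lower device's unicast filter in sync from its ndo_set_rx_mode(), and
 * undoes it on close with dev_uc_unsync().  "lowerdev" is illustrative.
 *
 *	static void foo_set_rx_mode(struct net_device *dev)
 *	{
 *		dev_uc_sync(lowerdev, dev);
 *	}
 */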

/* Functions used for multicast addresses handling */
extern int dev_mc_add(struct net_device *dev, const unsigned char *addr);
extern int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
extern int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
extern int dev_mc_del(struct net_device *dev, const unsigned char *addr);
extern int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
extern int dev_mc_sync(struct net_device *to, struct net_device *from);
extern int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
extern void dev_mc_flush(struct net_device *dev);
extern void dev_mc_init(struct net_device *dev);
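/*
 * Usage sketch: subscribing a device to a link-layer multicast group.
 * The bytes below are the Ethernet mapping of 224.0.0.1 and are purely
 * illustrative; this API treats the address as opaque.
 *
 *	static const unsigned char mc_addr[ETH_ALEN] =
 *		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
 *
 *	dev_mc_add(dev, mc_addr);
 *	...
 *	dev_mc_del(dev, mc_addr);
 */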

/* Functions used for secondary unicast and multicast support */
extern void		dev_set_rx_mode(struct net_device *dev);
extern void		__dev_set_rx_mode(struct net_device *dev);
extern int		dev_set_promiscuity(struct net_device *dev, int inc);
extern int		dev_set_allmulti(struct net_device *dev, int inc);
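/*
 * Usage sketch: promiscuity and allmulti are reference counts, so callers
 * pass +1/-1 increments rather than absolute on/off values.  An upper
 * device propagating flag changes to a lower one ("lowerdev",
 * illustrative) might do, from its ndo_change_rx_flags():
 *
 *	if (change & IFF_PROMISC)
 *		dev_set_promiscuity(lowerdev,
 *				    dev->flags & IFF_PROMISC ? 1 : -1);
 *	if (change & IFF_ALLMULTI)
 *		dev_set_allmulti(lowerdev,
 *				 dev->flags & IFF_ALLMULTI ? 1 : -1);
 */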
extern void		netdev_state_change(struct net_device *dev);
extern void		netdev_notify_peers(struct net_device *dev);
extern void		netdev_features_change(struct net_device *dev);
/* Load a device via the kmod */
extern void		dev_load(struct net *net, const char *name);
extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					       struct rtnl_link_stats64 *storage);
extern void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
				    const struct net_device_stats *netdev_stats);

extern int		netdev_max_backlog;
extern int		netdev_tstamp_prequeue;
extern int		weight_p;
extern int		bpf_jit_enable;

extern bool netdev_has_upper_dev(struct net_device *dev,
				 struct net_device *upper_dev);
extern bool netdev_has_any_upper_dev(struct net_device *dev);
extern struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
extern struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
extern int netdev_upper_dev_link(struct net_device *dev,
				 struct net_device *upper_dev);
extern int netdev_master_upper_dev_link(struct net_device *dev,
					struct net_device *upper_dev);
extern void netdev_upper_dev_unlink(struct net_device *dev,
				    struct net_device *upper_dev);
extern int skb_checksum_help(struct sk_buff *skb);
extern struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
					 netdev_features_t features,
					 bool tx_path);
extern struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
					   netdev_features_t features);

static inline
struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
	return __skb_gso_segment(skb, features, true);
}
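/*
 * Usage sketch: software GSO fallback.  When the device cannot segment a
 * GSO skb itself, the caller segments it and transmits the resulting
 * list.  "foo_xmit_one" is a hypothetical per-segment transmit helper;
 * a NULL return means no segmentation was needed.
 *
 *	struct sk_buff *segs, *next;
 *
 *	segs = skb_gso_segment(skb, features);
 *	if (IS_ERR(segs))
 *		goto drop;
 *	if (segs) {
 *		consume_skb(skb);
 *		skb = segs;
 *	}
 *	do {
 *		next = skb->next;
 *		skb->next = NULL;
 *		foo_xmit_one(skb);
 *		skb = next;
 *	} while (skb);
 */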
__be16 skb_network_protocol(struct sk_buff *skb);

static inline bool can_checksum_protocol(netdev_features_t features,
					 __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_V4_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_V6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)) ||
		((features & NETIF_F_FCOE_CRC) &&
		 protocol == htons(ETH_P_FCOE)));
}
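/*
 * Usage sketch: before handing a CHECKSUM_PARTIAL skb to hardware that
 * cannot checksum this protocol, fall back to computing the checksum in
 * software:
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    !can_checksum_protocol(features, skb_network_protocol(skb))) {
 *		if (skb_checksum_help(skb))
 *			goto drop;
 *	}
 */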

#ifdef CONFIG_BUG
extern void netdev_rx_csum_fault(struct net_device *dev);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
}
#endif
/* rx skb timestamps */
extern void		net_enable_timestamp(void);
extern void		net_disable_timestamp(void);

#ifdef CONFIG_PROC_FS
extern int __init dev_proc_init(void);
#else
#define dev_proc_init() 0
#endif

extern int netdev_class_create_file(struct class_attribute *class_attr);
extern void netdev_class_remove_file(struct class_attribute *class_attr);

extern struct kobj_ns_type_operations net_ns_type_operations;

extern const char *netdev_drivername(const struct net_device *dev);

extern void linkwatch_run_queue(void);

static inline netdev_features_t netdev_get_wanted_features(
	struct net_device *dev)
{
	return (dev->features & ~dev->hw_features) | dev->wanted_features;
}
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask);

/* Allow TSO being used on stacked device :
 * Performing the GSO segmentation before last device
 * is a performance improvement.
 */
static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
							netdev_features_t mask)
{
	return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
}
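/*
 * Usage sketch: a master device (bond, bridge, team) recomputing its
 * feature set as the combination of its slaves' features, while still
 * advertising TSO (which can be emulated by segmenting before the last
 * device).  The slave list and field names are illustrative only.
 *
 *	netdev_features_t features = netdev_add_tso_features(0, mask);
 *
 *	list_for_each_entry(slave, &slaves, list)
 *		features = netdev_increment_features(features,
 *						     slave->dev->features,
 *						     mask);
 */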

int __netdev_update_features(struct net_device *dev);
void netdev_update_features(struct net_device *dev);
void netdev_change_features(struct net_device *dev);

void netif_stacked_transfer_operstate(const struct net_device *rootdev,
				      struct net_device *dev);

netdev_features_t netif_skb_features(struct sk_buff *skb);

static inline bool net_gso_ok(netdev_features_t features, int gso_type)
{
	netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;

	/* check flags correspondence */
	BUILD_BUG_ON(SKB_GSO_TCPV4   != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP     != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_DODGY   != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCPV6   != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_FCOE    != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));

	return (features & feature) == feature;
}

static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
{
	return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
	       (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
}

static inline bool netif_needs_gso(struct sk_buff *skb,
				   netdev_features_t features)
{
	return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
		unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
			 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
}
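/*
 * Usage sketch: the typical transmit-time decision.  Segment in software
 * only when the skb is GSO but the device's current features cannot
 * handle it as-is:
 *
 *	netdev_features_t features = netif_skb_features(skb);
 *
 *	if (netif_needs_gso(skb, features)) {
 *		... fall back to skb_gso_segment(), as sketched above ...
 *	} else {
 *		... hand the skb to the device unsegmented ...
 *	}
 */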

static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	dev->gso_max_size = size;
}

static inline bool netif_is_bond_master(struct net_device *dev)
{
	return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
}

static inline bool netif_is_bond_slave(struct net_device *dev)
{
	return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
}

static inline bool netif_supports_nofcs(struct net_device *dev)
{
	return dev->priv_flags & IFF_SUPP_NOFCS;
}

extern struct pernet_operations __net_initdata loopback_net_ops;

/* Logging, debugging and troubleshooting/diagnostic helpers. */

/* netdev_printk helpers, similar to dev_printk */

static inline const char *netdev_name(const struct net_device *dev)
{
	if (dev->reg_state != NETREG_REGISTERED)
		return "(unregistered net_device)";
	return dev->name;
}

extern __printf(3, 4)
int netdev_printk(const char *level, const struct net_device *dev,
		  const char *format, ...);
extern __printf(2, 3)
int netdev_emerg(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_alert(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_crit(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_err(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_warn(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_notice(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_info(const struct net_device *dev, const char *format, ...);

#define MODULE_ALIAS_NETDEV(device) \
	MODULE_ALIAS("netdev-" device)

#if defined(CONFIG_DYNAMIC_DEBUG)
#define netdev_dbg(__dev, format, args...)			\
do {								\
	dynamic_netdev_dbg(__dev, format, ##args);		\
} while (0)
#elif defined(DEBUG)
#define netdev_dbg(__dev, format, args...)			\
	netdev_printk(KERN_DEBUG, __dev, format, ##args)
#else
#define netdev_dbg(__dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, __dev, format, ##args); \
	0;							\
})
#endif

#if defined(VERBOSE_DEBUG)
#define netdev_vdbg	netdev_dbg
#else

#define netdev_vdbg(dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, dev, format, ##args);	\
	0;							\
})
#endif

/*
 * netdev_WARN() acts like netdev_printk(), but with the key difference
 * of using a WARN/WARN_ON to get the message out, including the
 * file/line information and a backtrace.
 */
#define netdev_WARN(dev, format, args...)			\
	WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args)

/* netif printk helpers, similar to netdev_printk */

#define netif_printk(priv, type, level, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_printk(level, (dev), fmt, ##args);	\
} while (0)

#define netif_level(level, priv, type, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_##level(dev, fmt, ##args);		\
} while (0)

#define netif_emerg(priv, type, dev, fmt, args...)		\
	netif_level(emerg, priv, type, dev, fmt, ##args)
#define netif_alert(priv, type, dev, fmt, args...)		\
	netif_level(alert, priv, type, dev, fmt, ##args)
#define netif_crit(priv, type, dev, fmt, args...)		\
	netif_level(crit, priv, type, dev, fmt, ##args)
#define netif_err(priv, type, dev, fmt, args...)		\
	netif_level(err, priv, type, dev, fmt, ##args)
#define netif_warn(priv, type, dev, fmt, args...)		\
	netif_level(warn, priv, type, dev, fmt, ##args)
#define netif_notice(priv, type, dev, fmt, args...)		\
	netif_level(notice, priv, type, dev, fmt, ##args)
#define netif_info(priv, type, dev, fmt, args...)		\
	netif_level(info, priv, type, dev, fmt, ##args)
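/*
 * Usage sketch: the netif_* helpers gate messages on a per-driver
 * msg_enable bitmap (see the netif_msg_* accessors in this header).
 * "priv" is a hypothetical driver-private struct with a "msg_enable"
 * field, typically initialized from a module "debug" parameter:
 *
 *	priv->msg_enable = netif_msg_init(debug,
 *					  NETIF_MSG_DRV | NETIF_MSG_LINK);
 *	...
 *	netif_info(priv, link, dev, "link up, %u Mbps\n", speed);
 */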

#if defined(CONFIG_DYNAMIC_DEBUG)
#define netif_dbg(priv, type, netdev, format, args...)		\
do {								\
	if (netif_msg_##type(priv))				\
		dynamic_netdev_dbg(netdev, format, ##args);	\
} while (0)
#elif defined(DEBUG)
#define netif_dbg(priv, type, dev, format, args...)		\
	netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
#else
#define netif_dbg(priv, type, dev, format, args...)			\
({									\
	if (0)								\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;								\
})
#endif

#if defined(VERBOSE_DEBUG)
#define netif_vdbg	netif_dbg
#else
#define netif_vdbg(priv, type, dev, format, args...)		\
({								\
	if (0)							\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;							\
})
#endif

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE: That is no longer true with the addition of VLAN tags.  Not
 *	      sure which should go first, but I bet it won't make much
 *	      difference if we are running VLANs.  The good news is that
 *	      this protocol won't be in the list unless compiled in, so
 *	      the average user (w/out VLANs) will not be adversely
 *	      affected.  --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		8137	IPX
 *		0006	NetBEUI
 */
#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)
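/*
 * Usage sketch: packet-type handlers are hashed by the low nibble of the
 * protocol value, as in the ptype_base table in net/core/dev.c.  A lookup
 * into such a table of PTYPE_HASH_SIZE list heads looks like:
 *
 *	struct list_head *head =
 *		&ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
 */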

#endif	/* _LINUX_NETDEVICE_H */