1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#ifndef _LINUX_NETDEVICE_H
26#define _LINUX_NETDEVICE_H
27
28#include <linux/pm_qos.h>
29#include <linux/timer.h>
30#include <linux/bug.h>
31#include <linux/delay.h>
32#include <linux/atomic.h>
33#include <asm/cache.h>
34#include <asm/byteorder.h>
35
36#include <linux/percpu.h>
37#include <linux/rculist.h>
38#include <linux/dmaengine.h>
39#include <linux/workqueue.h>
40#include <linux/dynamic_queue_limits.h>
41
42#include <linux/ethtool.h>
43#include <net/net_namespace.h>
44#include <net/dsa.h>
45#ifdef CONFIG_DCB
46#include <net/dcbnl.h>
47#endif
48#include <net/netprio_cgroup.h>
49
50#include <linux/netdev_features.h>
51#include <linux/neighbour.h>
52#include <uapi/linux/netdevice.h>
53
54struct netpoll_info;
55struct device;
56struct phy_device;
57
58struct wireless_dev;
59
60#define SET_ETHTOOL_OPS(netdev,ops) \
61 ( (netdev)->ethtool_ops = (ops) )
62
63extern void netdev_set_default_ethtool_ops(struct net_device *dev,
64 const struct ethtool_ops *ops);
65
66
/* Hardware address assignment types (dev->addr_assign_type). */
#define NET_ADDR_PERM		0	/* address is permanent (default) */
#define NET_ADDR_RANDOM		1	/* address was generated randomly */
#define NET_ADDR_STOLEN		2	/* address was taken from another device */
#define NET_ADDR_SET		3	/* address was set explicitly */



/* Backlog congestion levels returned by netif_rx() and friends. */
#define NET_RX_SUCCESS		0	/* packet accepted */
#define NET_RX_DROP		1	/* packet dropped */
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
/* qdisc enqueue return codes; values fit below NET_XMIT_MASK. */
#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_DROP		0x01	/* skb was dropped */
#define NET_XMIT_CN		0x02	/* congestion notification */
#define NET_XMIT_POLICED	0x03	/* skb was shot by policer */
#define NET_XMIT_MASK		0x0f	/* qdisc codes occupy the low nibble */

/* NET_XMIT_CN is mapped to success by net_xmit_eval(): the congestion
 * notification is informational, not an error for the caller.
 */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)

/* Driver (NETDEV_TX_*) return codes occupy the next nibble up. */
#define NETDEV_TX_MASK		0xf0
109
/* Driver transmit return codes, returned by ndo_start_xmit(). */
enum netdev_tx {
	__NETDEV_TX_MIN	 = INT_MIN,	/* forces the enum to be signed */
	NETDEV_TX_OK	 = 0x00,	/* driver consumed the packet */
	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy */
	NETDEV_TX_LOCKED = 0x20,	/* driver tx lock was already taken */
};
typedef enum netdev_tx netdev_tx_t;
117
118
119
120
121
122static inline bool dev_xmit_complete(int rc)
123{
124
125
126
127
128
129
130 if (likely(rc < NET_XMIT_MASK))
131 return true;
132
133 return false;
134}
135
136
137
138
139
140
/*
 * Worst-case link-layer header size.  Wireless (and AX.25) frames need
 * considerably more header room than plain Ethernet; mesh headers are the
 * largest of all.
 */
#if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#else
# define LL_MAX_HEADER 32
#endif

/* Reserve extra room when any IP/IPv6 tunnel encapsulation may be used. */
#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
    !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif
157
158
159
160
161
162
/*
 * Old-style per-device statistics, kept for drivers that have not been
 * converted to rtnl_link_stats64.  Counters are plain unsigned long and
 * therefore 32 bit on 32-bit architectures.
 */
struct net_device_stats {
	unsigned long	rx_packets;		/* total packets received */
	unsigned long	tx_packets;		/* total packets transmitted */
	unsigned long	rx_bytes;		/* total bytes received */
	unsigned long	tx_bytes;		/* total bytes transmitted */
	unsigned long	rx_errors;		/* bad packets received */
	unsigned long	tx_errors;		/* packet transmit problems */
	unsigned long	rx_dropped;
	unsigned long	tx_dropped;
	unsigned long	multicast;		/* multicast packets received */
	unsigned long	collisions;
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;
	unsigned long	rx_crc_errors;
	unsigned long	rx_frame_errors;
	unsigned long	rx_fifo_errors;
	unsigned long	rx_missed_errors;
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};
188
189
190#include <linux/cache.h>
191#include <linux/skbuff.h>
192
193#ifdef CONFIG_RPS
194#include <linux/static_key.h>
195extern struct static_key rps_needed;
196#endif
197
198struct neighbour;
199struct neigh_parms;
200struct sk_buff;
201
/* One hardware address on a device list (uc/mc/dev_addrs). */
struct netdev_hw_addr {
	struct list_head	list;		/* link in netdev_hw_addr_list */
	unsigned char		addr[MAX_ADDR_LEN];
	unsigned char		type;		/* one of NETDEV_HW_ADDR_T_* */
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_SLAVE		3
#define NETDEV_HW_ADDR_T_UNICAST	4
#define NETDEV_HW_ADDR_T_MULTICAST	5
	bool			global_use;
	int			sync_cnt;
	int			refcount;	/* duplicate-add reference count */
	int			synced;
	struct rcu_head		rcu_head;	/* for deferred freeing */
};
217
/* A counted list of struct netdev_hw_addr entries. */
struct netdev_hw_addr_list {
	struct list_head	list;
	int			count;	/* number of entries on @list */
};
222
/* Generic accessors/iterators over a netdev_hw_addr_list... */
#define netdev_hw_addr_list_count(l) ((l)->count)
#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
#define netdev_hw_addr_list_for_each(ha, l) \
	list_for_each_entry(ha, &(l)->list, list)

/* ...and convenience wrappers for a device's unicast (uc) list... */
#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->uc)

/* ...and its multicast (mc) list. */
#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->mc)
237
/* Cached link-layer header, seqlock-protected for lockless readers. */
struct hh_cache {
	u16		hh_len;		/* valid bytes in hh_data */
	u16		__pad;
	seqlock_t	hh_lock;	/* guards hh_data against torn reads */

/* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};
251
252
253
254
255
256
257
258
259
/*
 * Headroom to reserve for a device's link-layer header, rounded up to a
 * HH_DATA_MOD multiple so cached header copies stay aligned.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
264
/* Per-device link-layer header construction/parsing callbacks. */
struct header_ops {
	/* build a hardware header in front of the skb data */
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned int len);
	/* extract the source hardware address into haddr */
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*rebuild)(struct sk_buff *skb);
	/* populate an hh_cache entry for this neighbour/protocol */
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
	/* refresh a cached header after an address change */
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
};
276
277
278
279
280
281
/* Bit positions used in net_device->state. */
enum netdev_state_t {
	__LINK_STATE_START,		/* device is up */
	__LINK_STATE_PRESENT,		/* device is registered/present */
	__LINK_STATE_NOCARRIER,		/* no carrier detected */
	__LINK_STATE_LINKWATCH_PENDING,	/* linkwatch event queued */
	__LINK_STATE_DORMANT,
};
289
290
291
292
293
294
/*
 * One "netdev=" boot-time configuration entry, filled in by
 * netdev_boot_setup() from the kernel command line.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];	/* interface name this entry applies to */
	struct ifmap map;	/* irq/io/memory settings requested */
};
#define NETDEV_BOOT_SETUP_MAX 8
299#define NETDEV_BOOT_SETUP_MAX 8
300
301extern int __init netdev_boot_setup(char *str);
302
303
304
305
/*
 * Per-NAPI polling context.  Drivers register one of these per polling
 * source via netif_napi_add() and schedule it with napi_schedule().
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which changes
	 * the state of the NAPI_STATE_SCHED bit: whoever atomically sets
	 * that bit may add this napi_struct to the poll list, and whoever
	 * clears the bit may remove it right before clearing.
	 */
	struct list_head	poll_list;

	unsigned long		state;		/* NAPI_STATE_* bits */
	int			weight;		/* budget passed to ->poll */
	unsigned int		gro_count;	/* entries on gro_list */
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	spinlock_t		poll_lock;
	int			poll_owner;
#endif
	struct net_device	*dev;
	struct sk_buff		*gro_list;	/* skbs held for GRO merging */
	struct sk_buff		*skb;
	struct list_head	dev_list;	/* link on dev->napi_list */
	struct hlist_node	napi_hash_node;	/* for napi_by_id() lookup */
	unsigned int		napi_id;
};
330
/* Bit positions in napi_struct.state. */
enum {
	NAPI_STATE_SCHED,	/* poll is scheduled or running */
	NAPI_STATE_DISABLE,	/* disable pending (see napi_disable) */
	NAPI_STATE_NPSVC,	/* netpoll is servicing this context */
	NAPI_STATE_HASHED,	/* entered the napi_hash table */
};
337
/* Outcome of handing an skb to the GRO engine. */
enum gro_result {
	GRO_MERGED,		/* merged into an existing held skb */
	GRO_MERGED_FREE,	/* merged; input skb can be freed */
	GRO_HELD,		/* held on gro_list for later merging */
	GRO_NORMAL,		/* not GRO-able; pass up the stack */
	GRO_DROP,		/* drop the packet */
};
typedef enum gro_result gro_result_t;
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
/* Verdicts a registered rx_handler may return to __netif_receive_skb(). */
enum rx_handler_result {
	RX_HANDLER_CONSUMED,	/* skb consumed; stop processing */
	RX_HANDLER_ANOTHER,	/* skb->dev changed; restart processing */
	RX_HANDLER_EXACT,	/* deliver only to exact-match ptypes */
	RX_HANDLER_PASS,	/* continue as if no handler was present */
};
typedef enum rx_handler_result rx_handler_result_t;
/* handler may replace *pskb, hence the double pointer */
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
396
397extern void __napi_schedule(struct napi_struct *n);
398
399static inline bool napi_disable_pending(struct napi_struct *n)
400{
401 return test_bit(NAPI_STATE_DISABLE, &n->state);
402}
403
404
405
406
407
408
409
410
411
412
413static inline bool napi_schedule_prep(struct napi_struct *n)
414{
415 return !napi_disable_pending(n) &&
416 !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
417}
418
419
420
421
422
423
424
425
426static inline void napi_schedule(struct napi_struct *n)
427{
428 if (napi_schedule_prep(n))
429 __napi_schedule(n);
430}
431
432
433static inline bool napi_reschedule(struct napi_struct *napi)
434{
435 if (napi_schedule_prep(napi)) {
436 __napi_schedule(napi);
437 return true;
438 }
439 return false;
440}
441
442
443
444
445
446
447
448extern void __napi_complete(struct napi_struct *n);
449extern void napi_complete(struct napi_struct *n);
450
451
452
453
454
455
456
457
458extern struct napi_struct *napi_by_id(unsigned int napi_id);
459
460
461
462
463
464
465
466extern void napi_hash_add(struct napi_struct *napi);
467
468
469
470
471
472
473
474
475extern void napi_hash_del(struct napi_struct *napi);
476
477
478
479
480
481
482
483
/*
 * napi_disable - prevent NAPI from scheduling
 * @n: napi context
 *
 * Stops this context from being scheduled and waits until any poll in
 * progress has completed.  May sleep (msleep), so not callable from
 * atomic context.
 */
static inline void napi_disable(struct napi_struct *n)
{
	set_bit(NAPI_STATE_DISABLE, &n->state);
	/* spin until we own SCHED, i.e. no poll is running any more */
	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	clear_bit(NAPI_STATE_DISABLE, &n->state);
}
491
492
493
494
495
496
497
498
/*
 * napi_enable - allow NAPI scheduling again
 * @n: napi context
 *
 * Must be paired with a preceding napi_disable(), which leaves the
 * SCHED bit set (hence the BUG_ON check).
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	/* order prior state updates before the bit-clear becomes visible */
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}
505
506#ifdef CONFIG_SMP
507
508
509
510
511
512
513
514
/*
 * napi_synchronize - wait until NAPI is not running
 * @n: napi context
 *
 * Sleeps until NAPI_STATE_SCHED is clear, i.e. no poll is in progress.
 * On !SMP this compiles to a plain barrier() (see the #else branch).
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	while (test_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
}
520#else
521# define napi_synchronize(n) barrier()
522#endif
523
/* Bit positions in netdev_queue.state. */
enum netdev_queue_state_t {
	__QUEUE_STATE_DRV_XOFF,		/* stopped by the driver */
	__QUEUE_STATE_STACK_XOFF,	/* stopped by the stack (BQL etc.) */
	__QUEUE_STATE_FROZEN,
#define QUEUE_STATE_ANY_XOFF ((1 << __QUEUE_STATE_DRV_XOFF)		| \
			      (1 << __QUEUE_STATE_STACK_XOFF))
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF		| \
					(1 << __QUEUE_STATE_FROZEN))
};
533
534
535
536
537
538
539
540
541
542
/* One transmit queue of a device; dev->_tx is an array of these. */
struct netdev_queue {
/*
 * read-mostly part
 */
	struct net_device	*dev;
	struct Qdisc		*qdisc;
	struct Qdisc		*qdisc_sleeping;
#ifdef CONFIG_SYSFS
	struct kobject		kobj;
#endif
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	int			numa_node;
#endif
/*
 * write-mostly part, on its own cache line
 */
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	int			xmit_lock_owner;
	/*
	 * jiffies of last queue start; please use this field instead of
	 * dev->trans_start on multiqueue devices
	 */
	unsigned long		trans_start;

	/* cumulative count of timeouts observed on this queue */
	unsigned long		trans_timeout;

	unsigned long		state;		/* __QUEUE_STATE_* bits */

#ifdef CONFIG_BQL
	struct dql		dql;		/* byte queue limits state */
#endif
} ____cacheline_aligned_in_smp;
578
/* Return the queue's NUMA node, or NUMA_NO_NODE when XPS/NUMA is off. */
static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	return q->numa_node;
#else
	return NUMA_NO_NODE;
#endif
}
587
/* Record the queue's NUMA node; no-op when XPS/NUMA is off. */
static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	q->numa_node = node;
#endif
}
594
595#ifdef CONFIG_RPS
596
597
598
599
/*
 * This structure holds an RPS map which can be of variable length.  The
 * map is an array of CPU ids eligible to process packets for a queue.
 */
struct rps_map {
	unsigned int len;	/* number of entries in cpus[] */
	struct rcu_head rcu;	/* for deferred freeing */
	u16 cpus[0];		/* variable-length CPU array */
};
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
605#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
606
607
608
609
610
611
/*
 * The rps_dev_flow structure contains the mapping of a flow to a CPU,
 * the tail-queue counter and (with RFS acceleration) the hw filter id.
 */
struct rps_dev_flow {
	u16 cpu;		/* CPU last used for this flow */
	u16 filter;		/* hw filter id, or RPS_NO_FILTER */
	unsigned int last_qtail;
};
#define RPS_NO_FILTER 0xffff
617#define RPS_NO_FILTER 0xffff
618
619
620
621
/* Per-rx-queue table of rps_dev_flow entries, indexed by hash & mask. */
struct rps_dev_flow_table {
	unsigned int mask;	/* table size minus one (power of two) */
	struct rcu_head rcu;	/* for deferred freeing */
	struct rps_dev_flow flows[0];	/* variable-length flow array */
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
    ((_num) * sizeof(struct rps_dev_flow)))
627#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
628 ((_num) * sizeof(struct rps_dev_flow)))
629
630
631
632
633
/*
 * Global table recording, per flow hash, the CPU that last processed
 * packets of that flow in the application context (see
 * rps_record_sock_flow() below).
 */
struct rps_sock_flow_table {
	unsigned int mask;	/* table size minus one (power of two) */
	u16 ents[0];		/* CPU per hash bucket, or RPS_NO_CPU */
};
#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
    ((_num) * sizeof(u16)))
638#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
639 ((_num) * sizeof(u16)))
640
641#define RPS_NO_CPU 0xffff
642
643static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
644 u32 hash)
645{
646 if (table && hash) {
647 unsigned int cpu, index = hash & table->mask;
648
649
650 cpu = raw_smp_processor_id();
651
652 if (table->ents[index] != cpu)
653 table->ents[index] = cpu;
654 }
655}
656
657static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
658 u32 hash)
659{
660 if (table && hash)
661 table->ents[hash & table->mask] = RPS_NO_CPU;
662}
663
664extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
665
666#ifdef CONFIG_RFS_ACCEL
667extern bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
668 u32 flow_id, u16 filter_id);
669#endif
670
671
/* One receive queue of a device; dev->_rx is an array of these. */
struct netdev_rx_queue {
	struct rps_map __rcu		*rps_map;	/* RPS CPU map */
	struct rps_dev_flow_table __rcu	*rps_flow_table; /* RFS flow table */
	struct kobject			kobj;		/* sysfs object */
	struct net_device		*dev;		/* owning device */
} ____cacheline_aligned_in_smp;
678#endif
679
680#ifdef CONFIG_XPS
681
682
683
684
/*
 * XPS map of transmit queues usable from one CPU; variable length,
 * grown in XPS_MIN_MAP_ALLOC-sized steps.
 */
struct xps_map {
	unsigned int len;	/* entries in use in queues[] */
	unsigned int alloc_len;	/* entries allocated */
	struct rcu_head rcu;	/* for deferred freeing */
	u16 queues[0];		/* variable-length tx queue indices */
};
691#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
692#define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map)) \
693 / sizeof(u16))
694
695
696
697
/* Per-device XPS state: one xps_map pointer per possible CPU. */
struct xps_dev_maps {
	struct rcu_head rcu;	/* for deferred freeing */
	struct xps_map __rcu *cpu_map[0];	/* indexed by CPU id */
};
702#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) + \
703 (nr_cpu_ids * sizeof(struct xps_map *)))
704#endif
705
706#define TC_MAX_QUEUE 16
707#define TC_BITMASK 15
708
/* Range of tx queues [offset, offset + count) assigned to one traffic class. */
struct netdev_tc_txq {
	u16 count;	/* number of queues in this class */
	u16 offset;	/* first queue index of this class */
};
713
714#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
715
716
717
718
/* FCoE HBA identification strings reported via ndo_fcoe_get_hbainfo(). */
struct netdev_fcoe_hbainfo {
	char	manufacturer[64];
	char	serial_number[64];
	char	hardware_version[64];
	char	driver_version[64];
	char	optionrom_version[64];
	char	firmware_version[64];
	char	model[256];
	char	model_description[256];
};
729#endif
730
731#define MAX_PHYS_PORT_ID_LEN 32
732
733
734
735
/*
 * Opaque physical port identifier, reported via ndo_get_phys_port_id();
 * only the first id_len bytes of id[] are valid.
 */
struct netdev_phys_port_id {
	unsigned char id[MAX_PHYS_PORT_ID_LEN];
	unsigned char id_len;
};
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
/*
 * struct net_device_ops - driver callbacks for a network device.
 *
 * All hooks are optional unless noted; the core checks each pointer for
 * NULL before use.  ndo_start_xmit is the transmit entry point and must
 * return a netdev_tx_t code (see enum netdev_tx above).
 */
struct net_device_ops {
	/* device lifecycle: register-time init/uninit and up/down */
	int			(*ndo_init)(struct net_device *dev);
	void			(*ndo_uninit)(struct net_device *dev);
	int			(*ndo_open)(struct net_device *dev);
	int			(*ndo_stop)(struct net_device *dev);
	/* transmit path */
	netdev_tx_t		(*ndo_start_xmit) (struct sk_buff *skb,
						   struct net_device *dev);
	u16			(*ndo_select_queue)(struct net_device *dev,
						    struct sk_buff *skb);
	/* rx-mode / addressing / basic configuration */
	void			(*ndo_change_rx_flags)(struct net_device *dev,
						       int flags);
	void			(*ndo_set_rx_mode)(struct net_device *dev);
	int			(*ndo_set_mac_address)(struct net_device *dev,
						       void *addr);
	int			(*ndo_validate_addr)(struct net_device *dev);
	int			(*ndo_do_ioctl)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
	int			(*ndo_set_config)(struct net_device *dev,
						  struct ifmap *map);
	int			(*ndo_change_mtu)(struct net_device *dev,
						  int new_mtu);
	int			(*ndo_neigh_setup)(struct net_device *dev,
						   struct neigh_parms *);
	void			(*ndo_tx_timeout) (struct net_device *dev);

	/* statistics: 64-bit variant preferred over legacy ndo_get_stats */
	struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
						     struct rtnl_link_stats64 *storage);
	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);

	/* VLAN filter maintenance */
	int			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
						       __be16 proto, u16 vid);
	int			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
							__be16 proto, u16 vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*ndo_poll_controller)(struct net_device *dev);
	int			(*ndo_netpoll_setup)(struct net_device *dev,
						     struct netpoll_info *info,
						     gfp_t gfp);
	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	int			(*ndo_busy_poll)(struct napi_struct *dev);
#endif
	/* per virtual-function (ndo_*_vf_*) configuration hooks */
	int			(*ndo_set_vf_mac)(struct net_device *dev,
						  int queue, u8 *mac);
	int			(*ndo_set_vf_vlan)(struct net_device *dev,
						   int queue, u16 vlan, u8 qos);
	int			(*ndo_set_vf_tx_rate)(struct net_device *dev,
						      int vf, int rate);
	int			(*ndo_set_vf_spoofchk)(struct net_device *dev,
						       int vf, bool setting);
	int			(*ndo_get_vf_config)(struct net_device *dev,
						     int vf,
						     struct ifla_vf_info *ivf);
	int			(*ndo_set_vf_link_state)(struct net_device *dev,
							 int vf, int link_state);
	int			(*ndo_set_vf_port)(struct net_device *dev,
						   int vf,
						   struct nlattr *port[]);
	int			(*ndo_get_vf_port)(struct net_device *dev,
						   int vf, struct sk_buff *skb);
	/* set number of traffic classes (see netdev_set_num_tc) */
	int			(*ndo_setup_tc)(struct net_device *dev, u8 tc);
#if IS_ENABLED(CONFIG_FCOE)
	/* FCoE offload hooks */
	int			(*ndo_fcoe_enable)(struct net_device *dev);
	int			(*ndo_fcoe_disable)(struct net_device *dev);
	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
						      u16 xid,
						      struct scatterlist *sgl,
						      unsigned int sgc);
	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
						     u16 xid);
	int			(*ndo_fcoe_ddp_target)(struct net_device *dev,
						       u16 xid,
						       struct scatterlist *sgl,
						       unsigned int sgc);
	int			(*ndo_fcoe_get_hbainfo)(struct net_device *dev,
							struct netdev_fcoe_hbainfo *hbainfo);
#endif

#if IS_ENABLED(CONFIG_LIBFCOE)
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
						    u64 *wwn, int type);
#endif

#ifdef CONFIG_RFS_ACCEL
	/* program a hardware flow-steering filter (accelerated RFS) */
	int			(*ndo_rx_flow_steer)(struct net_device *dev,
						     const struct sk_buff *skb,
						     u16 rxq_index,
						     u32 flow_id);
#endif
	/* bonding/teaming slave management */
	int			(*ndo_add_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	int			(*ndo_del_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	/* feature negotiation */
	netdev_features_t	(*ndo_fix_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_set_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_neigh_construct)(struct neighbour *n);
	void			(*ndo_neigh_destroy)(struct neighbour *n);

	/* forwarding database (FDB) manipulation */
	int			(*ndo_fdb_add)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 flags);
	int			(*ndo_fdb_del)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr);
	int			(*ndo_fdb_dump)(struct sk_buff *skb,
						struct netlink_callback *cb,
						struct net_device *dev,
						int idx);

	/* bridge netlink attribute get/set */
	int			(*ndo_bridge_setlink)(struct net_device *dev,
						      struct nlmsghdr *nlh);
	int			(*ndo_bridge_getlink)(struct sk_buff *skb,
						      u32 pid, u32 seq,
						      struct net_device *dev,
						      u32 filter_mask);
	int			(*ndo_bridge_dellink)(struct net_device *dev,
						      struct nlmsghdr *nlh);
	int			(*ndo_change_carrier)(struct net_device *dev,
						      bool new_carrier);
	int			(*ndo_get_phys_port_id)(struct net_device *dev,
							struct netdev_phys_port_id *ppid);
	/* vxlan UDP port offload notifications */
	void			(*ndo_add_vxlan_port)(struct net_device *dev,
						      sa_family_t sa_family,
						      __be16 port);
	void			(*ndo_del_vxlan_port)(struct net_device *dev,
						      sa_family_t sa_family,
						      __be16 port);
};
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
/*
 * The DEVICE structure — one instance per network interface.  Fields are
 * roughly ordered receive-side first, then transmit-side, with hot
 * transmit state on its own cache lines (see _tx below).
 */
struct net_device {
	/* interface name, e.g. "eth0"; first field by convention */
	char			name[IFNAMSIZ];

	/* name lookup hash chain */
	struct hlist_node	name_hlist;

	/* SNMP-style alias, settable from userspace */
	char			*ifalias;

	/* I/O-specific fields, used for legacy ISA-style devices */
	unsigned long		mem_end;	/* shared mem end	*/
	unsigned long		mem_start;	/* shared mem start	*/
	unsigned long		base_addr;	/* device I/O address	*/
	unsigned int		irq;		/* device IRQ number	*/

	/* __LINK_STATE_* bits (see enum netdev_state_t) */
	unsigned long		state;

	struct list_head	dev_list;	/* global device list	*/
	struct list_head	napi_list;	/* this dev's NAPI contexts */
	struct list_head	unreg_list;	/* unregistration batching */
	struct list_head	upper_dev_list;	/* devices above (master etc.) */
	struct list_head	lower_dev_list;	/* devices below (slaves etc.) */

	/* currently active device features */
	netdev_features_t	features;
	/* user-changeable features */
	netdev_features_t	hw_features;
	/* user-requested features */
	netdev_features_t	wanted_features;
	/* mask of features inheritable by VLAN devices */
	netdev_features_t	vlan_features;
	/* mask of features usable with encapsulated packets */
	netdev_features_t	hw_enc_features;
	/* mask of features usable with MPLS packets */
	netdev_features_t	mpls_features;

	/* interface index; unique device identifier */
	int			ifindex;
	int			iflink;

	struct net_device_stats	stats;
	/* dropped packets not reported in stats.rx_dropped */
	atomic_long_t		rx_dropped;

#ifdef CONFIG_WIRELESS_EXT
	/* list of wireless-extension handlers */
	const struct iw_handler_def *	wireless_handlers;
	/* instance data managed by the core of wireless extensions */
	struct iw_public_data *	wireless_data;
#endif
	/* management operations (see struct net_device_ops above) */
	const struct net_device_ops *netdev_ops;
	const struct ethtool_ops *ethtool_ops;

	/* hardware header description (see struct header_ops above) */
	const struct header_ops *header_ops;

	unsigned int		flags;		/* interface flags (IFF_*) */
	unsigned int		priv_flags;	/* flags invisible to userspace */

	unsigned short		gflags;
	unsigned short		padded;		/* padding added by alloc_netdev() */

	unsigned char		operstate;	/* RFC2863 operational state */
	unsigned char		link_mode;	/* default/dormant mapping policy */

	unsigned char		if_port;	/* selectable AUI, TP, ... */
	unsigned char		dma;		/* DMA channel		*/

	unsigned int		mtu;		/* interface MTU value	*/
	unsigned short		type;		/* hardware type (ARPHRD_*) */
	unsigned short		hard_header_len; /* hardware header length */

	/* extra head-/tailroom the hardware may need, but not in all
	 * cases can this be guaranteed; callers should check
	 */
	unsigned short		needed_headroom;
	unsigned short		needed_tailroom;

	/* permanent hw address and address bookkeeping */
	unsigned char		perm_addr[MAX_ADDR_LEN];
	unsigned char		addr_assign_type; /* NET_ADDR_* */
	unsigned char		addr_len;	/* hardware address length */
	unsigned char		neigh_priv_len;
	unsigned short          dev_id;		/* disambiguates devices
						 * sharing an address
						 */

	spinlock_t		addr_list_lock;	/* guards the lists below */
	struct netdev_hw_addr_list	uc;	/* unicast addresses	*/
	struct netdev_hw_addr_list	mc;	/* multicast addresses	*/
	struct netdev_hw_addr_list	dev_addrs; /* device hw addresses */

#ifdef CONFIG_SYSFS
	struct kset		*queues_kset;
#endif

	bool			uc_promisc;
	unsigned int		promiscuity;	/* promiscuous-mode refcount */
	unsigned int		allmulti;	/* allmulti-mode refcount */


	/* protocol-specific pointers */
#if IS_ENABLED(CONFIG_VLAN_8021Q)
	struct vlan_info __rcu	*vlan_info;	/* VLAN info */
#endif
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_switch_tree	*dsa_ptr;	/* DSA switch state */
#endif
	void 			*atalk_ptr;	/* AppleTalk link	*/
	struct in_device __rcu	*ip_ptr;	/* IPv4-specific data	*/
	struct dn_dev __rcu     *dn_ptr;	/* DECnet-specific data	*/
	struct inet6_dev __rcu	*ip6_ptr;	/* IPv6-specific data	*/
	void			*ax25_ptr;	/* AX.25-specific data	*/
	struct wireless_dev	*ieee80211_ptr;	/* 802.11-specific data	*/

/*
 * Cache lines mostly used on receive path (including eth_type_trans())
 */
	unsigned long		last_rx;	/* time of last rx (jiffies) */

	/* active hardware address (points into dev_addrs) */
	unsigned char		*dev_addr;


#ifdef CONFIG_RPS
	struct netdev_rx_queue	*_rx;		/* rx queue array */

	/* number of rx queues allocated at alloc_netdev_mq() time */
	unsigned int		num_rx_queues;

	/* number of rx queues currently active */
	unsigned int		real_num_rx_queues;

#endif

	/* installed rx handler (bridge/bond/team hook) and its cookie */
	rx_handler_func_t __rcu	*rx_handler;
	void __rcu		*rx_handler_data;

	struct netdev_queue __rcu *ingress_queue;
	unsigned char		broadcast[MAX_ADDR_LEN];


/*
 * Cache lines mostly used on transmit path
 */
	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;

	/* number of tx queues allocated at alloc_netdev_mq() time */
	unsigned int		num_tx_queues;

	/* number of tx queues currently active */
	unsigned int		real_num_tx_queues;

	/* root qdisc from userspace point of view */
	struct Qdisc		*qdisc;

	unsigned long		tx_queue_len;	/* max frames per queue allowed */
	spinlock_t		tx_global_lock;

#ifdef CONFIG_XPS
	struct xps_dev_maps __rcu *xps_maps;
#endif
#ifdef CONFIG_RFS_ACCEL
	/* CPU reverse-map of device rx interrupt affinities */
	struct cpu_rmap		*rx_cpu_rmap;
#endif

	/* time (jiffies) of last transmit; see also
	 * netdev_queue.trans_start for multiqueue devices
	 */
	unsigned long		trans_start;

	/* watchdog timeout (jiffies) and timer driving ndo_tx_timeout */
	int			watchdog_timeo;
	struct timer_list	watchdog_timer;

	/* per-cpu reference counter */
	int __percpu		*pcpu_refcnt;

	/* delayed register/unregister work list */
	struct list_head	todo_list;

	/* ifindex hash chain */
	struct hlist_node	index_hlist;

	struct list_head	link_watch_list;

	/* register/unregister state machine */
	enum { NETREG_UNINITIALIZED=0,
	       NETREG_REGISTERED,	/* completed register_netdevice */
	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
	       NETREG_UNREGISTERED,	/* completed unregister todo */
	       NETREG_RELEASED,		/* called free_netdev */
	       NETREG_DUMMY,		/* dummy device for NAPI poll */
	} reg_state:8;

	bool dismantle;	/* device is going to be freed */

	enum {
		RTNL_LINK_INITIALIZED,
		RTNL_LINK_INITIALIZING,
	} rtnl_link_state:16;

	/* called from unregister; must free the whole device */
	void (*destructor)(struct net_device *dev);

#ifdef CONFIG_NETPOLL
	struct netpoll_info __rcu	*npinfo;
#endif

#ifdef CONFIG_NET_NS
	/* network namespace this device is inside */
	struct net		*nd_net;
#endif

	/* mid-layer private data; the union members are per-cpu stats
	 * used by specific virtual device types
	 */
	union {
		void				*ml_priv;
		struct pcpu_lstats __percpu	*lstats;
		struct pcpu_tstats __percpu	*tstats;
		struct pcpu_dstats __percpu	*dstats;
		struct pcpu_vstats __percpu	*vstats;
	};
	/* GARP (802.1 GVRP) state */
	struct garp_port __rcu	*garp_port;
	/* MRP (802.1 MVRP) state */
	struct mrp_port __rcu	*mrp_port;

	/* class/net/name entry */
	struct device		dev;

	/* space for optional device, statistics, and wireless sysfs groups */
	const struct attribute_group *sysfs_groups[4];

	/* rtnetlink link ops */
	const struct rtnl_link_ops *rtnl_link_ops;

	/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE		65536
	unsigned int		gso_max_size;
#define GSO_MAX_SEGS		65535
	u16			gso_max_segs;

#ifdef CONFIG_DCB
	/* Data Center Bridging netlink ops */
	const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
	/* traffic-class state (see netdev_set_num_tc and friends) */
	u8 num_tc;
	struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
	u8 prio_tc_map[TC_BITMASK + 1];

#if IS_ENABLED(CONFIG_FCOE)
	/* max exchange id for FCoE LRO by ddp */
	unsigned int		fcoe_ddp_xid;
#endif
#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
	struct netprio_map __rcu *priomap;
#endif
	/* attached PHY device, if any */
	struct phy_device *phydev;

	/* lockdep class for qdisc busylock spinlock */
	struct lock_class_key *qdisc_tx_busylock;

	/* device group number, settable via netlink */
	int group;

	struct pm_qos_request	pm_qos_req;
};
1416#define to_net_dev(d) container_of(d, struct net_device, dev)
1417
1418#define NETDEV_ALIGN 32
1419
1420static inline
1421int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
1422{
1423 return dev->prio_tc_map[prio & TC_BITMASK];
1424}
1425
1426static inline
1427int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
1428{
1429 if (tc >= dev->num_tc)
1430 return -EINVAL;
1431
1432 dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
1433 return 0;
1434}
1435
1436static inline
1437void netdev_reset_tc(struct net_device *dev)
1438{
1439 dev->num_tc = 0;
1440 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
1441 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
1442}
1443
1444static inline
1445int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
1446{
1447 if (tc >= dev->num_tc)
1448 return -EINVAL;
1449
1450 dev->tc_to_txq[tc].count = count;
1451 dev->tc_to_txq[tc].offset = offset;
1452 return 0;
1453}
1454
1455static inline
1456int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
1457{
1458 if (num_tc > TC_MAX_QUEUE)
1459 return -EINVAL;
1460
1461 dev->num_tc = num_tc;
1462 return 0;
1463}
1464
/* Return the configured number of traffic classes. */
static inline
int netdev_get_num_tc(struct net_device *dev)
{
	return dev->num_tc;
}
1470
1471static inline
1472struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
1473 unsigned int index)
1474{
1475 return &dev->_tx[index];
1476}
1477
1478static inline void netdev_for_each_tx_queue(struct net_device *dev,
1479 void (*f)(struct net_device *,
1480 struct netdev_queue *,
1481 void *),
1482 void *arg)
1483{
1484 unsigned int i;
1485
1486 for (i = 0; i < dev->num_tx_queues; i++)
1487 f(dev, &dev->_tx[i], arg);
1488}
1489
1490extern struct netdev_queue *netdev_pick_tx(struct net_device *dev,
1491 struct sk_buff *skb);
1492extern u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
1493
1494
1495
1496
/*
 * Return the network namespace of a device (&init_net when CONFIG_NET_NS
 * is off; read_pnet handles both configurations).
 */
static inline
struct net *dev_net(const struct net_device *dev)
{
	return read_pnet(&dev->nd_net);
}
1502
/*
 * Move a device into network namespace @net, dropping the reference on
 * the old namespace and taking one on the new.  No-op without
 * CONFIG_NET_NS.
 */
static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
#ifdef CONFIG_NET_NS
	release_net(dev->nd_net);
	dev->nd_net = hold_net(net);
#endif
}
1511
1512static inline bool netdev_uses_dsa_tags(struct net_device *dev)
1513{
1514#ifdef CONFIG_NET_DSA_TAG_DSA
1515 if (dev->dsa_ptr != NULL)
1516 return dsa_uses_dsa_tags(dev->dsa_ptr);
1517#endif
1518
1519 return 0;
1520}
1521
1522static inline bool netdev_uses_trailer_tags(struct net_device *dev)
1523{
1524#ifdef CONFIG_NET_DSA_TAG_TRAILER
1525 if (dev->dsa_ptr != NULL)
1526 return dsa_uses_trailer_tags(dev->dsa_ptr);
1527#endif
1528
1529 return 0;
1530}
1531
1532
1533
1534
1535
1536
1537
1538static inline void *netdev_priv(const struct net_device *dev)
1539{
1540 return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
1541}
1542
1543
1544
1545
1546#define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
1547
1548
1549
1550
1551
1552#define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
1553
1554
1555
1556
1557#define NAPI_POLL_WEIGHT 64
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
1570 int (*poll)(struct napi_struct *, int), int weight);
1571
1572
1573
1574
1575
1576
1577
1578void netif_napi_del(struct napi_struct *napi);
1579
/* Per-skb GRO state, stored in skb->cb (see NAPI_GRO_CB below). */
struct napi_gro_cb {
	/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
	void *frag0;

	/* Length of frag0. */
	unsigned int frag0_len;

	/* This indicates where we are processing relative to skb->data. */
	int data_offset;

	/* This is non-zero if the packet cannot be merged with the new skb. */
	int flush;

	/* Number of segments aggregated. */
	u16	count;

	/* This is non-zero if the packet may be of the same flow. */
	u8	same_flow;

	/* Free the skb? */
	u8	free;
#define NAPI_GRO_FREE		  1
#define NAPI_GRO_FREE_STOLEN_HEAD 2

	/* jiffies when first packet was created/queued */
	unsigned long age;

	/* Used in ipv6_gro_receive() */
	int	proto;

	/* used in skb_gro_receive() slow path */
	struct sk_buff *last;
};

#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
1613
1614#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
1615
/* A registered L2 protocol handler (see dev_add_pack()). */
struct packet_type {
	__be16			type;	/* ethertype, or htons(ETH_P_ALL) */
	struct net_device	*dev;	/* NULL is wildcarded here */
	int			(*func) (struct sk_buff *,
					 struct net_device *,
					 struct packet_type *,
					 struct net_device *);
	bool			(*id_match)(struct packet_type *ptype,
					    struct sock *sk);
	void			*af_packet_priv;
	struct list_head	list;	/* link in the ptype hash/list */
};
1628
/* Per-protocol GSO/GRO callbacks. */
struct offload_callbacks {
	/* split an skb into a chain of GSO segments */
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						netdev_features_t features);
	int			(*gso_send_check)(struct sk_buff *skb);
	/* try to merge @skb into the held packets on @head */
	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
					       struct sk_buff *skb);
	/* finalise a merged packet before delivery */
	int			(*gro_complete)(struct sk_buff *skb);
};
1637
/* An offload-callback registration for one ethertype (see dev_add_offload()). */
struct packet_offload {
	__be16			 type;	/* ethertype */
	struct offload_callbacks callbacks;
	struct list_head	 list;	/* link in the offload list */
};
1643
1644#include <linux/notifier.h>
1645
1646
1647
1648
1649
/* netdevice notifier chain event codes */
#define NETDEV_UP	0x0001	/* For now you can't veto a device up/down */
#define NETDEV_DOWN	0x0002
#define NETDEV_REBOOT	0x0003	/* Tell a protocol stack a network interface
				   detected a hardware crash and restarted
				   - we can use this eg to kick tcp sessions
				   once done */
#define NETDEV_CHANGE	0x0004	/* Notify device state change */
#define NETDEV_REGISTER 0x0005
#define NETDEV_UNREGISTER	0x0006
#define NETDEV_CHANGEMTU	0x0007
#define NETDEV_CHANGEADDR	0x0008
#define NETDEV_GOING_DOWN	0x0009
#define NETDEV_CHANGENAME	0x000A
#define NETDEV_FEAT_CHANGE	0x000B
#define NETDEV_BONDING_FAILOVER 0x000C
#define NETDEV_PRE_UP		0x000D
#define NETDEV_PRE_TYPE_CHANGE	0x000E
#define NETDEV_POST_TYPE_CHANGE	0x000F
#define NETDEV_POST_INIT	0x0010
#define NETDEV_UNREGISTER_FINAL 0x0011
#define NETDEV_RELEASE		0x0012
#define NETDEV_NOTIFY_PEERS	0x0013
#define NETDEV_JOIN		0x0014
#define NETDEV_CHANGEUPPER	0x0015
#define NETDEV_RESEND_IGMP	0x0016
1675
1676extern int register_netdevice_notifier(struct notifier_block *nb);
1677extern int unregister_netdevice_notifier(struct notifier_block *nb);
1678
/* Base payload passed to netdevice notifier callbacks. */
struct netdev_notifier_info {
	struct net_device *dev;	/* device the event refers to */
};
1682
/* Extended payload for NETDEV_CHANGE events; embeds the base info. */
struct netdev_notifier_change_info {
	struct netdev_notifier_info info; /* must be first */
	unsigned int flags_changed;	/* which dev->flags bits flipped */
};
1687
/* Initialise a notifier payload with the originating device. */
static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
					     struct net_device *dev)
{
	info->dev = dev;
}
1693
/* Extract the device from a notifier payload. */
static inline struct net_device *
netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
{
	return info->dev;
}
1699
1700extern int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev,
1701 struct netdev_notifier_info *info);
1702extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
1703
1704
1705extern rwlock_t dev_base_lock;
1706
/* Iterators over a namespace's device list; callers hold dev_base_lock,
 * the RTNL, or (for the _rcu variants) rcu_read_lock as appropriate.
 */
#define for_each_netdev(net, d)		\
		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_reverse(net, d)	\
		list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_rcu(net, d)		\
		list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n)	\
		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d)		\
		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue_rcu(net, d)		\
	list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
/* visit only devices whose master upper device is @bond */
#define for_each_netdev_in_bond_rcu(bond, slave)	\
		for_each_netdev_rcu(&init_net, slave)	\
			if (netdev_master_upper_dev_get_rcu(slave) == bond)
#define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)
1723
/*
 * Return the device after @dev on its namespace's device list,
 * or NULL when @dev is the last one.  Caller must hold dev_base_lock
 * or RTNL; use the _rcu variant under rcu_read_lock().
 */
static inline struct net_device *next_net_device(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = dev->dev_list.next;
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

/* RCU-safe variant of next_net_device(); call under rcu_read_lock(). */
static inline struct net_device *next_net_device_rcu(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = rcu_dereference(list_next_rcu(&dev->dev_list));
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

/* First device in @net's device list, or NULL if the namespace is empty. */
static inline struct net_device *first_net_device(struct net *net)
{
	return list_empty(&net->dev_base_head) ? NULL :
		net_device_entry(net->dev_base_head.next);
}

/* RCU-safe variant of first_net_device(); call under rcu_read_lock(). */
static inline struct net_device *first_net_device_rcu(struct net *net)
{
	struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));

	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}
1756
1757extern int netdev_boot_setup_check(struct net_device *dev);
1758extern unsigned long netdev_boot_base(const char *prefix, int unit);
1759extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
1760 const char *hwaddr);
1761extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
1762extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
1763extern void dev_add_pack(struct packet_type *pt);
1764extern void dev_remove_pack(struct packet_type *pt);
1765extern void __dev_remove_pack(struct packet_type *pt);
1766extern void dev_add_offload(struct packet_offload *po);
1767extern void dev_remove_offload(struct packet_offload *po);
1768extern void __dev_remove_offload(struct packet_offload *po);
1769
1770extern struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
1771 unsigned short mask);
1772extern struct net_device *dev_get_by_name(struct net *net, const char *name);
1773extern struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
1774extern struct net_device *__dev_get_by_name(struct net *net, const char *name);
1775extern int dev_alloc_name(struct net_device *dev, const char *name);
1776extern int dev_open(struct net_device *dev);
1777extern int dev_close(struct net_device *dev);
1778extern void dev_disable_lro(struct net_device *dev);
1779extern int dev_loopback_xmit(struct sk_buff *newskb);
1780extern int dev_queue_xmit(struct sk_buff *skb);
1781extern int register_netdevice(struct net_device *dev);
1782extern void unregister_netdevice_queue(struct net_device *dev,
1783 struct list_head *head);
1784extern void unregister_netdevice_many(struct list_head *head);
/*
 * Unregister @dev on its own (NULL list head), rather than batching it
 * onto a caller-supplied list as unregister_netdevice_queue() allows.
 */
static inline void unregister_netdevice(struct net_device *dev)
{
	unregister_netdevice_queue(dev, NULL);
}
1789
1790extern int netdev_refcnt_read(const struct net_device *dev);
1791extern void free_netdev(struct net_device *dev);
1792extern void synchronize_net(void);
1793extern int init_dummy_netdev(struct net_device *dev);
1794
1795extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
1796extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
1797extern struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
1798extern int netdev_get_name(struct net *net, char *name, int ifindex);
1799extern int dev_restart(struct net_device *dev);
1800#ifdef CONFIG_NETPOLL_TRAP
1801extern int netpoll_trap(void);
1802#endif
1803extern int skb_gro_receive(struct sk_buff **head,
1804 struct sk_buff *skb);
1805
/* Current parse offset into the GRO packet (tracked in the GRO control block). */
static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->data_offset;
}

/* Bytes remaining in the skb beyond the current GRO parse offset. */
static inline unsigned int skb_gro_len(const struct sk_buff *skb)
{
	return skb->len - NAPI_GRO_CB(skb)->data_offset;
}

/* Advance the GRO parse offset by @len bytes. */
static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
{
	NAPI_GRO_CB(skb)->data_offset += len;
}

/*
 * Fast-path header access: points @offset bytes into frag0.  Only valid
 * after skb_gro_header_hard() confirmed frag0 holds enough bytes.
 */
static inline void *skb_gro_header_fast(struct sk_buff *skb,
					unsigned int offset)
{
	return NAPI_GRO_CB(skb)->frag0 + offset;
}

/* Nonzero if frag0 holds fewer than @hlen bytes, i.e. the slow path is needed. */
static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
{
	return NAPI_GRO_CB(skb)->frag0_len < hlen;
}

/*
 * Slow-path header access: linearize @hlen bytes into skb->data and
 * invalidate the frag0 shortcut.  Returns NULL if the pull fails.
 */
static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
					unsigned int offset)
{
	if (!pskb_may_pull(skb, hlen))
		return NULL;

	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;
	return skb->data + offset;
}

/* MAC header of a GRO packet, preferring the frag0 shortcut when set. */
static inline void *skb_gro_mac_header(struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb);
}

/* Network header of a GRO packet, preferring the frag0 shortcut when set. */
static inline void *skb_gro_network_header(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
	       skb_network_offset(skb);
}
1853
/*
 * Build the link-layer header for @skb via the device's header_ops.
 * Returns 0 when the device has no create op (nothing to do); otherwise
 * returns whatever ->create() returns.
 */
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
				  unsigned short type,
				  const void *daddr, const void *saddr,
				  unsigned int len)
{
	if (!dev->header_ops || !dev->header_ops->create)
		return 0;

	return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
}

/*
 * Extract the source hardware address of @skb into @haddr via the
 * device's header_ops.  Returns 0 when the device cannot parse headers.
 */
static inline int dev_parse_header(const struct sk_buff *skb,
				   unsigned char *haddr)
{
	const struct net_device *dev = skb->dev;

	if (!dev->header_ops || !dev->header_ops->parse)
		return 0;
	return dev->header_ops->parse(skb, haddr);
}
1874
1875typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
1876extern int register_gifconf(unsigned int family, gifconf_func_t * gifconf);
/* Remove the SIOCGIFCONF handler for @family by registering a NULL one. */
static inline int unregister_gifconf(unsigned int family)
{
	return register_gifconf(family, NULL);
}
1881
1882#ifdef CONFIG_NET_FLOW_LIMIT
1883#define FLOW_LIMIT_HISTORY (1 << 7)
/*
 * Per-softnet flow-limit state used to identify and drop packets from
 * dominating flows under load (see CONFIG_NET_FLOW_LIMIT).
 */
struct sd_flow_limit {
	u64 count;			/* total packets seen by this table */
	unsigned int num_buckets;	/* size of the trailing buckets[] array */
	unsigned int history_head;	/* next slot in the circular history */
	u16 history[FLOW_LIMIT_HISTORY];	/* recent per-packet bucket ids */
	u8 buckets[];			/* per-bucket packet counts (flexible array) */
};
1891
1892extern int netdev_flow_limit_table_len;
1893#endif
1894
1895
1896
1897
/*
 * Per-CPU packet-processing state; one instance per CPU, declared via
 * DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data) below.
 */
struct softnet_data {
	struct Qdisc *output_queue;		/* qdiscs pending __qdisc_run() */
	struct Qdisc **output_queue_tailp;	/* tail pointer for fast append */
	struct list_head poll_list;		/* NAPI contexts awaiting polling */
	struct sk_buff *completion_queue;	/* skbs to free in softirq context */
	struct sk_buff_head process_queue;	/* packets being processed */

	/* stats */
	unsigned int processed;		/* packets processed on this CPU */
	unsigned int time_squeeze;	/* times net_rx_action ran out of budget/time */
	unsigned int cpu_collision;	/* TX lock contention events */
	unsigned int received_rps;	/* RPS IPIs received */

#ifdef CONFIG_RPS
	struct softnet_data *rps_ipi_list;	/* remote CPUs to signal via IPI */

	/* cache line boundary: fields below are written by remote CPUs */
	struct call_single_data csd ____cacheline_aligned_in_smp;
	struct softnet_data *rps_ipi_next;
	unsigned int cpu;			/* this CPU's id, for IPI targeting */
	unsigned int input_queue_head;		/* consumer index of input_pkt_queue */
	unsigned int input_queue_tail;		/* producer index of input_pkt_queue */
#endif
	unsigned int dropped;			/* packets dropped due to backlog limit */
	struct sk_buff_head input_pkt_queue;	/* backlog of received packets */
	struct napi_struct backlog;		/* NAPI context draining the backlog */

#ifdef CONFIG_NET_FLOW_LIMIT
	struct sd_flow_limit __rcu *flow_limit;	/* RCU-protected flow-limit table */
#endif
};
1929
/* Advance the RPS input-queue consumer index (no-op without CONFIG_RPS). */
static inline void input_queue_head_incr(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	sd->input_queue_head++;
#endif
}

/*
 * Advance the RPS input-queue producer index and report the new value
 * through @qtail (left untouched without CONFIG_RPS).
 */
static inline void input_queue_tail_incr_save(struct softnet_data *sd,
					      unsigned int *qtail)
{
#ifdef CONFIG_RPS
	*qtail = ++sd->input_queue_tail;
#endif
}
1944
1945DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
1946
1947extern void __netif_schedule(struct Qdisc *q);
1948
/* Schedule @txq's qdisc for transmission unless the queue is stopped. */
static inline void netif_schedule_queue(struct netdev_queue *txq)
{
	if (!(txq->state & QUEUE_STATE_ANY_XOFF))
		__netif_schedule(txq->qdisc);
}

/* Schedule every TX queue of @dev that is not stopped. */
static inline void netif_tx_schedule_all(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		netif_schedule_queue(netdev_get_tx_queue(dev, i));
}

/* Allow the upper layers to transmit on @dev_queue (clears driver XOFF). */
static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
	clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}
1967
1968
1969
1970
1971
1972
1973
/*
 *	netif_start_queue - allow transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine
 *	(operates on TX queue 0 only; multiqueue drivers use the per-queue
 *	helpers).
 */
static inline void netif_start_queue(struct net_device *dev)
{
	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
}
1978
1979static inline void netif_tx_start_all_queues(struct net_device *dev)
1980{
1981 unsigned int i;
1982
1983 for (i = 0; i < dev->num_tx_queues; i++) {
1984 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
1985 netif_tx_start_queue(txq);
1986 }
1987}
1988
/*
 * Restart transmission on @dev_queue: clear the driver XOFF bit and, if it
 * was set, reschedule the queue's qdisc.  Under netpoll trapping the queue
 * is only restarted, not scheduled, to avoid recursing into the qdisc.
 */
static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap()) {
		netif_tx_start_queue(dev_queue);
		return;
	}
#endif
	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state))
		__netif_schedule(dev_queue->qdisc);
}
2000
2001
2002
2003
2004
2005
2006
2007
/*
 *	netif_wake_queue - restart transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are available
 *	(operates on TX queue 0 only).
 */
static inline void netif_wake_queue(struct net_device *dev)
{
	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
}

/* Restart transmission on every TX queue of @dev. */
static inline void netif_tx_wake_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_wake_queue(txq);
	}
}
2022
/*
 * Stop transmission on @dev_queue by setting the driver XOFF bit.
 * Warns (rather than crashing) if called with a NULL queue, which happens
 * when a driver stops queues before register_netdev().
 */
static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
	if (WARN_ON(!dev_queue)) {
		pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
		return;
	}
	set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}
2031
2032
2033
2034
2035
2036
2037
2038
/*
 *	netif_stop_queue - stop transmitted packets
 *	@dev: network device
 *
 *	Stop upper layers calling the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are unavailable
 *	(operates on TX queue 0 only).
 */
static inline void netif_stop_queue(struct net_device *dev)
{
	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}

/* Stop transmission on every TX queue of @dev. */
static inline void netif_tx_stop_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_stop_queue(txq);
	}
}
2053
/* True if the driver has stopped @dev_queue (driver XOFF bit set). */
static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
	return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/*
 *	netif_queue_stopped - test if transmit queue is flooded
 *	@dev: network device
 *
 *	Test if TX queue 0 of the device is stopped.
 */
static inline bool netif_queue_stopped(const struct net_device *dev)
{
	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}

/* True if the queue is stopped by either the driver or the stack (BQL). */
static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_ANY_XOFF;
}

/* Like netif_xmit_stopped() but also true while the queue is frozen. */
static inline bool netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
}
2079
/*
 * Report @bytes queued to hardware on @dev_queue for BQL accounting.
 * When the dynamic queue limit is exhausted, set the stack XOFF bit so
 * the stack stops feeding the queue.
 */
static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
					unsigned int bytes)
{
#ifdef CONFIG_BQL
	dql_queued(&dev_queue->dql, bytes);

	if (likely(dql_avail(&dev_queue->dql) >= 0))
		return;

	set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);

	/*
	 * The XOFF flag must be set before the dql_avail() re-check below:
	 * it pairs with the smp_mb() in netdev_tx_completed_queue() so that
	 * a concurrent completion either sees the flag or we see its
	 * dql_completed update.
	 */
	smp_mb();

	/* check again in case another CPU has just made room available */
	if (unlikely(dql_avail(&dev_queue->dql) >= 0))
		clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
#endif
}
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
/*
 *	netdev_sent_queue - report the number of bytes queued to hardware
 *	@dev: network device
 *	@bytes: number of bytes queued to the hardware device queue
 *
 *	BQL accounting for single-queue devices (operates on TX queue 0).
 */
static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
{
	netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
}
2117
/*
 * Report completion of @pkts packets / @bytes bytes on @dev_queue for BQL.
 * If the dynamic limit now has room and the stack XOFF bit was set,
 * clear it and reschedule the queue.
 */
static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
					     unsigned int pkts, unsigned int bytes)
{
#ifdef CONFIG_BQL
	if (unlikely(!bytes))
		return;

	dql_completed(&dev_queue->dql, bytes);

	/*
	 * Pairs with the smp_mb() in netdev_tx_sent_queue(): the
	 * dql_completed update must be visible before we test the
	 * STACK_XOFF flag, or a concurrent sender could stall the queue.
	 */
	smp_mb();

	if (dql_avail(&dev_queue->dql) < 0)
		return;

	if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
		netif_schedule_queue(dev_queue);
#endif
}
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
/*
 *	netdev_completed_queue - report bytes and packets completed by device
 *	@dev: network device
 *	@pkts: actual number of packets sent over the medium
 *	@bytes: actual number of bytes sent over the medium
 *
 *	BQL completion accounting for single-queue devices (TX queue 0).
 */
static inline void netdev_completed_queue(struct net_device *dev,
					  unsigned int pkts, unsigned int bytes)
{
	netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
}

/* Reset BQL state for @q: clear the stack XOFF bit and the dynamic limit. */
static inline void netdev_tx_reset_queue(struct netdev_queue *q)
{
#ifdef CONFIG_BQL
	clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
	dql_reset(&q->dql);
#endif
}
2165
2166
2167
2168
2169
2170
2171
2172
/*
 *	netdev_reset_queue - reset the packets and bytes count of a network device
 *	@dev: network device
 *
 *	Reset the BQL byte/packet accounting and clear the stack XOFF bit
 *	for TX queue 0 of @dev.
 *
 *	Note: the parameter was previously (confusingly) named "dev_queue"
 *	even though it is a struct net_device, not a struct netdev_queue;
 *	renaming it does not affect callers.
 */
static inline void netdev_reset_queue(struct net_device *dev)
{
	netdev_tx_reset_queue(netdev_get_tx_queue(dev, 0));
}
2177
2178
2179
2180
2181
2182
2183
/*
 *	netif_running - test if up
 *	@dev: network device
 *
 *	Test if the device has been brought up (__LINK_STATE_START set).
 */
static inline bool netif_running(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
2204{
2205 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
2206
2207 netif_tx_start_queue(txq);
2208}
2209
2210
2211
2212
2213
2214
2215
2216
/*
 *	netif_stop_subqueue - stop sending packets on subqueue indexed by @queue_index
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 *	Stop the given subqueue; does nothing while netpoll is trapping.
 */
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	netif_tx_stop_queue(txq);
}
2226
2227
2228
2229
2230
2231
2232
2233
/* True if subqueue @queue_index of @dev has been stopped by the driver. */
static inline bool __netif_subqueue_stopped(const struct net_device *dev,
					    u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	return netif_tx_queue_stopped(txq);
}

/* Like __netif_subqueue_stopped(), using @skb's queue mapping as the index. */
static inline bool netif_subqueue_stopped(const struct net_device *dev,
					  struct sk_buff *skb)
{
	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}
2247
2248
2249
2250
2251
2252
2253
2254
/*
 *	netif_wake_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 *	Resume individual transmit queue of a device with multiple queues;
 *	does nothing while netpoll is trapping.
 */
static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
		__netif_schedule(txq->qdisc);
}
2265
2266#ifdef CONFIG_XPS
2267extern int netif_set_xps_queue(struct net_device *dev,
2268 const struct cpumask *mask,
2269 u16 index);
2270#else
/* Stub for kernels built without CONFIG_XPS: always succeeds, does nothing. */
static inline int netif_set_xps_queue(struct net_device *dev,
				      const struct cpumask *mask,
				      u16 index)
{
	return 0;
}
2277#endif
2278
2279
2280
2281
2282
/*
 * Map @skb to one of @dev's real TX queues by hashing
 * (delegates to __skb_tx_hash() with the device's real queue count).
 */
static inline u16 skb_tx_hash(const struct net_device *dev,
			      const struct sk_buff *skb)
{
	return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
}
2288
2289
2290
2291
2292
2293
2294
/*
 *	netif_is_multiqueue - test if device has multiple transmit queues
 *	@dev: network device
 */
static inline bool netif_is_multiqueue(const struct net_device *dev)
{
	return dev->num_tx_queues > 1;
}
2299
2300extern int netif_set_real_num_tx_queues(struct net_device *dev,
2301 unsigned int txq);
2302
2303#ifdef CONFIG_RPS
2304extern int netif_set_real_num_rx_queues(struct net_device *dev,
2305 unsigned int rxq);
2306#else
/* Stub for kernels built without CONFIG_RPS: always succeeds, does nothing. */
static inline int netif_set_real_num_rx_queues(struct net_device *dev,
					       unsigned int rxq)
{
	return 0;
}
2312#endif
2313
/*
 * Copy the real TX (and, with CONFIG_RPS, RX) queue counts from
 * @from_dev to @to_dev.  Returns 0 on success or a negative errno
 * from the underlying netif_set_real_num_*_queues() call.
 */
static inline int netif_copy_real_num_queues(struct net_device *to_dev,
					     const struct net_device *from_dev)
{
	int err;

	err = netif_set_real_num_tx_queues(to_dev,
					   from_dev->real_num_tx_queues);
	if (err)
		return err;
#ifdef CONFIG_RPS
	return netif_set_real_num_rx_queues(to_dev,
					    from_dev->real_num_rx_queues);
#else
	return 0;
#endif
}
2330
2331#define DEFAULT_MAX_NUM_RSS_QUEUES (8)
2332extern int netif_get_num_default_rss_queues(void);
2333
2334
2335
2336
2337
2338extern void dev_kfree_skb_irq(struct sk_buff *skb);
2339
2340
2341
2342
2343
2344extern void dev_kfree_skb_any(struct sk_buff *skb);
2345
2346extern int netif_rx(struct sk_buff *skb);
2347extern int netif_rx_ni(struct sk_buff *skb);
2348extern int netif_receive_skb(struct sk_buff *skb);
2349extern gro_result_t napi_gro_receive(struct napi_struct *napi,
2350 struct sk_buff *skb);
2351extern void napi_gro_flush(struct napi_struct *napi, bool flush_old);
2352extern struct sk_buff * napi_get_frags(struct napi_struct *napi);
2353extern gro_result_t napi_gro_frags(struct napi_struct *napi);
2354
/* Free the skb cached by napi_get_frags() and forget it (kfree_skb(NULL) is a no-op). */
static inline void napi_free_frags(struct napi_struct *napi)
{
	kfree_skb(napi->skb);
	napi->skb = NULL;
}
2360
2361extern int netdev_rx_handler_register(struct net_device *dev,
2362 rx_handler_func_t *rx_handler,
2363 void *rx_handler_data);
2364extern void netdev_rx_handler_unregister(struct net_device *dev);
2365
2366extern bool dev_valid_name(const char *name);
2367extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
2368extern int dev_ethtool(struct net *net, struct ifreq *);
2369extern unsigned int dev_get_flags(const struct net_device *);
2370extern int __dev_change_flags(struct net_device *, unsigned int flags);
2371extern int dev_change_flags(struct net_device *, unsigned int);
2372extern void __dev_notify_flags(struct net_device *, unsigned int old_flags);
2373extern int dev_change_name(struct net_device *, const char *);
2374extern int dev_set_alias(struct net_device *, const char *, size_t);
2375extern int dev_change_net_namespace(struct net_device *,
2376 struct net *, const char *);
2377extern int dev_set_mtu(struct net_device *, int);
2378extern void dev_set_group(struct net_device *, int);
2379extern int dev_set_mac_address(struct net_device *,
2380 struct sockaddr *);
2381extern int dev_change_carrier(struct net_device *,
2382 bool new_carrier);
2383extern int dev_get_phys_port_id(struct net_device *dev,
2384 struct netdev_phys_port_id *ppid);
2385extern int dev_hard_start_xmit(struct sk_buff *skb,
2386 struct net_device *dev,
2387 struct netdev_queue *txq);
2388extern int dev_forward_skb(struct net_device *dev,
2389 struct sk_buff *skb);
2390
2391extern int netdev_budget;
2392
2393
2394extern void netdev_run_todo(void);
2395
2396
2397
2398
2399
2400
2401
/*
 *	dev_put - release reference to device
 *	@dev: network device
 *
 *	Release reference to device to allow it to be freed.
 *	Uses a per-cpu refcount, so no atomic operation on a shared counter.
 */
static inline void dev_put(struct net_device *dev)
{
	this_cpu_dec(*dev->pcpu_refcnt);
}

/*
 *	dev_hold - get reference to device
 *	@dev: network device
 *
 *	Hold reference to device to keep it from being freed.
 */
static inline void dev_hold(struct net_device *dev)
{
	this_cpu_inc(*dev->pcpu_refcnt);
}
2417
2418
2419
2420
2421
2422
2423
2424
2425
2426
2427extern void linkwatch_init_dev(struct net_device *dev);
2428extern void linkwatch_fire_event(struct net_device *dev);
2429extern void linkwatch_forget_dev(struct net_device *dev);
2430
2431
2432
2433
2434
2435
2436
/*
 *	netif_carrier_ok - test if carrier present
 *	@dev: network device
 *
 *	Check if carrier is present on device (NOCARRIER bit clear).
 */
static inline bool netif_carrier_ok(const struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}
2441
2442extern unsigned long dev_trans_start(struct net_device *dev);
2443
2444extern void __netdev_watchdog_up(struct net_device *dev);
2445
2446extern void netif_carrier_on(struct net_device *dev);
2447
2448extern void netif_carrier_off(struct net_device *dev);
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459
2460
2461
2462
/*
 *	netif_dormant_on - mark device as dormant
 *	@dev: network device
 *
 *	Set the dormant link-state bit; fires a linkwatch event only on
 *	an actual 0 -> 1 transition.
 */
static inline void netif_dormant_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/*
 *	netif_dormant_off - set device as not dormant
 *	@dev: network device
 *
 *	Clear the dormant bit; fires a linkwatch event only on an actual
 *	1 -> 0 transition.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/*
 *	netif_dormant - test if device is dormant
 *	@dev: network device
 */
static inline bool netif_dormant(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_DORMANT, &dev->state);
}
2491
2492
2493
2494
2495
2496
2497
2498
2499static inline bool netif_oper_up(const struct net_device *dev)
2500{
2501 return (dev->operstate == IF_OPER_UP ||
2502 dev->operstate == IF_OPER_UNKNOWN );
2503}
2504
2505
2506
2507
2508
2509
2510
/*
 *	netif_device_present - is device available or removed
 *	@dev: network device
 *
 *	Check if device has not been removed from the system
 *	(__LINK_STATE_PRESENT set).
 */
static inline bool netif_device_present(struct net_device *dev)
{
	return test_bit(__LINK_STATE_PRESENT, &dev->state);
}
2515
2516extern void netif_device_detach(struct net_device *dev);
2517
2518extern void netif_device_attach(struct net_device *dev);
2519
2520
2521
2522
2523
/* Debug-message category bits for a driver's msg_enable bitmap
 * (tested via the netif_msg_*() macros below).
 */
enum {
	NETIF_MSG_DRV = 0x0001,
	NETIF_MSG_PROBE = 0x0002,
	NETIF_MSG_LINK = 0x0004,
	NETIF_MSG_TIMER = 0x0008,
	NETIF_MSG_IFDOWN = 0x0010,
	NETIF_MSG_IFUP = 0x0020,
	NETIF_MSG_RX_ERR = 0x0040,
	NETIF_MSG_TX_ERR = 0x0080,
	NETIF_MSG_TX_QUEUED = 0x0100,
	NETIF_MSG_INTR = 0x0200,
	NETIF_MSG_TX_DONE = 0x0400,
	NETIF_MSG_RX_STATUS = 0x0800,
	NETIF_MSG_PKTDATA = 0x1000,
	NETIF_MSG_HW = 0x2000,
	NETIF_MSG_WOL = 0x4000,
};
2541
2542#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
2543#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
2544#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
2545#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
2546#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
2547#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
2548#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
2549#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
2550#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
2551#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
2552#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
2553#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
2554#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
2555#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
2556#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
2557
2558static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
2559{
2560
2561 if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
2562 return default_msg_enable_bits;
2563 if (debug_value == 0)
2564 return 0;
2565
2566 return (1 << debug_value) - 1;
2567}
2568
/* Take @txq's xmit lock and record @cpu as the owner (for recursion checks). */
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
	spin_lock(&txq->_xmit_lock);
	txq->xmit_lock_owner = cpu;
}

/* As __netif_tx_lock(), but also disables bottom halves. */
static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
	spin_lock_bh(&txq->_xmit_lock);
	txq->xmit_lock_owner = smp_processor_id();
}

/* Try-lock variant; records the owner only on success.  Returns true if taken. */
static inline bool __netif_tx_trylock(struct netdev_queue *txq)
{
	bool ok = spin_trylock(&txq->_xmit_lock);
	if (likely(ok))
		txq->xmit_lock_owner = smp_processor_id();
	return ok;
}

/* Release @txq's xmit lock, clearing the owner first (-1 = unowned). */
static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock(&txq->_xmit_lock);
}

/* As __netif_tx_unlock(), but also re-enables bottom halves. */
static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock_bh(&txq->_xmit_lock);
}

/* Refresh the queue's trans_start timestamp, but only while the lock is held. */
static inline void txq_trans_update(struct netdev_queue *txq)
{
	if (txq->xmit_lock_owner != -1)
		txq->trans_start = jiffies;
}
2606
2607
2608
2609
2610
2611
2612
/*
 *	netif_tx_lock - grab network device transmit lock
 *	@dev: network device
 *
 *	Get network device transmit lock: take the global TX lock, then
 *	freeze every TX queue.  Each queue's own xmit lock is taken briefly
 *	so that the FROZEN bit is set while no transmit is in flight on it.
 */
static inline void netif_tx_lock(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	spin_lock(&dev->tx_global_lock);
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/*
		 * We are the only thread of execution doing a
		 * freeze, but we have to grab the _xmit_lock in
		 * order to synchronize with threads which are in
		 * the ->hard_start_xmit() handler and already
		 * checked the frozen bit.
		 */
		__netif_tx_lock(txq, cpu);
		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
		__netif_tx_unlock(txq);
	}
}

/* As netif_tx_lock(), with bottom halves disabled first. */
static inline void netif_tx_lock_bh(struct net_device *dev)
{
	local_bh_disable();
	netif_tx_lock(dev);
}
2640
/*
 * Release the device transmit lock: unfreeze every TX queue (rescheduling
 * each so frozen-deferred work runs), then drop the global TX lock.
 */
static inline void netif_tx_unlock(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/*
		 * No need to grab the _xmit_lock here.  If the
		 * queue is not stopped for another reason, we
		 * force a schedule.
		 */
		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
		netif_schedule_queue(txq);
	}
	spin_unlock(&dev->tx_global_lock);
}

/* As netif_tx_unlock(), re-enabling bottom halves afterwards. */
static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	netif_tx_unlock(dev);
	local_bh_enable();
}
2663
2664#define HARD_TX_LOCK(dev, txq, cpu) { \
2665 if ((dev->features & NETIF_F_LLTX) == 0) { \
2666 __netif_tx_lock(txq, cpu); \
2667 } \
2668}
2669
2670#define HARD_TX_UNLOCK(dev, txq) { \
2671 if ((dev->features & NETIF_F_LLTX) == 0) { \
2672 __netif_tx_unlock(txq); \
2673 } \
2674}
2675
/*
 * Stop every TX queue of @dev, taking each queue's xmit lock so the stop
 * is ordered against in-flight hard_start_xmit calls.  Runs with bottom
 * halves disabled.
 */
static inline void netif_tx_disable(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	local_bh_disable();
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		__netif_tx_lock(txq, cpu);
		netif_tx_stop_queue(txq);
		__netif_tx_unlock(txq);
	}
	local_bh_enable();
}
2692
/* Lock the device's address lists (unicast/multicast) for modification. */
static inline void netif_addr_lock(struct net_device *dev)
{
	spin_lock(&dev->addr_list_lock);
}

/* Nested-class variant for lockdep, used when two devices' address
 * locks must be held at once (e.g. syncing master/slave lists).
 */
static inline void netif_addr_lock_nested(struct net_device *dev)
{
	spin_lock_nested(&dev->addr_list_lock, SINGLE_DEPTH_NESTING);
}

/* As netif_addr_lock(), with bottom halves disabled. */
static inline void netif_addr_lock_bh(struct net_device *dev)
{
	spin_lock_bh(&dev->addr_list_lock);
}

/* Release the address-list lock. */
static inline void netif_addr_unlock(struct net_device *dev)
{
	spin_unlock(&dev->addr_list_lock);
}

/* Release the address-list lock and re-enable bottom halves. */
static inline void netif_addr_unlock_bh(struct net_device *dev)
{
	spin_unlock_bh(&dev->addr_list_lock);
}
2717
2718
2719
2720
2721
2722#define for_each_dev_addr(dev, ha) \
2723 list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
2724
2725
2726
2727extern void ether_setup(struct net_device *dev);
2728
2729
2730extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
2731 void (*setup)(struct net_device *),
2732 unsigned int txqs, unsigned int rxqs);
2733#define alloc_netdev(sizeof_priv, name, setup) \
2734 alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)
2735
2736#define alloc_netdev_mq(sizeof_priv, name, setup, count) \
2737 alloc_netdev_mqs(sizeof_priv, name, setup, count, count)
2738
2739extern int register_netdev(struct net_device *dev);
2740extern void unregister_netdev(struct net_device *dev);
2741
2742
2743extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
2744 struct netdev_hw_addr_list *from_list,
2745 int addr_len, unsigned char addr_type);
2746extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
2747 struct netdev_hw_addr_list *from_list,
2748 int addr_len, unsigned char addr_type);
2749extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
2750 struct netdev_hw_addr_list *from_list,
2751 int addr_len);
2752extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
2753 struct netdev_hw_addr_list *from_list,
2754 int addr_len);
2755extern void __hw_addr_flush(struct netdev_hw_addr_list *list);
2756extern void __hw_addr_init(struct netdev_hw_addr_list *list);
2757
2758
2759extern int dev_addr_add(struct net_device *dev, const unsigned char *addr,
2760 unsigned char addr_type);
2761extern int dev_addr_del(struct net_device *dev, const unsigned char *addr,
2762 unsigned char addr_type);
2763extern int dev_addr_add_multiple(struct net_device *to_dev,
2764 struct net_device *from_dev,
2765 unsigned char addr_type);
2766extern int dev_addr_del_multiple(struct net_device *to_dev,
2767 struct net_device *from_dev,
2768 unsigned char addr_type);
2769extern void dev_addr_flush(struct net_device *dev);
2770extern int dev_addr_init(struct net_device *dev);
2771
2772
2773extern int dev_uc_add(struct net_device *dev, const unsigned char *addr);
2774extern int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
2775extern int dev_uc_del(struct net_device *dev, const unsigned char *addr);
2776extern int dev_uc_sync(struct net_device *to, struct net_device *from);
2777extern int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
2778extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
2779extern void dev_uc_flush(struct net_device *dev);
2780extern void dev_uc_init(struct net_device *dev);
2781
2782
2783extern int dev_mc_add(struct net_device *dev, const unsigned char *addr);
2784extern int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
2785extern int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
2786extern int dev_mc_del(struct net_device *dev, const unsigned char *addr);
2787extern int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
2788extern int dev_mc_sync(struct net_device *to, struct net_device *from);
2789extern int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
2790extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
2791extern void dev_mc_flush(struct net_device *dev);
2792extern void dev_mc_init(struct net_device *dev);
2793
2794
2795extern void dev_set_rx_mode(struct net_device *dev);
2796extern void __dev_set_rx_mode(struct net_device *dev);
2797extern int dev_set_promiscuity(struct net_device *dev, int inc);
2798extern int dev_set_allmulti(struct net_device *dev, int inc);
2799extern void netdev_state_change(struct net_device *dev);
2800extern void netdev_notify_peers(struct net_device *dev);
2801extern void netdev_features_change(struct net_device *dev);
2802
2803extern void dev_load(struct net *net, const char *name);
2804extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
2805 struct rtnl_link_stats64 *storage);
2806extern void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
2807 const struct net_device_stats *netdev_stats);
2808
2809extern int netdev_max_backlog;
2810extern int netdev_tstamp_prequeue;
2811extern int weight_p;
2812extern int bpf_jit_enable;
2813
2814extern bool netdev_has_upper_dev(struct net_device *dev,
2815 struct net_device *upper_dev);
2816extern bool netdev_has_any_upper_dev(struct net_device *dev);
2817extern struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
2818 struct list_head **iter);
2819
2820
2821#define netdev_for_each_upper_dev_rcu(dev, upper, iter) \
2822 for (iter = &(dev)->upper_dev_list, \
2823 upper = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
2824 upper; \
2825 upper = netdev_upper_get_next_dev_rcu(dev, &(iter)))
2826
2827extern struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
2828extern struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
2829extern int netdev_upper_dev_link(struct net_device *dev,
2830 struct net_device *upper_dev);
2831extern int netdev_master_upper_dev_link(struct net_device *dev,
2832 struct net_device *upper_dev);
2833extern void netdev_upper_dev_unlink(struct net_device *dev,
2834 struct net_device *upper_dev);
2835extern int skb_checksum_help(struct sk_buff *skb);
2836extern struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2837 netdev_features_t features, bool tx_path);
2838extern struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2839 netdev_features_t features);
2840
/* GSO-segment @skb for the transmit path (tx_path = true); see
 * __skb_gso_segment() for the receive-path variant.
 */
static inline
struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
	return __skb_gso_segment(skb, features, true);
}
2846__be16 skb_network_protocol(struct sk_buff *skb);
2847
/*
 * True if @features can checksum @protocol in hardware: either a
 * protocol-generic checksum offload, or the matching per-protocol
 * offload for IPv4, IPv6, or FCoE CRC.
 */
static inline bool can_checksum_protocol(netdev_features_t features,
					 __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_V4_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_V6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)) ||
		((features & NETIF_F_FCOE_CRC) &&
		 protocol == htons(ETH_P_FCOE)));
}
2859
2860#ifdef CONFIG_BUG
2861extern void netdev_rx_csum_fault(struct net_device *dev);
2862#else
/* Stub for kernels built without CONFIG_BUG: RX checksum faults are ignored. */
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
}
2866#endif
2867
2868extern void net_enable_timestamp(void);
2869extern void net_disable_timestamp(void);
2870
2871#ifdef CONFIG_PROC_FS
2872extern int __init dev_proc_init(void);
2873#else
2874#define dev_proc_init() 0
2875#endif
2876
/* sysfs attribute helpers for the "net" device class. */
extern int netdev_class_create_file(struct class_attribute *class_attr);
extern void netdev_class_remove_file(struct class_attribute *class_attr);

extern struct kobj_ns_type_operations net_ns_type_operations;

/* Name of the driver bound to @dev, for log/diagnostic messages. */
extern const char *netdev_drivername(const struct net_device *dev);

/* Process pending link-state change events. */
extern void linkwatch_run_queue(void);
2885
2886static inline netdev_features_t netdev_get_wanted_features(
2887 struct net_device *dev)
2888{
2889 return (dev->features & ~dev->hw_features) | dev->wanted_features;
2890}
/* Merge @one's features into the accumulated set @all, constrained by
 * @mask (used when computing features across stacked devices). */
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask);
2893
2894
2895
2896
2897
2898static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
2899 netdev_features_t mask)
2900{
2901 return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
2902}
2903
/* Recompute dev->features after a configuration change and notify. */
int __netdev_update_features(struct net_device *dev);
void netdev_update_features(struct net_device *dev);
void netdev_change_features(struct net_device *dev);

/* Copy operational state from @rootdev down to the stacked @dev. */
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
					struct net_device *dev);

/* Features actually usable for transmitting this particular @skb. */
netdev_features_t netif_skb_features(struct sk_buff *skb);
2912
/* True if feature set @features covers every GSO type bit in @gso_type.
 * SKB_GSO_* flags map 1:1 onto NETIF_F_GSO_* feature bits, offset by
 * NETIF_F_GSO_SHIFT, so the check is a shift plus a mask test. */
static inline bool net_gso_ok(netdev_features_t features, int gso_type)
{
	netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;

	/* Verify the SKB_GSO_* <-> NETIF_F_* correspondence at compile
	 * time; a mismatch here would silently break the mapping above. */
	BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));

	return (features & feature) == feature;
}
2927
2928static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
2929{
2930 return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
2931 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
2932}
2933
2934static inline bool netif_needs_gso(struct sk_buff *skb,
2935 netdev_features_t features)
2936{
2937 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
2938 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
2939 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
2940}
2941
/* Set the largest GSO packet size the device is willing to handle. */
static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	dev->gso_max_size = size;
}
2947
2948static inline bool netif_is_bond_master(struct net_device *dev)
2949{
2950 return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
2951}
2952
2953static inline bool netif_is_bond_slave(struct net_device *dev)
2954{
2955 return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
2956}
2957
/* True if the device can send frames without appending the FCS. */
static inline bool netif_supports_nofcs(struct net_device *dev)
{
	return dev->priv_flags & IFF_SUPP_NOFCS;
}
2962
/* Per-network-namespace operations for the loopback device. */
extern struct pernet_operations __net_initdata loopback_net_ops;
2964
2965
2966
2967
2968
2969static inline const char *netdev_name(const struct net_device *dev)
2970{
2971 if (dev->reg_state != NETREG_REGISTERED)
2972 return "(unregistered net_device)";
2973 return dev->name;
2974}
2975
/* printk() wrappers that prefix each message with device information;
 * one entry point per log level.  __printf() enables format checking. */
extern __printf(3, 4)
int netdev_printk(const char *level, const struct net_device *dev,
		  const char *format, ...);
extern __printf(2, 3)
int netdev_emerg(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_alert(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_crit(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_err(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_warn(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_notice(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_info(const struct net_device *dev, const char *format, ...);
2993
/* Module alias so a request for "netdev-<device>" loads this module. */
#define MODULE_ALIAS_NETDEV(device) \
	MODULE_ALIAS("netdev-" device)

/* netdev_dbg(): debug-level message.  Routed through dynamic debug when
 * available, compiled in unconditionally with DEBUG, and otherwise
 * compiled out — the if (0) keeps format/argument type checking. */
#if defined(CONFIG_DYNAMIC_DEBUG)
#define netdev_dbg(__dev, format, args...) \
do { \
	dynamic_netdev_dbg(__dev, format, ##args); \
} while (0)
#elif defined(DEBUG)
#define netdev_dbg(__dev, format, args...) \
	netdev_printk(KERN_DEBUG, __dev, format, ##args)
#else
#define netdev_dbg(__dev, format, args...) \
({ \
	if (0) \
		netdev_printk(KERN_DEBUG, __dev, format, ##args); \
	0; \
})
#endif

/* netdev_vdbg(): extra-verbose debug, only active with VERBOSE_DEBUG. */
#if defined(VERBOSE_DEBUG)
#define netdev_vdbg netdev_dbg
#else

#define netdev_vdbg(dev, format, args...) \
({ \
	if (0) \
		netdev_printk(KERN_DEBUG, dev, format, ##args); \
	0; \
})
#endif
3025
3026
3027
3028
3029
3030
/* WARN() with the device name prepended to the message.
 *
 * No trailing semicolon in the expansion: callers write the semicolon
 * themselves, and a stray one here would expand to a double semicolon,
 * breaking use in un-braced if/else arms and expression contexts. */
#define netdev_WARN(dev, format, args...) \
	WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args)
3033
3034
3035
/* Conditional logging gated on the driver's message-level bitmap:
 * emit only when the netif_msg_<type> bit is set in @priv. */
#define netif_printk(priv, type, level, dev, fmt, args...) \
do { \
	if (netif_msg_##type(priv)) \
		netdev_printk(level, (dev), fmt, ##args); \
} while (0)

/* Same gating, dispatched to the per-level netdev_<level>() helpers. */
#define netif_level(level, priv, type, dev, fmt, args...) \
do { \
	if (netif_msg_##type(priv)) \
		netdev_##level(dev, fmt, ##args); \
} while (0)

#define netif_emerg(priv, type, dev, fmt, args...) \
	netif_level(emerg, priv, type, dev, fmt, ##args)
#define netif_alert(priv, type, dev, fmt, args...) \
	netif_level(alert, priv, type, dev, fmt, ##args)
#define netif_crit(priv, type, dev, fmt, args...) \
	netif_level(crit, priv, type, dev, fmt, ##args)
#define netif_err(priv, type, dev, fmt, args...) \
	netif_level(err, priv, type, dev, fmt, ##args)
#define netif_warn(priv, type, dev, fmt, args...) \
	netif_level(warn, priv, type, dev, fmt, ##args)
#define netif_notice(priv, type, dev, fmt, args...) \
	netif_level(notice, priv, type, dev, fmt, ##args)
#define netif_info(priv, type, dev, fmt, args...) \
	netif_level(info, priv, type, dev, fmt, ##args)
3062
/* netif_dbg(): message-level-gated debug output.  Uses dynamic debug
 * when available, plain KERN_DEBUG with DEBUG, and is otherwise
 * compiled out — the if (0) preserves format/argument checking. */
#if defined(CONFIG_DYNAMIC_DEBUG)
#define netif_dbg(priv, type, netdev, format, args...) \
do { \
	if (netif_msg_##type(priv)) \
		dynamic_netdev_dbg(netdev, format, ##args); \
} while (0)
#elif defined(DEBUG)
#define netif_dbg(priv, type, dev, format, args...) \
	netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
#else
#define netif_dbg(priv, type, dev, format, args...) \
({ \
	if (0) \
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0; \
})
#endif

/* netif_vdbg(): extra-verbose variant, active only with VERBOSE_DEBUG. */
#if defined(VERBOSE_DEBUG)
#define netif_vdbg netif_dbg
#else
#define netif_vdbg(priv, type, dev, format, args...) \
({ \
	if (0) \
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0; \
})
#endif
3091
3092
3093
3094
3095
3096
3097
3098
3099
3100
3101
3102
3103
3104
3105
3106
3107
3108
3109
3110
3111
3112
3113
3114
3115
3116
3117
3118
/* Size (power of two) and index mask of the packet-type hash table. */
#define PTYPE_HASH_SIZE (16)
#define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)

#endif /* _LINUX_NETDEVICE_H */
3123