25#ifndef _LINUX_NETDEVICE_H
26#define _LINUX_NETDEVICE_H
27
28#include <linux/pm_qos.h>
29#include <linux/timer.h>
30#include <linux/bug.h>
31#include <linux/delay.h>
32#include <linux/atomic.h>
33#include <asm/cache.h>
34#include <asm/byteorder.h>
35
36#include <linux/percpu.h>
37#include <linux/rculist.h>
38#include <linux/dmaengine.h>
39#include <linux/workqueue.h>
40#include <linux/dynamic_queue_limits.h>
41
42#include <linux/ethtool.h>
43#include <net/net_namespace.h>
44#include <net/dsa.h>
45#ifdef CONFIG_DCB
46#include <net/dcbnl.h>
47#endif
48#include <net/netprio_cgroup.h>
49
50#include <linux/netdev_features.h>
51#include <linux/neighbour.h>
52#include <uapi/linux/netdevice.h>
53
54struct netpoll_info;
55struct device;
56struct phy_device;
57
58struct wireless_dev;
59
60#define SET_ETHTOOL_OPS(netdev,ops) \
61 ( (netdev)->ethtool_ops = (ops) )
62
63extern void netdev_set_default_ethtool_ops(struct net_device *dev,
64 const struct ethtool_ops *ops);
65
66
67#define NET_ADDR_PERM 0
68#define NET_ADDR_RANDOM 1
69#define NET_ADDR_STOLEN 2
70#define NET_ADDR_SET 3
71
72
73
74#define NET_RX_SUCCESS 0
75#define NET_RX_DROP 1
95#define NET_XMIT_SUCCESS 0x00
96#define NET_XMIT_DROP 0x01
97#define NET_XMIT_CN 0x02
98#define NET_XMIT_POLICED 0x03
99#define NET_XMIT_MASK 0x0f
100
101
102
103
104#define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e))
105#define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0)
106
107
108#define NETDEV_TX_MASK 0xf0
109
110enum netdev_tx {
111 __NETDEV_TX_MIN = INT_MIN,
112 NETDEV_TX_OK = 0x00,
113 NETDEV_TX_BUSY = 0x10,
114 NETDEV_TX_LOCKED = 0x20,
115};
116typedef enum netdev_tx netdev_tx_t;
117
118
119
120
121
122static inline bool dev_xmit_complete(int rc)
123{
130 if (likely(rc < NET_XMIT_MASK))
131 return true;
132
133 return false;
134}
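/*
 * Illustrative sketch (not part of this header) of how a driver's
 * ndo_start_xmit handler typically uses the netdev_tx_t codes above;
 * the mydrv_* names are hypothetical:
 *
 *	static netdev_tx_t mydrv_start_xmit(struct sk_buff *skb,
 *					    struct net_device *dev)
 *	{
 *		struct mydrv_priv *priv = netdev_priv(dev);
 *
 *		if (mydrv_tx_ring_full(priv)) {
 *			netif_stop_queue(dev);
 *			return NETDEV_TX_BUSY;
 *		}
 *		mydrv_post_frame(priv, skb);
 *		return NETDEV_TX_OK;
 *	}
 *
 * NETDEV_TX_OK means the skb was consumed; NETDEV_TX_BUSY asks the core
 * to requeue the unchanged skb and retry later.
 */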
141#if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
142# if defined(CONFIG_MAC80211_MESH)
143# define LL_MAX_HEADER 128
144# else
145# define LL_MAX_HEADER 96
146# endif
147#else
148# define LL_MAX_HEADER 32
149#endif
150
151#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
152 !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
153#define MAX_HEADER LL_MAX_HEADER
154#else
155#define MAX_HEADER (LL_MAX_HEADER + 48)
156#endif
163struct net_device_stats {
164 unsigned long rx_packets;
165 unsigned long tx_packets;
166 unsigned long rx_bytes;
167 unsigned long tx_bytes;
168 unsigned long rx_errors;
169 unsigned long tx_errors;
170 unsigned long rx_dropped;
171 unsigned long tx_dropped;
172 unsigned long multicast;
173 unsigned long collisions;
174 unsigned long rx_length_errors;
175 unsigned long rx_over_errors;
176 unsigned long rx_crc_errors;
177 unsigned long rx_frame_errors;
178 unsigned long rx_fifo_errors;
179 unsigned long rx_missed_errors;
180 unsigned long tx_aborted_errors;
181 unsigned long tx_carrier_errors;
182 unsigned long tx_fifo_errors;
183 unsigned long tx_heartbeat_errors;
184 unsigned long tx_window_errors;
185 unsigned long rx_compressed;
186 unsigned long tx_compressed;
187};
188
189
190#include <linux/cache.h>
191#include <linux/skbuff.h>
192
193#ifdef CONFIG_RPS
194#include <linux/static_key.h>
195extern struct static_key rps_needed;
196#endif
197
198struct neighbour;
199struct neigh_parms;
200struct sk_buff;
201
202struct netdev_hw_addr {
203 struct list_head list;
204 unsigned char addr[MAX_ADDR_LEN];
205 unsigned char type;
206#define NETDEV_HW_ADDR_T_LAN 1
207#define NETDEV_HW_ADDR_T_SAN 2
208#define NETDEV_HW_ADDR_T_SLAVE 3
209#define NETDEV_HW_ADDR_T_UNICAST 4
210#define NETDEV_HW_ADDR_T_MULTICAST 5
211 bool global_use;
212 int sync_cnt;
213 int refcount;
214 int synced;
215 struct rcu_head rcu_head;
216};
217
218struct netdev_hw_addr_list {
219 struct list_head list;
220 int count;
221};
222
223#define netdev_hw_addr_list_count(l) ((l)->count)
224#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
225#define netdev_hw_addr_list_for_each(ha, l) \
226 list_for_each_entry(ha, &(l)->list, list)
227
228#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
229#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
230#define netdev_for_each_uc_addr(ha, dev) \
231 netdev_hw_addr_list_for_each(ha, &(dev)->uc)
232
233#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
234#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
235#define netdev_for_each_mc_addr(ha, dev) \
236 netdev_hw_addr_list_for_each(ha, &(dev)->mc)
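/*
 * Example use of the address-list helpers above (a sketch; the mydrv_*
 * helper is hypothetical): walking the multicast list from a driver's
 * ndo_set_rx_mode callback.
 *
 *	static void mydrv_set_rx_mode(struct net_device *dev)
 *	{
 *		struct netdev_hw_addr *ha;
 *
 *		netdev_for_each_mc_addr(ha, dev)
 *			mydrv_hash_mc_addr(dev, ha->addr);
 *	}
 */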
237
238struct hh_cache {
239 u16 hh_len;
240 u16 __pad;
241 seqlock_t hh_lock;
242
243
244#define HH_DATA_MOD 16
245#define HH_DATA_OFF(__len) \
246 (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
247#define HH_DATA_ALIGN(__len) \
248 (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
249 unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
250};
260#define LL_RESERVED_SPACE(dev) \
261 ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
262#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
263 ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
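/*
 * Typical use of LL_RESERVED_SPACE() when building an outgoing skb
 * (sketch; "payload_len" is a placeholder):
 *
 *	skb = alloc_skb(LL_RESERVED_SPACE(dev) + payload_len, GFP_ATOMIC);
 *	if (skb)
 *		skb_reserve(skb, LL_RESERVED_SPACE(dev));
 */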
264
265struct header_ops {
266 int (*create) (struct sk_buff *skb, struct net_device *dev,
267 unsigned short type, const void *daddr,
268 const void *saddr, unsigned int len);
269 int (*parse)(const struct sk_buff *skb, unsigned char *haddr);
270 int (*rebuild)(struct sk_buff *skb);
271 int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
272 void (*cache_update)(struct hh_cache *hh,
273 const struct net_device *dev,
274 const unsigned char *haddr);
275};
282enum netdev_state_t {
283 __LINK_STATE_START,
284 __LINK_STATE_PRESENT,
285 __LINK_STATE_NOCARRIER,
286 __LINK_STATE_LINKWATCH_PENDING,
287 __LINK_STATE_DORMANT,
288};
295struct netdev_boot_setup {
296 char name[IFNAMSIZ];
297 struct ifmap map;
298};
299#define NETDEV_BOOT_SETUP_MAX 8
300
301extern int __init netdev_boot_setup(char *str);
306struct napi_struct {
313 struct list_head poll_list;
314
315 unsigned long state;
316 int weight;
317 unsigned int gro_count;
318 int (*poll)(struct napi_struct *, int);
319#ifdef CONFIG_NETPOLL
320 spinlock_t poll_lock;
321 int poll_owner;
322#endif
323 struct net_device *dev;
324 struct sk_buff *gro_list;
325 struct sk_buff *skb;
326 struct list_head dev_list;
327 struct hlist_node napi_hash_node;
328 unsigned int napi_id;
329};
330
331enum {
332 NAPI_STATE_SCHED,
333 NAPI_STATE_DISABLE,
334 NAPI_STATE_NPSVC,
335 NAPI_STATE_HASHED,
336};
337
338enum gro_result {
339 GRO_MERGED,
340 GRO_MERGED_FREE,
341 GRO_HELD,
342 GRO_NORMAL,
343 GRO_DROP,
344};
345typedef enum gro_result gro_result_t;
388enum rx_handler_result {
389 RX_HANDLER_CONSUMED,
390 RX_HANDLER_ANOTHER,
391 RX_HANDLER_EXACT,
392 RX_HANDLER_PASS,
393};
394typedef enum rx_handler_result rx_handler_result_t;
395typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
396
397extern void __napi_schedule(struct napi_struct *n);
398
399static inline bool napi_disable_pending(struct napi_struct *n)
400{
401 return test_bit(NAPI_STATE_DISABLE, &n->state);
402}
413static inline bool napi_schedule_prep(struct napi_struct *n)
414{
415 return !napi_disable_pending(n) &&
416 !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
417}
426static inline void napi_schedule(struct napi_struct *n)
427{
428 if (napi_schedule_prep(n))
429 __napi_schedule(n);
430}
431
432
433static inline bool napi_reschedule(struct napi_struct *napi)
434{
435 if (napi_schedule_prep(napi)) {
436 __napi_schedule(napi);
437 return true;
438 }
439 return false;
440}
448extern void __napi_complete(struct napi_struct *n);
449extern void napi_complete(struct napi_struct *n);
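/*
 * A minimal NAPI poll callback, as a sketch of how the scheduling and
 * completion helpers fit together (mydrv_* names are hypothetical):
 *
 *	static int mydrv_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work_done = mydrv_clean_rx_ring(napi, budget);
 *
 *		if (work_done < budget) {
 *			napi_complete(napi);
 *			mydrv_enable_rx_interrupts(napi);
 *		}
 *		return work_done;
 *	}
 *
 * The interrupt handler pairs with this via napi_schedule(), e.g.:
 *
 *	if (napi_schedule_prep(&priv->napi)) {
 *		mydrv_disable_rx_interrupts(priv);
 *		__napi_schedule(&priv->napi);
 *	}
 */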
458extern struct napi_struct *napi_by_id(unsigned int napi_id);
466extern void napi_hash_add(struct napi_struct *napi);
475extern void napi_hash_del(struct napi_struct *napi);
484static inline void napi_disable(struct napi_struct *n)
485{
486 set_bit(NAPI_STATE_DISABLE, &n->state);
487 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
488 msleep(1);
489 clear_bit(NAPI_STATE_DISABLE, &n->state);
490}
499static inline void napi_enable(struct napi_struct *n)
500{
501 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
502 smp_mb__before_clear_bit();
503 clear_bit(NAPI_STATE_SCHED, &n->state);
504}
505
506#ifdef CONFIG_SMP
515static inline void napi_synchronize(const struct napi_struct *n)
516{
517 while (test_bit(NAPI_STATE_SCHED, &n->state))
518 msleep(1);
519}
520#else
521# define napi_synchronize(n) barrier()
522#endif
523
524enum netdev_queue_state_t {
525 __QUEUE_STATE_DRV_XOFF,
526 __QUEUE_STATE_STACK_XOFF,
527 __QUEUE_STATE_FROZEN,
528#define QUEUE_STATE_ANY_XOFF ((1 << __QUEUE_STATE_DRV_XOFF) | \
529 (1 << __QUEUE_STATE_STACK_XOFF))
530#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
531 (1 << __QUEUE_STATE_FROZEN))
532};
543struct netdev_queue {
544
545
546
547 struct net_device *dev;
548 struct Qdisc *qdisc;
549 struct Qdisc *qdisc_sleeping;
550#ifdef CONFIG_SYSFS
551 struct kobject kobj;
552#endif
553#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
554 int numa_node;
555#endif
556
557
558
559 spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
560 int xmit_lock_owner;
561
562
563
564 unsigned long trans_start;
565
566
567
568
569
570 unsigned long trans_timeout;
571
572 unsigned long state;
573
574#ifdef CONFIG_BQL
575 struct dql dql;
576#endif
577} ____cacheline_aligned_in_smp;
578
579static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
580{
581#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
582 return q->numa_node;
583#else
584 return NUMA_NO_NODE;
585#endif
586}
587
588static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
589{
590#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
591 q->numa_node = node;
592#endif
593}
594
595#ifdef CONFIG_RPS
596
597
598
599
600struct rps_map {
601 unsigned int len;
602 struct rcu_head rcu;
603 u16 cpus[0];
604};
605#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
612struct rps_dev_flow {
613 u16 cpu;
614 u16 filter;
615 unsigned int last_qtail;
616};
617#define RPS_NO_FILTER 0xffff
618
619
620
621
622struct rps_dev_flow_table {
623 unsigned int mask;
624 struct rcu_head rcu;
625 struct rps_dev_flow flows[0];
626};
627#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
628 ((_num) * sizeof(struct rps_dev_flow)))
629
630
631
632
633
634struct rps_sock_flow_table {
635 unsigned int mask;
636 u16 ents[0];
637};
638#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
639 ((_num) * sizeof(u16)))
640
641#define RPS_NO_CPU 0xffff
642
643static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
644 u32 hash)
645{
646 if (table && hash) {
647 unsigned int cpu, index = hash & table->mask;
648
649
650 cpu = raw_smp_processor_id();
651
652 if (table->ents[index] != cpu)
653 table->ents[index] = cpu;
654 }
655}
656
657static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
658 u32 hash)
659{
660 if (table && hash)
661 table->ents[hash & table->mask] = RPS_NO_CPU;
662}
663
664extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
665
666#ifdef CONFIG_RFS_ACCEL
667extern bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
668 u32 flow_id, u16 filter_id);
669#endif
670
671
672struct netdev_rx_queue {
673 struct rps_map __rcu *rps_map;
674 struct rps_dev_flow_table __rcu *rps_flow_table;
675 struct kobject kobj;
676 struct net_device *dev;
677} ____cacheline_aligned_in_smp;
678#endif
679
680#ifdef CONFIG_XPS
681
682
683
684
685struct xps_map {
686 unsigned int len;
687 unsigned int alloc_len;
688 struct rcu_head rcu;
689 u16 queues[0];
690};
691#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
692#define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map)) \
693 / sizeof(u16))
694
695
696
697
698struct xps_dev_maps {
699 struct rcu_head rcu;
700 struct xps_map __rcu *cpu_map[0];
701};
702#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) + \
703 (nr_cpu_ids * sizeof(struct xps_map *)))
704#endif
705
706#define TC_MAX_QUEUE 16
707#define TC_BITMASK 15
708
709struct netdev_tc_txq {
710 u16 count;
711 u16 offset;
712};
713
714#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
715
716
717
718
719struct netdev_fcoe_hbainfo {
720 char manufacturer[64];
721 char serial_number[64];
722 char hardware_version[64];
723 char driver_version[64];
724 char optionrom_version[64];
725 char firmware_version[64];
726 char model[256];
727 char model_description[256];
728};
729#endif
936struct net_device_ops {
937 int (*ndo_init)(struct net_device *dev);
938 void (*ndo_uninit)(struct net_device *dev);
939 int (*ndo_open)(struct net_device *dev);
940 int (*ndo_stop)(struct net_device *dev);
941 netdev_tx_t (*ndo_start_xmit) (struct sk_buff *skb,
942 struct net_device *dev);
943 u16 (*ndo_select_queue)(struct net_device *dev,
944 struct sk_buff *skb);
945 void (*ndo_change_rx_flags)(struct net_device *dev,
946 int flags);
947 void (*ndo_set_rx_mode)(struct net_device *dev);
948 int (*ndo_set_mac_address)(struct net_device *dev,
949 void *addr);
950 int (*ndo_validate_addr)(struct net_device *dev);
951 int (*ndo_do_ioctl)(struct net_device *dev,
952 struct ifreq *ifr, int cmd);
953 int (*ndo_set_config)(struct net_device *dev,
954 struct ifmap *map);
955 int (*ndo_change_mtu)(struct net_device *dev,
956 int new_mtu);
957 int (*ndo_neigh_setup)(struct net_device *dev,
958 struct neigh_parms *);
959 void (*ndo_tx_timeout) (struct net_device *dev);
960
961 struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
962 struct rtnl_link_stats64 *storage);
963 struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
964
965 int (*ndo_vlan_rx_add_vid)(struct net_device *dev,
966 __be16 proto, u16 vid);
967 int (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
968 __be16 proto, u16 vid);
969#ifdef CONFIG_NET_POLL_CONTROLLER
970 void (*ndo_poll_controller)(struct net_device *dev);
971 int (*ndo_netpoll_setup)(struct net_device *dev,
972 struct netpoll_info *info,
973 gfp_t gfp);
974 void (*ndo_netpoll_cleanup)(struct net_device *dev);
975#endif
976#ifdef CONFIG_NET_RX_BUSY_POLL
977 int (*ndo_busy_poll)(struct napi_struct *dev);
978#endif
979 int (*ndo_set_vf_mac)(struct net_device *dev,
980 int queue, u8 *mac);
981 int (*ndo_set_vf_vlan)(struct net_device *dev,
982 int queue, u16 vlan, u8 qos);
983 int (*ndo_set_vf_tx_rate)(struct net_device *dev,
984 int vf, int rate);
985 int (*ndo_set_vf_spoofchk)(struct net_device *dev,
986 int vf, bool setting);
987 int (*ndo_get_vf_config)(struct net_device *dev,
988 int vf,
989 struct ifla_vf_info *ivf);
990 int (*ndo_set_vf_link_state)(struct net_device *dev,
991 int vf, int link_state);
992 int (*ndo_set_vf_port)(struct net_device *dev,
993 int vf,
994 struct nlattr *port[]);
995 int (*ndo_get_vf_port)(struct net_device *dev,
996 int vf, struct sk_buff *skb);
997 int (*ndo_setup_tc)(struct net_device *dev, u8 tc);
998#if IS_ENABLED(CONFIG_FCOE)
999 int (*ndo_fcoe_enable)(struct net_device *dev);
1000 int (*ndo_fcoe_disable)(struct net_device *dev);
1001 int (*ndo_fcoe_ddp_setup)(struct net_device *dev,
1002 u16 xid,
1003 struct scatterlist *sgl,
1004 unsigned int sgc);
1005 int (*ndo_fcoe_ddp_done)(struct net_device *dev,
1006 u16 xid);
1007 int (*ndo_fcoe_ddp_target)(struct net_device *dev,
1008 u16 xid,
1009 struct scatterlist *sgl,
1010 unsigned int sgc);
1011 int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
1012 struct netdev_fcoe_hbainfo *hbainfo);
1013#endif
1014
1015#if IS_ENABLED(CONFIG_LIBFCOE)
1016#define NETDEV_FCOE_WWNN 0
1017#define NETDEV_FCOE_WWPN 1
1018 int (*ndo_fcoe_get_wwn)(struct net_device *dev,
1019 u64 *wwn, int type);
1020#endif
1021
1022#ifdef CONFIG_RFS_ACCEL
1023 int (*ndo_rx_flow_steer)(struct net_device *dev,
1024 const struct sk_buff *skb,
1025 u16 rxq_index,
1026 u32 flow_id);
1027#endif
1028 int (*ndo_add_slave)(struct net_device *dev,
1029 struct net_device *slave_dev);
1030 int (*ndo_del_slave)(struct net_device *dev,
1031 struct net_device *slave_dev);
1032 netdev_features_t (*ndo_fix_features)(struct net_device *dev,
1033 netdev_features_t features);
1034 int (*ndo_set_features)(struct net_device *dev,
1035 netdev_features_t features);
1036 int (*ndo_neigh_construct)(struct neighbour *n);
1037 void (*ndo_neigh_destroy)(struct neighbour *n);
1038
1039 int (*ndo_fdb_add)(struct ndmsg *ndm,
1040 struct nlattr *tb[],
1041 struct net_device *dev,
1042 const unsigned char *addr,
1043 u16 flags);
1044 int (*ndo_fdb_del)(struct ndmsg *ndm,
1045 struct nlattr *tb[],
1046 struct net_device *dev,
1047 const unsigned char *addr);
1048 int (*ndo_fdb_dump)(struct sk_buff *skb,
1049 struct netlink_callback *cb,
1050 struct net_device *dev,
1051 int idx);
1052
1053 int (*ndo_bridge_setlink)(struct net_device *dev,
1054 struct nlmsghdr *nlh);
1055 int (*ndo_bridge_getlink)(struct sk_buff *skb,
1056 u32 pid, u32 seq,
1057 struct net_device *dev,
1058 u32 filter_mask);
1059 int (*ndo_bridge_dellink)(struct net_device *dev,
1060 struct nlmsghdr *nlh);
1061 int (*ndo_change_carrier)(struct net_device *dev,
1062 bool new_carrier);
1063};
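/*
 * A driver normally provides a single static const instance of the ops
 * structure above and points dev->netdev_ops at it before registration.
 * Sketch only: the mydrv_* handlers are hypothetical, and eth_mac_addr()
 * / eth_validate_addr() come from <linux/etherdevice.h>:
 *
 *	static const struct net_device_ops mydrv_netdev_ops = {
 *		.ndo_open		= mydrv_open,
 *		.ndo_stop		= mydrv_stop,
 *		.ndo_start_xmit		= mydrv_start_xmit,
 *		.ndo_set_rx_mode	= mydrv_set_rx_mode,
 *		.ndo_set_mac_address	= eth_mac_addr,
 *		.ndo_validate_addr	= eth_validate_addr,
 *		.ndo_tx_timeout		= mydrv_tx_timeout,
 *	};
 */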
1075struct net_device {
1082 char name[IFNAMSIZ];
1083
1084
1085 struct hlist_node name_hlist;
1086
1087
1088 char *ifalias;
1089
1090
1091
1092
1093
1094 unsigned long mem_end;
1095 unsigned long mem_start;
1096 unsigned long base_addr;
1097 unsigned int irq;
1104 unsigned long state;
1105
1106 struct list_head dev_list;
1107 struct list_head napi_list;
1108 struct list_head unreg_list;
1109 struct list_head upper_dev_list;
1110
1111
1112
1113 netdev_features_t features;
1114
1115 netdev_features_t hw_features;
1116
1117 netdev_features_t wanted_features;
1118
1119 netdev_features_t vlan_features;
1120
1121
1122
1123
1124
1125 netdev_features_t hw_enc_features;
1126
1127 netdev_features_t mpls_features;
1128
1129
1130 int ifindex;
1131 int iflink;
1132
1133 struct net_device_stats stats;
1134 atomic_long_t rx_dropped;
1135
1136
1137
1138#ifdef CONFIG_WIRELESS_EXT
1139
1140
1141 const struct iw_handler_def * wireless_handlers;
1142
1143 struct iw_public_data * wireless_data;
1144#endif
1145
1146 const struct net_device_ops *netdev_ops;
1147 const struct ethtool_ops *ethtool_ops;
1148
1149
1150 const struct header_ops *header_ops;
1151
1152 unsigned int flags;
1153 unsigned int priv_flags;
1154
1155 unsigned short gflags;
1156 unsigned short padded;
1157
1158 unsigned char operstate;
1159 unsigned char link_mode;
1160
1161 unsigned char if_port;
1162 unsigned char dma;
1163
1164 unsigned int mtu;
1165 unsigned short type;
1166 unsigned short hard_header_len;
1167
1168
1169
1170
1171
1172 unsigned short needed_headroom;
1173 unsigned short needed_tailroom;
1174
1175
1176 unsigned char perm_addr[MAX_ADDR_LEN];
1177 unsigned char addr_assign_type;
1178 unsigned char addr_len;
1179 unsigned char neigh_priv_len;
1180 unsigned short dev_id;
1181
1182
1183
1184 spinlock_t addr_list_lock;
1185 struct netdev_hw_addr_list uc;
1186 struct netdev_hw_addr_list mc;
1187 struct netdev_hw_addr_list dev_addrs;
1188
1189
1190#ifdef CONFIG_SYSFS
1191 struct kset *queues_kset;
1192#endif
1193
1194 bool uc_promisc;
1195 unsigned int promiscuity;
1196 unsigned int allmulti;
1197
1198
1199
1200
1201#if IS_ENABLED(CONFIG_VLAN_8021Q)
1202 struct vlan_info __rcu *vlan_info;
1203#endif
1204#if IS_ENABLED(CONFIG_NET_DSA)
1205 struct dsa_switch_tree *dsa_ptr;
1206#endif
1207 void *atalk_ptr;
1208 struct in_device __rcu *ip_ptr;
1209 struct dn_dev __rcu *dn_ptr;
1210 struct inet6_dev __rcu *ip6_ptr;
1211 void *ax25_ptr;
1212 struct wireless_dev *ieee80211_ptr;
1213
1214
1215
1216
1217
1218 unsigned long last_rx;
1227 unsigned char *dev_addr;
1228
1229
1230
1231
1232#ifdef CONFIG_RPS
1233 struct netdev_rx_queue *_rx;
1234
1235
1236 unsigned int num_rx_queues;
1237
1238
1239 unsigned int real_num_rx_queues;
1240
1241#endif
1242
1243 rx_handler_func_t __rcu *rx_handler;
1244 void __rcu *rx_handler_data;
1245
1246 struct netdev_queue __rcu *ingress_queue;
1247 unsigned char broadcast[MAX_ADDR_LEN];
1248
1249
1250
1251
1252
1253 struct netdev_queue *_tx ____cacheline_aligned_in_smp;
1254
1255
1256 unsigned int num_tx_queues;
1257
1258
1259 unsigned int real_num_tx_queues;
1260
1261
1262 struct Qdisc *qdisc;
1263
1264 unsigned long tx_queue_len;
1265 spinlock_t tx_global_lock;
1266
1267#ifdef CONFIG_XPS
1268 struct xps_dev_maps __rcu *xps_maps;
1269#endif
1270#ifdef CONFIG_RFS_ACCEL
1271
1272
1273
1274 struct cpu_rmap *rx_cpu_rmap;
1275#endif
1283 unsigned long trans_start;
1284
1285 int watchdog_timeo;
1286 struct timer_list watchdog_timer;
1287
1288
1289 int __percpu *pcpu_refcnt;
1290
1291
1292 struct list_head todo_list;
1293
1294 struct hlist_node index_hlist;
1295
1296 struct list_head link_watch_list;
1297
1298
1299 enum { NETREG_UNINITIALIZED=0,
1300 NETREG_REGISTERED,
1301 NETREG_UNREGISTERING,
1302 NETREG_UNREGISTERED,
1303 NETREG_RELEASED,
1304 NETREG_DUMMY,
1305 } reg_state:8;
1306
1307 bool dismantle;
1308
1309 enum {
1310 RTNL_LINK_INITIALIZED,
1311 RTNL_LINK_INITIALIZING,
1312 } rtnl_link_state:16;
1313
1314
1315 void (*destructor)(struct net_device *dev);
1316
1317#ifdef CONFIG_NETPOLL
1318 struct netpoll_info __rcu *npinfo;
1319#endif
1320
1321#ifdef CONFIG_NET_NS
1322
1323 struct net *nd_net;
1324#endif
1325
1326
1327 union {
1328 void *ml_priv;
1329 struct pcpu_lstats __percpu *lstats;
1330 struct pcpu_tstats __percpu *tstats;
1331 struct pcpu_dstats __percpu *dstats;
1332 struct pcpu_vstats __percpu *vstats;
1333 };
1334
1335 struct garp_port __rcu *garp_port;
1336
1337 struct mrp_port __rcu *mrp_port;
1338
1339
1340 struct device dev;
1341
1342 const struct attribute_group *sysfs_groups[4];
1343
1344
1345 const struct rtnl_link_ops *rtnl_link_ops;
1346
1347
1348#define GSO_MAX_SIZE 65536
1349 unsigned int gso_max_size;
1350#define GSO_MAX_SEGS 65535
1351 u16 gso_max_segs;
1352
1353#ifdef CONFIG_DCB
1354
1355 const struct dcbnl_rtnl_ops *dcbnl_ops;
1356#endif
1357 u8 num_tc;
1358 struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
1359 u8 prio_tc_map[TC_BITMASK + 1];
1360
1361#if IS_ENABLED(CONFIG_FCOE)
1362
1363 unsigned int fcoe_ddp_xid;
1364#endif
1365#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
1366 struct netprio_map __rcu *priomap;
1367#endif
1368
1369 struct phy_device *phydev;
1370
1371 struct lock_class_key *qdisc_tx_busylock;
1372
1373
1374 int group;
1375
1376 struct pm_qos_request pm_qos_req;
1377};
1378#define to_net_dev(d) container_of(d, struct net_device, dev)
1379
1380#define NETDEV_ALIGN 32
1381
1382static inline
1383int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
1384{
1385 return dev->prio_tc_map[prio & TC_BITMASK];
1386}
1387
1388static inline
1389int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
1390{
1391 if (tc >= dev->num_tc)
1392 return -EINVAL;
1393
1394 dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
1395 return 0;
1396}
1397
1398static inline
1399void netdev_reset_tc(struct net_device *dev)
1400{
1401 dev->num_tc = 0;
1402 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
1403 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
1404}
1405
1406static inline
1407int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
1408{
1409 if (tc >= dev->num_tc)
1410 return -EINVAL;
1411
1412 dev->tc_to_txq[tc].count = count;
1413 dev->tc_to_txq[tc].offset = offset;
1414 return 0;
1415}
1416
1417static inline
1418int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
1419{
1420 if (num_tc > TC_MAX_QUEUE)
1421 return -EINVAL;
1422
1423 dev->num_tc = num_tc;
1424 return 0;
1425}
1426
1427static inline
1428int netdev_get_num_tc(struct net_device *dev)
1429{
1430 return dev->num_tc;
1431}
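/*
 * Sketch of carving four real TX queues into two traffic classes with
 * the helpers above (tc 0 gets queues 0-1, tc 1 gets queues 2-3, and
 * priority 5 is steered to tc 1); typically done from a driver's
 * ndo_setup_tc handler:
 *
 *	netdev_set_num_tc(dev, 2);
 *	netdev_set_tc_queue(dev, 0, 2, 0);
 *	netdev_set_tc_queue(dev, 1, 2, 2);
 *	netdev_set_prio_tc_map(dev, 5, 1);
 */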
1432
1433static inline
1434struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
1435 unsigned int index)
1436{
1437 return &dev->_tx[index];
1438}
1439
1440static inline void netdev_for_each_tx_queue(struct net_device *dev,
1441 void (*f)(struct net_device *,
1442 struct netdev_queue *,
1443 void *),
1444 void *arg)
1445{
1446 unsigned int i;
1447
1448 for (i = 0; i < dev->num_tx_queues; i++)
1449 f(dev, &dev->_tx[i], arg);
1450}
1451
1452extern struct netdev_queue *netdev_pick_tx(struct net_device *dev,
1453 struct sk_buff *skb);
1454extern u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
1455
1456
1457
1458
1459static inline
1460struct net *dev_net(const struct net_device *dev)
1461{
1462 return read_pnet(&dev->nd_net);
1463}
1464
1465static inline
1466void dev_net_set(struct net_device *dev, struct net *net)
1467{
1468#ifdef CONFIG_NET_NS
1469 release_net(dev->nd_net);
1470 dev->nd_net = hold_net(net);
1471#endif
1472}
1473
1474static inline bool netdev_uses_dsa_tags(struct net_device *dev)
1475{
1476#ifdef CONFIG_NET_DSA_TAG_DSA
1477 if (dev->dsa_ptr != NULL)
1478 return dsa_uses_dsa_tags(dev->dsa_ptr);
1479#endif
1480
1481 return 0;
1482}
1483
1484static inline bool netdev_uses_trailer_tags(struct net_device *dev)
1485{
1486#ifdef CONFIG_NET_DSA_TAG_TRAILER
1487 if (dev->dsa_ptr != NULL)
1488 return dsa_uses_trailer_tags(dev->dsa_ptr);
1489#endif
1490
1491 return 0;
1492}
1493
1500static inline void *netdev_priv(const struct net_device *dev)
1501{
1502 return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
1503}
1504
1505
1506
1507
1508#define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
1509
1510
1511
1512
1513
1514#define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
1515
1516
1517
1518
1519#define NAPI_POLL_WEIGHT 64
1531void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
1532 int (*poll)(struct napi_struct *, int), int weight);
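/*
 * Registration is normally done once at probe time, e.g. (sketch,
 * hypothetical mydrv_poll and priv):
 *
 *	netif_napi_add(dev, &priv->napi, mydrv_poll, NAPI_POLL_WEIGHT);
 */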
1540void netif_napi_del(struct napi_struct *napi);
1541
1542struct napi_gro_cb {
1543
1544 void *frag0;
1545
1546
1547 unsigned int frag0_len;
1548
1549
1550 int data_offset;
1551
1552
1553 int flush;
1554
1555
1556 u16 count;
1557
1558
1559 u8 same_flow;
1560
1561
1562 u8 free;
1563#define NAPI_GRO_FREE 1
1564#define NAPI_GRO_FREE_STOLEN_HEAD 2
1565
1566
1567 unsigned long age;
1568
1569
1570 int proto;
1571
1572
1573 struct sk_buff *last;
1574};
1575
1576#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
1577
1578struct packet_type {
1579 __be16 type;
1580 struct net_device *dev;
1581 int (*func) (struct sk_buff *,
1582 struct net_device *,
1583 struct packet_type *,
1584 struct net_device *);
1585 bool (*id_match)(struct packet_type *ptype,
1586 struct sock *sk);
1587 void *af_packet_priv;
1588 struct list_head list;
1589};
1590
1591struct offload_callbacks {
1592 struct sk_buff *(*gso_segment)(struct sk_buff *skb,
1593 netdev_features_t features);
1594 int (*gso_send_check)(struct sk_buff *skb);
1595 struct sk_buff **(*gro_receive)(struct sk_buff **head,
1596 struct sk_buff *skb);
1597 int (*gro_complete)(struct sk_buff *skb);
1598};
1599
1600struct packet_offload {
1601 __be16 type;
1602 struct offload_callbacks callbacks;
1603 struct list_head list;
1604};
1605
1606#include <linux/notifier.h>
1607
1608
1609
1610
1611
1612#define NETDEV_UP 0x0001
1613#define NETDEV_DOWN 0x0002
1614#define NETDEV_REBOOT 0x0003
1615
1616
1617
1618#define NETDEV_CHANGE 0x0004
1619#define NETDEV_REGISTER 0x0005
1620#define NETDEV_UNREGISTER 0x0006
1621#define NETDEV_CHANGEMTU 0x0007
1622#define NETDEV_CHANGEADDR 0x0008
1623#define NETDEV_GOING_DOWN 0x0009
1624#define NETDEV_CHANGENAME 0x000A
1625#define NETDEV_FEAT_CHANGE 0x000B
1626#define NETDEV_BONDING_FAILOVER 0x000C
1627#define NETDEV_PRE_UP 0x000D
1628#define NETDEV_PRE_TYPE_CHANGE 0x000E
1629#define NETDEV_POST_TYPE_CHANGE 0x000F
1630#define NETDEV_POST_INIT 0x0010
1631#define NETDEV_UNREGISTER_FINAL 0x0011
1632#define NETDEV_RELEASE 0x0012
1633#define NETDEV_NOTIFY_PEERS 0x0013
1634#define NETDEV_JOIN 0x0014
1635#define NETDEV_CHANGEUPPER 0x0015
1636
1637extern int register_netdevice_notifier(struct notifier_block *nb);
1638extern int unregister_netdevice_notifier(struct notifier_block *nb);
1639
1640struct netdev_notifier_info {
1641 struct net_device *dev;
1642};
1643
1644struct netdev_notifier_change_info {
1645 struct netdev_notifier_info info;
1646 unsigned int flags_changed;
1647};
1648
1649static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
1650 struct net_device *dev)
1651{
1652 info->dev = dev;
1653}
1654
1655static inline struct net_device *
1656netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
1657{
1658 return info->dev;
1659}
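/*
 * Sketch of a netdevice notifier callback using the helpers above
 * (mydrv_* names are hypothetical); the callback is registered with
 * register_netdevice_notifier():
 *
 *	static int mydrv_netdev_event(struct notifier_block *nb,
 *				      unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		switch (event) {
 *		case NETDEV_UP:
 *			mydrv_handle_up(dev);
 *			break;
 *		case NETDEV_GOING_DOWN:
 *			mydrv_handle_down(dev);
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 */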
1660
1661extern int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev,
1662 struct netdev_notifier_info *info);
1663extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
1664
1665
1666extern rwlock_t dev_base_lock;
1667
1668extern seqcount_t devnet_rename_seq;
1669
1670
1671#define for_each_netdev(net, d) \
1672 list_for_each_entry(d, &(net)->dev_base_head, dev_list)
1673#define for_each_netdev_reverse(net, d) \
1674 list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
1675#define for_each_netdev_rcu(net, d) \
1676 list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
1677#define for_each_netdev_safe(net, d, n) \
1678 list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
1679#define for_each_netdev_continue(net, d) \
1680 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
1681#define for_each_netdev_continue_rcu(net, d) \
1682 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
1683#define for_each_netdev_in_bond_rcu(bond, slave) \
1684 for_each_netdev_rcu(&init_net, slave) \
1685 if (netdev_master_upper_dev_get_rcu(slave) == bond)
1686#define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
1687
1688static inline struct net_device *next_net_device(struct net_device *dev)
1689{
1690 struct list_head *lh;
1691 struct net *net;
1692
1693 net = dev_net(dev);
1694 lh = dev->dev_list.next;
1695 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
1696}
1697
1698static inline struct net_device *next_net_device_rcu(struct net_device *dev)
1699{
1700 struct list_head *lh;
1701 struct net *net;
1702
1703 net = dev_net(dev);
1704 lh = rcu_dereference(list_next_rcu(&dev->dev_list));
1705 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
1706}
1707
1708static inline struct net_device *first_net_device(struct net *net)
1709{
1710 return list_empty(&net->dev_base_head) ? NULL :
1711 net_device_entry(net->dev_base_head.next);
1712}
1713
1714static inline struct net_device *first_net_device_rcu(struct net *net)
1715{
1716 struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
1717
1718 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
1719}
1720
1721extern int netdev_boot_setup_check(struct net_device *dev);
1722extern unsigned long netdev_boot_base(const char *prefix, int unit);
1723extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
1724 const char *hwaddr);
1725extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
1726extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
1727extern void dev_add_pack(struct packet_type *pt);
1728extern void dev_remove_pack(struct packet_type *pt);
1729extern void __dev_remove_pack(struct packet_type *pt);
1730extern void dev_add_offload(struct packet_offload *po);
1731extern void dev_remove_offload(struct packet_offload *po);
1732extern void __dev_remove_offload(struct packet_offload *po);
1733
1734extern struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
1735 unsigned short mask);
1736extern struct net_device *dev_get_by_name(struct net *net, const char *name);
1737extern struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
1738extern struct net_device *__dev_get_by_name(struct net *net, const char *name);
1739extern int dev_alloc_name(struct net_device *dev, const char *name);
1740extern int dev_open(struct net_device *dev);
1741extern int dev_close(struct net_device *dev);
1742extern void dev_disable_lro(struct net_device *dev);
1743extern int dev_loopback_xmit(struct sk_buff *newskb);
1744extern int dev_queue_xmit(struct sk_buff *skb);
1745extern int register_netdevice(struct net_device *dev);
1746extern void unregister_netdevice_queue(struct net_device *dev,
1747 struct list_head *head);
1748extern void unregister_netdevice_many(struct list_head *head);
1749static inline void unregister_netdevice(struct net_device *dev)
1750{
1751 unregister_netdevice_queue(dev, NULL);
1752}
1753
1754extern int netdev_refcnt_read(const struct net_device *dev);
1755extern void free_netdev(struct net_device *dev);
1756extern void synchronize_net(void);
1757extern int init_dummy_netdev(struct net_device *dev);
1758
1759extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
1760extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
1761extern struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
1762extern int netdev_get_name(struct net *net, char *name, int ifindex);
1763extern int dev_restart(struct net_device *dev);
1764#ifdef CONFIG_NETPOLL_TRAP
1765extern int netpoll_trap(void);
1766#endif
1767extern int skb_gro_receive(struct sk_buff **head,
1768 struct sk_buff *skb);
1769
1770static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
1771{
1772 return NAPI_GRO_CB(skb)->data_offset;
1773}
1774
1775static inline unsigned int skb_gro_len(const struct sk_buff *skb)
1776{
1777 return skb->len - NAPI_GRO_CB(skb)->data_offset;
1778}
1779
1780static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
1781{
1782 NAPI_GRO_CB(skb)->data_offset += len;
1783}
1784
1785static inline void *skb_gro_header_fast(struct sk_buff *skb,
1786 unsigned int offset)
1787{
1788 return NAPI_GRO_CB(skb)->frag0 + offset;
1789}
1790
1791static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
1792{
1793 return NAPI_GRO_CB(skb)->frag0_len < hlen;
1794}
1795
1796static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
1797 unsigned int offset)
1798{
1799 if (!pskb_may_pull(skb, hlen))
1800 return NULL;
1801
1802 NAPI_GRO_CB(skb)->frag0 = NULL;
1803 NAPI_GRO_CB(skb)->frag0_len = 0;
1804 return skb->data + offset;
1805}
1806
1807static inline void *skb_gro_mac_header(struct sk_buff *skb)
1808{
1809 return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb);
1810}
1811
1812static inline void *skb_gro_network_header(struct sk_buff *skb)
1813{
1814 return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
1815 skb_network_offset(skb);
1816}
1817
1818static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
1819 unsigned short type,
1820 const void *daddr, const void *saddr,
1821 unsigned int len)
1822{
1823 if (!dev->header_ops || !dev->header_ops->create)
1824 return 0;
1825
1826 return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
1827}
1828
1829static inline int dev_parse_header(const struct sk_buff *skb,
1830 unsigned char *haddr)
1831{
1832 const struct net_device *dev = skb->dev;
1833
1834 if (!dev->header_ops || !dev->header_ops->parse)
1835 return 0;
1836 return dev->header_ops->parse(skb, haddr);
1837}
1838
1839typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
1840extern int register_gifconf(unsigned int family, gifconf_func_t * gifconf);
1841static inline int unregister_gifconf(unsigned int family)
1842{
1843 return register_gifconf(family, NULL);
1844}
1845
1846#ifdef CONFIG_NET_FLOW_LIMIT
1847#define FLOW_LIMIT_HISTORY (1 << 7)
1848struct sd_flow_limit {
1849 u64 count;
1850 unsigned int num_buckets;
1851 unsigned int history_head;
1852 u16 history[FLOW_LIMIT_HISTORY];
1853 u8 buckets[];
1854};
1855
1856extern int netdev_flow_limit_table_len;
1857#endif
1858
1859
1860
1861
1862struct softnet_data {
1863 struct Qdisc *output_queue;
1864 struct Qdisc **output_queue_tailp;
1865 struct list_head poll_list;
1866 struct sk_buff *completion_queue;
1867 struct sk_buff_head process_queue;
1868
1869
1870 unsigned int processed;
1871 unsigned int time_squeeze;
1872 unsigned int cpu_collision;
1873 unsigned int received_rps;
1874
1875#ifdef CONFIG_RPS
1876 struct softnet_data *rps_ipi_list;
1877
1878
1879 struct call_single_data csd ____cacheline_aligned_in_smp;
1880 struct softnet_data *rps_ipi_next;
1881 unsigned int cpu;
1882 unsigned int input_queue_head;
1883 unsigned int input_queue_tail;
1884#endif
1885 unsigned int dropped;
1886 struct sk_buff_head input_pkt_queue;
1887 struct napi_struct backlog;
1888
1889#ifdef CONFIG_NET_FLOW_LIMIT
1890 struct sd_flow_limit __rcu *flow_limit;
1891#endif
1892};
1893
1894static inline void input_queue_head_incr(struct softnet_data *sd)
1895{
1896#ifdef CONFIG_RPS
1897 sd->input_queue_head++;
1898#endif
1899}
1900
1901static inline void input_queue_tail_incr_save(struct softnet_data *sd,
1902 unsigned int *qtail)
1903{
1904#ifdef CONFIG_RPS
1905 *qtail = ++sd->input_queue_tail;
1906#endif
1907}
1908
1909DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
1910
1911extern void __netif_schedule(struct Qdisc *q);
1912
1913static inline void netif_schedule_queue(struct netdev_queue *txq)
1914{
1915 if (!(txq->state & QUEUE_STATE_ANY_XOFF))
1916 __netif_schedule(txq->qdisc);
1917}
1918
1919static inline void netif_tx_schedule_all(struct net_device *dev)
1920{
1921 unsigned int i;
1922
1923 for (i = 0; i < dev->num_tx_queues; i++)
1924 netif_schedule_queue(netdev_get_tx_queue(dev, i));
1925}
1926
1927static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
1928{
1929 clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
1930}
1938static inline void netif_start_queue(struct net_device *dev)
1939{
1940 netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
1941}
1942
1943static inline void netif_tx_start_all_queues(struct net_device *dev)
1944{
1945 unsigned int i;
1946
1947 for (i = 0; i < dev->num_tx_queues; i++) {
1948 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
1949 netif_tx_start_queue(txq);
1950 }
1951}
1952
1953static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
1954{
1955#ifdef CONFIG_NETPOLL_TRAP
1956 if (netpoll_trap()) {
1957 netif_tx_start_queue(dev_queue);
1958 return;
1959 }
1960#endif
1961 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state))
1962 __netif_schedule(dev_queue->qdisc);
1963}
1972static inline void netif_wake_queue(struct net_device *dev)
1973{
1974 netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
1975}
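/*
 * The usual pairing (sketch; priv and mydrv_tx_ring_has_room() are
 * hypothetical): the xmit path stops the queue when the TX ring fills,
 * and the TX-completion handler wakes it again once descriptors have
 * been reclaimed:
 *
 *	if (netif_queue_stopped(dev) && mydrv_tx_ring_has_room(priv))
 *		netif_wake_queue(dev);
 */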
1976
1977static inline void netif_tx_wake_all_queues(struct net_device *dev)
1978{
1979 unsigned int i;
1980
1981 for (i = 0; i < dev->num_tx_queues; i++) {
1982 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
1983 netif_tx_wake_queue(txq);
1984 }
1985}
1986
1987static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
1988{
1989 if (WARN_ON(!dev_queue)) {
1990 pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
1991 return;
1992 }
1993 set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
1994}
2003static inline void netif_stop_queue(struct net_device *dev)
2004{
2005 netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
2006}
2007
2008static inline void netif_tx_stop_all_queues(struct net_device *dev)
2009{
2010 unsigned int i;
2011
2012 for (i = 0; i < dev->num_tx_queues; i++) {
2013 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2014 netif_tx_stop_queue(txq);
2015 }
2016}
2017
2018static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
2019{
2020 return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
2021}
2029static inline bool netif_queue_stopped(const struct net_device *dev)
2030{
2031 return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
2032}
2033
2034static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
2035{
2036 return dev_queue->state & QUEUE_STATE_ANY_XOFF;
2037}
2038
2039static inline bool netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
2040{
2041 return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
2042}
2043
2044static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
2045 unsigned int bytes)
2046{
2047#ifdef CONFIG_BQL
2048 dql_queued(&dev_queue->dql, bytes);
2049
2050 if (likely(dql_avail(&dev_queue->dql) >= 0))
2051 return;
2052
2053 set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
2054
2055
2056
2057
2058
2059
2060 smp_mb();
2061
2062
2063 if (unlikely(dql_avail(&dev_queue->dql) >= 0))
2064 clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
2065#endif
2066}
2067
2068static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
2069{
2070 netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
2071}
2072
2073static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
2074 unsigned int pkts, unsigned int bytes)
2075{
2076#ifdef CONFIG_BQL
2077 if (unlikely(!bytes))
2078 return;
2079
2080 dql_completed(&dev_queue->dql, bytes);
2081
2082
2083
2084
2085
2086
2087 smp_mb();
2088
2089 if (dql_avail(&dev_queue->dql) < 0)
2090 return;
2091
2092 if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
2093 netif_schedule_queue(dev_queue);
2094#endif
2095}
2096
2097static inline void netdev_completed_queue(struct net_device *dev,
2098 unsigned int pkts, unsigned int bytes)
2099{
2100 netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
2101}
2102
2103static inline void netdev_tx_reset_queue(struct netdev_queue *q)
2104{
2105#ifdef CONFIG_BQL
2106 clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
2107 dql_reset(&q->dql);
2108#endif
2109}
2110
2111static inline void netdev_reset_queue(struct net_device *dev_queue)
2112{
2113 netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
2114}
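/*
 * Byte Queue Limits usage sketch: a BQL-aware driver reports bytes
 * handed to the hardware in its xmit path and bytes/packets reclaimed
 * in its TX-completion path ("pkts" and "bytes" come from the driver's
 * own ring accounting):
 *
 *	xmit path:	netdev_tx_sent_queue(txq, skb->len);
 *	tx completion:	netdev_tx_completed_queue(txq, pkts, bytes);
 *	ring reset:	netdev_tx_reset_queue(txq);
 */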
2122static inline bool netif_running(const struct net_device *dev)
2123{
2124 return test_bit(__LINK_STATE_START, &dev->state);
2125}
2141static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
2142{
2143 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
2144
2145 netif_tx_start_queue(txq);
2146}
2155static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
2156{
2157 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
2158#ifdef CONFIG_NETPOLL_TRAP
2159 if (netpoll_trap())
2160 return;
2161#endif
2162 netif_tx_stop_queue(txq);
2163}
2172static inline bool __netif_subqueue_stopped(const struct net_device *dev,
2173 u16 queue_index)
2174{
2175 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
2176
2177 return netif_tx_queue_stopped(txq);
2178}
2179
2180static inline bool netif_subqueue_stopped(const struct net_device *dev,
2181 struct sk_buff *skb)
2182{
2183 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
2184}
2193static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
2194{
2195 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
2196#ifdef CONFIG_NETPOLL_TRAP
2197 if (netpoll_trap())
2198 return;
2199#endif
2200 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
2201 __netif_schedule(txq->qdisc);
2202}
2203
2204#ifdef CONFIG_XPS
2205extern int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask,
2206 u16 index);
2207#else
2208static inline int netif_set_xps_queue(struct net_device *dev,
2209 struct cpumask *mask,
2210 u16 index)
2211{
2212 return 0;
2213}
2214#endif
2215
2216
2217
2218
2219
2220static inline u16 skb_tx_hash(const struct net_device *dev,
2221 const struct sk_buff *skb)
2222{
2223 return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
2224}
2232static inline bool netif_is_multiqueue(const struct net_device *dev)
2233{
2234 return dev->num_tx_queues > 1;
2235}
2236
2237extern int netif_set_real_num_tx_queues(struct net_device *dev,
2238 unsigned int txq);
2239
2240#ifdef CONFIG_RPS
2241extern int netif_set_real_num_rx_queues(struct net_device *dev,
2242 unsigned int rxq);
2243#else
2244static inline int netif_set_real_num_rx_queues(struct net_device *dev,
2245 unsigned int rxq)
2246{
2247 return 0;
2248}
2249#endif
2250
2251static inline int netif_copy_real_num_queues(struct net_device *to_dev,
2252 const struct net_device *from_dev)
2253{
2254 int err;
2255
2256 err = netif_set_real_num_tx_queues(to_dev,
2257 from_dev->real_num_tx_queues);
2258 if (err)
2259 return err;
2260#ifdef CONFIG_RPS
2261 return netif_set_real_num_rx_queues(to_dev,
2262 from_dev->real_num_rx_queues);
2263#else
2264 return 0;
2265#endif
2266}
2267
2268#define DEFAULT_MAX_NUM_RSS_QUEUES (8)
2269extern int netif_get_num_default_rss_queues(void);
2270
2271
2272
2273
2274
2275extern void dev_kfree_skb_irq(struct sk_buff *skb);
2276
2277
2278
2279
2280
2281extern void dev_kfree_skb_any(struct sk_buff *skb);
2282
2283extern int netif_rx(struct sk_buff *skb);
2284extern int netif_rx_ni(struct sk_buff *skb);
2285extern int netif_receive_skb(struct sk_buff *skb);
2286extern gro_result_t napi_gro_receive(struct napi_struct *napi,
2287 struct sk_buff *skb);
2288extern void napi_gro_flush(struct napi_struct *napi, bool flush_old);
2289extern struct sk_buff * napi_get_frags(struct napi_struct *napi);
2290extern gro_result_t napi_gro_frags(struct napi_struct *napi);
2291
2292static inline void napi_free_frags(struct napi_struct *napi)
2293{
2294 kfree_skb(napi->skb);
2295 napi->skb = NULL;
2296}
2297
2298extern int netdev_rx_handler_register(struct net_device *dev,
2299 rx_handler_func_t *rx_handler,
2300 void *rx_handler_data);
2301extern void netdev_rx_handler_unregister(struct net_device *dev);
2302
2303extern bool dev_valid_name(const char *name);
2304extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
2305extern int dev_ethtool(struct net *net, struct ifreq *);
2306extern unsigned int dev_get_flags(const struct net_device *);
2307extern int __dev_change_flags(struct net_device *, unsigned int flags);
2308extern int dev_change_flags(struct net_device *, unsigned int);
2309extern void __dev_notify_flags(struct net_device *, unsigned int old_flags);
2310extern int dev_change_name(struct net_device *, const char *);
2311extern int dev_set_alias(struct net_device *, const char *, size_t);
2312extern int dev_change_net_namespace(struct net_device *,
2313 struct net *, const char *);
2314extern int dev_set_mtu(struct net_device *, int);
2315extern void dev_set_group(struct net_device *, int);
2316extern int dev_set_mac_address(struct net_device *,
2317 struct sockaddr *);
2318extern int dev_change_carrier(struct net_device *,
2319 bool new_carrier);
2320extern int dev_hard_start_xmit(struct sk_buff *skb,
2321 struct net_device *dev,
2322 struct netdev_queue *txq);
2323extern int dev_forward_skb(struct net_device *dev,
2324 struct sk_buff *skb);
2325
2326extern int netdev_budget;
2327
2328
2329extern void netdev_run_todo(void);
2337static inline void dev_put(struct net_device *dev)
2338{
2339 this_cpu_dec(*dev->pcpu_refcnt);
2340}
2348static inline void dev_hold(struct net_device *dev)
2349{
2350 this_cpu_inc(*dev->pcpu_refcnt);
2351}
2362extern void linkwatch_init_dev(struct net_device *dev);
2363extern void linkwatch_fire_event(struct net_device *dev);
2364extern void linkwatch_forget_dev(struct net_device *dev);
2372static inline bool netif_carrier_ok(const struct net_device *dev)
2373{
2374 return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
2375}
2376
2377extern unsigned long dev_trans_start(struct net_device *dev);
2378
2379extern void __netdev_watchdog_up(struct net_device *dev);
2380
2381extern void netif_carrier_on(struct net_device *dev);
2382
2383extern void netif_carrier_off(struct net_device *dev);
2398static inline void netif_dormant_on(struct net_device *dev)
2399{
2400 if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
2401 linkwatch_fire_event(dev);
2402}
2410static inline void netif_dormant_off(struct net_device *dev)
2411{
2412 if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
2413 linkwatch_fire_event(dev);
2414}
2422static inline bool netif_dormant(const struct net_device *dev)
2423{
2424 return test_bit(__LINK_STATE_DORMANT, &dev->state);
2425}
static inline bool netif_oper_up(const struct net_device *dev)
{
	return (dev->operstate == IF_OPER_UP ||
		dev->operstate == IF_OPER_UNKNOWN);
}
2446static inline bool netif_device_present(struct net_device *dev)
2447{
2448 return test_bit(__LINK_STATE_PRESENT, &dev->state);
2449}
2450
2451extern void netif_device_detach(struct net_device *dev);
2452
2453extern void netif_device_attach(struct net_device *dev);
2454
2455
2456
2457
2458
2459enum {
2460 NETIF_MSG_DRV = 0x0001,
2461 NETIF_MSG_PROBE = 0x0002,
2462 NETIF_MSG_LINK = 0x0004,
2463 NETIF_MSG_TIMER = 0x0008,
2464 NETIF_MSG_IFDOWN = 0x0010,
2465 NETIF_MSG_IFUP = 0x0020,
2466 NETIF_MSG_RX_ERR = 0x0040,
2467 NETIF_MSG_TX_ERR = 0x0080,
2468 NETIF_MSG_TX_QUEUED = 0x0100,
2469 NETIF_MSG_INTR = 0x0200,
2470 NETIF_MSG_TX_DONE = 0x0400,
2471 NETIF_MSG_RX_STATUS = 0x0800,
2472 NETIF_MSG_PKTDATA = 0x1000,
2473 NETIF_MSG_HW = 0x2000,
2474 NETIF_MSG_WOL = 0x4000,
2475};
2476
2477#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
2478#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
2479#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
2480#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
2481#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
2482#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
2483#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
2484#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
2485#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
2486#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
2487#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
2488#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
2489#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
2490#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
2491#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
2492
2493static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
2494{
2495
2496 if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
2497 return default_msg_enable_bits;
2498 if (debug_value == 0)
2499 return 0;
2500
2501 return (1 << debug_value) - 1;
2502}
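/*
 * Typical use at probe time (sketch; "debug" would be a driver module
 * parameter):
 *
 *	priv->msg_enable = netif_msg_init(debug,
 *					  NETIF_MSG_DRV | NETIF_MSG_PROBE |
 *					  NETIF_MSG_LINK);
 */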
2503
2504static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
2505{
2506 spin_lock(&txq->_xmit_lock);
2507 txq->xmit_lock_owner = cpu;
2508}
2509
2510static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
2511{
2512 spin_lock_bh(&txq->_xmit_lock);
2513 txq->xmit_lock_owner = smp_processor_id();
2514}
2515
2516static inline bool __netif_tx_trylock(struct netdev_queue *txq)
2517{
2518 bool ok = spin_trylock(&txq->_xmit_lock);
2519 if (likely(ok))
2520 txq->xmit_lock_owner = smp_processor_id();
2521 return ok;
2522}
2523
2524static inline void __netif_tx_unlock(struct netdev_queue *txq)
2525{
2526 txq->xmit_lock_owner = -1;
2527 spin_unlock(&txq->_xmit_lock);
2528}
2529
2530static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
2531{
2532 txq->xmit_lock_owner = -1;
2533 spin_unlock_bh(&txq->_xmit_lock);
2534}
2535
2536static inline void txq_trans_update(struct netdev_queue *txq)
2537{
2538 if (txq->xmit_lock_owner != -1)
2539 txq->trans_start = jiffies;
2540}
2548static inline void netif_tx_lock(struct net_device *dev)
2549{
2550 unsigned int i;
2551 int cpu;
2552
2553 spin_lock(&dev->tx_global_lock);
2554 cpu = smp_processor_id();
2555 for (i = 0; i < dev->num_tx_queues; i++) {
2556 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2564 __netif_tx_lock(txq, cpu);
2565 set_bit(__QUEUE_STATE_FROZEN, &txq->state);
2566 __netif_tx_unlock(txq);
2567 }
2568}
2569
2570static inline void netif_tx_lock_bh(struct net_device *dev)
2571{
2572 local_bh_disable();
2573 netif_tx_lock(dev);
2574}
2575
2576static inline void netif_tx_unlock(struct net_device *dev)
2577{
2578 unsigned int i;
2579
2580 for (i = 0; i < dev->num_tx_queues; i++) {
2581 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2582
2583
2584
2585
2586
2587 clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
2588 netif_schedule_queue(txq);
2589 }
2590 spin_unlock(&dev->tx_global_lock);
2591}
2592
2593static inline void netif_tx_unlock_bh(struct net_device *dev)
2594{
2595 netif_tx_unlock(dev);
2596 local_bh_enable();
2597}
2598
2599#define HARD_TX_LOCK(dev, txq, cpu) { \
2600 if ((dev->features & NETIF_F_LLTX) == 0) { \
2601 __netif_tx_lock(txq, cpu); \
2602 } \
2603}
2604
2605#define HARD_TX_UNLOCK(dev, txq) { \
2606 if ((dev->features & NETIF_F_LLTX) == 0) { \
2607 __netif_tx_unlock(txq); \
2608 } \
2609}
2610
2611static inline void netif_tx_disable(struct net_device *dev)
2612{
2613 unsigned int i;
2614 int cpu;
2615
2616 local_bh_disable();
2617 cpu = smp_processor_id();
2618 for (i = 0; i < dev->num_tx_queues; i++) {
2619 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2620
2621 __netif_tx_lock(txq, cpu);
2622 netif_tx_stop_queue(txq);
2623 __netif_tx_unlock(txq);
2624 }
2625 local_bh_enable();
2626}
2627
2628static inline void netif_addr_lock(struct net_device *dev)
2629{
2630 spin_lock(&dev->addr_list_lock);
2631}
2632
2633static inline void netif_addr_lock_nested(struct net_device *dev)
2634{
2635 spin_lock_nested(&dev->addr_list_lock, SINGLE_DEPTH_NESTING);
2636}
2637
2638static inline void netif_addr_lock_bh(struct net_device *dev)
2639{
2640 spin_lock_bh(&dev->addr_list_lock);
2641}
2642
2643static inline void netif_addr_unlock(struct net_device *dev)
2644{
2645 spin_unlock(&dev->addr_list_lock);
2646}
2647
2648static inline void netif_addr_unlock_bh(struct net_device *dev)
2649{
2650 spin_unlock_bh(&dev->addr_list_lock);
2651}
2652
2653
2654
2655
2656
2657#define for_each_dev_addr(dev, ha) \
2658 list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)

extern void ether_setup(struct net_device *dev);

extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
					   void (*setup)(struct net_device *),
					   unsigned int txqs, unsigned int rxqs);
#define alloc_netdev(sizeof_priv, name, setup) \
	alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)

#define alloc_netdev_mq(sizeof_priv, name, setup, count) \
	alloc_netdev_mqs(sizeof_priv, name, setup, count, count)

extern int register_netdev(struct net_device *dev);
extern void unregister_netdev(struct net_device *dev);
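
/*
 * Typical driver bring-up pairs alloc_netdev() with register_netdev().  A
 * minimal, hypothetical sketch ("foo_priv" and "foo_netdev_ops" are not part
 * of this header):
 *
 *	struct net_device *dev;
 *	int err;
 *
 *	dev = alloc_netdev(sizeof(struct foo_priv), "foo%d", ether_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	dev->netdev_ops = &foo_netdev_ops;
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 */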
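/* General hardware address lists handling functions */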
extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
				  struct netdev_hw_addr_list *from_list,
				  int addr_len, unsigned char addr_type);
extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
				   struct netdev_hw_addr_list *from_list,
				   int addr_len, unsigned char addr_type);
extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
			  struct netdev_hw_addr_list *from_list,
			  int addr_len);
extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
			     struct netdev_hw_addr_list *from_list,
			     int addr_len);
extern void __hw_addr_flush(struct netdev_hw_addr_list *list);
extern void __hw_addr_init(struct netdev_hw_addr_list *list);
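/* Functions used for device addresses handling */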
extern int dev_addr_add(struct net_device *dev, const unsigned char *addr,
			unsigned char addr_type);
extern int dev_addr_del(struct net_device *dev, const unsigned char *addr,
			unsigned char addr_type);
extern int dev_addr_add_multiple(struct net_device *to_dev,
				 struct net_device *from_dev,
				 unsigned char addr_type);
extern int dev_addr_del_multiple(struct net_device *to_dev,
				 struct net_device *from_dev,
				 unsigned char addr_type);
extern void dev_addr_flush(struct net_device *dev);
extern int dev_addr_init(struct net_device *dev);

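/* Functions used for unicast addresses handling */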
extern int dev_uc_add(struct net_device *dev, const unsigned char *addr);
extern int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
extern int dev_uc_del(struct net_device *dev, const unsigned char *addr);
extern int dev_uc_sync(struct net_device *to, struct net_device *from);
extern int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
extern void dev_uc_flush(struct net_device *dev);
extern void dev_uc_init(struct net_device *dev);

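/* Functions used for multicast addresses handling */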
extern int dev_mc_add(struct net_device *dev, const unsigned char *addr);
extern int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
extern int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
extern int dev_mc_del(struct net_device *dev, const unsigned char *addr);
extern int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
extern int dev_mc_sync(struct net_device *to, struct net_device *from);
extern int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
extern void dev_mc_flush(struct net_device *dev);
extern void dev_mc_init(struct net_device *dev);

extern void dev_set_rx_mode(struct net_device *dev);
extern void __dev_set_rx_mode(struct net_device *dev);
extern int dev_set_promiscuity(struct net_device *dev, int inc);
extern int dev_set_allmulti(struct net_device *dev, int inc);
extern void netdev_state_change(struct net_device *dev);
extern void netdev_notify_peers(struct net_device *dev);
extern void netdev_features_change(struct net_device *dev);

extern void dev_load(struct net *net, const char *name);
extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					       struct rtnl_link_stats64 *storage);
extern void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
				    const struct net_device_stats *netdev_stats);

extern int netdev_max_backlog;
extern int netdev_tstamp_prequeue;
extern int weight_p;
extern int bpf_jit_enable;

extern bool netdev_has_upper_dev(struct net_device *dev,
				 struct net_device *upper_dev);
extern bool netdev_has_any_upper_dev(struct net_device *dev);
extern struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
extern struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
extern int netdev_upper_dev_link(struct net_device *dev,
				 struct net_device *upper_dev);
extern int netdev_master_upper_dev_link(struct net_device *dev,
					struct net_device *upper_dev);
extern void netdev_upper_dev_unlink(struct net_device *dev,
				    struct net_device *upper_dev);
extern int skb_checksum_help(struct sk_buff *skb);
extern struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
					 netdev_features_t features, bool tx_path);
extern struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
					   netdev_features_t features);

static inline
struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
	return __skb_gso_segment(skb, features, true);
}
__be16 skb_network_protocol(struct sk_buff *skb);

static inline bool can_checksum_protocol(netdev_features_t features,
					 __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_V4_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_V6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)) ||
		((features & NETIF_F_FCOE_CRC) &&
		 protocol == htons(ETH_P_FCOE)));
}
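
/*
 * can_checksum_protocol() answers whether a feature set can offload
 * checksumming for a given L3 protocol: NETIF_F_GEN_CSUM covers everything,
 * otherwise the protocol-specific flags must match.  For example, a device
 * advertising only NETIF_F_IP_CSUM makes
 * can_checksum_protocol(features, htons(ETH_P_IPV6)) return false, so the
 * stack falls back to skb_checksum_help() for IPv6 packets.
 */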

#ifdef CONFIG_BUG
extern void netdev_rx_csum_fault(struct net_device *dev);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
}
#endif

extern void net_enable_timestamp(void);
extern void net_disable_timestamp(void);

#ifdef CONFIG_PROC_FS
extern int __init dev_proc_init(void);
#else
#define dev_proc_init() 0
#endif

extern int netdev_class_create_file(struct class_attribute *class_attr);
extern void netdev_class_remove_file(struct class_attribute *class_attr);

extern struct kobj_ns_type_operations net_ns_type_operations;

extern const char *netdev_drivername(const struct net_device *dev);

extern void linkwatch_run_queue(void);

static inline netdev_features_t netdev_get_wanted_features(
	struct net_device *dev)
{
	return (dev->features & ~dev->hw_features) | dev->wanted_features;
}
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask);

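/* Allow TSO being used on stacked device :
 * Performing the GSO segmentation before last device
 * is a performance improvement.
 */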
static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
							netdev_features_t mask)
{
	return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
}

int __netdev_update_features(struct net_device *dev);
void netdev_update_features(struct net_device *dev);
void netdev_change_features(struct net_device *dev);

void netif_stacked_transfer_operstate(const struct net_device *rootdev,
				      struct net_device *dev);

netdev_features_t netif_skb_features(struct sk_buff *skb);

static inline bool net_gso_ok(netdev_features_t features, int gso_type)
{
	netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;

	BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));

	return (features & feature) == feature;
}

static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
{
	return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
	       (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
}

static inline bool netif_needs_gso(struct sk_buff *skb,
				   netdev_features_t features)
{
	return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
		unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
			 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
}
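
/*
 * The GSO helpers above are chained on the transmit path roughly like this
 * (condensed from the logic in net/core/dev.c; error handling is omitted and
 * the "drop" label is hypothetical):
 *
 *	netdev_features_t features = netif_skb_features(skb);
 *
 *	if (netif_needs_gso(skb, features)) {
 *		struct sk_buff *segs = skb_gso_segment(skb, features);
 *		if (IS_ERR(segs))
 *			goto drop;
 *		// transmit each segment in 'segs' individually
 *	}
 */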

static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	dev->gso_max_size = size;
}

static inline bool netif_is_bond_master(struct net_device *dev)
{
	return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
}

static inline bool netif_is_bond_slave(struct net_device *dev)
{
	return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
}

static inline bool netif_supports_nofcs(struct net_device *dev)
{
	return dev->priv_flags & IFF_SUPP_NOFCS;
}

extern struct pernet_operations __net_initdata loopback_net_ops;

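/* Logging, debugging and troubleshooting/diagnostic helpers. */

/* netdev_printk helpers, similar to dev_printk */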
static inline const char *netdev_name(const struct net_device *dev)
{
	if (dev->reg_state != NETREG_REGISTERED)
		return "(unregistered net_device)";
	return dev->name;
}

extern __printf(3, 4)
int netdev_printk(const char *level, const struct net_device *dev,
		  const char *format, ...);
extern __printf(2, 3)
int netdev_emerg(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_alert(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_crit(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_err(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_warn(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_notice(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_info(const struct net_device *dev, const char *format, ...);

#define MODULE_ALIAS_NETDEV(device) \
	MODULE_ALIAS("netdev-" device)

#if defined(CONFIG_DYNAMIC_DEBUG)
#define netdev_dbg(__dev, format, args...)			\
do {								\
	dynamic_netdev_dbg(__dev, format, ##args);		\
} while (0)
#elif defined(DEBUG)
#define netdev_dbg(__dev, format, args...)			\
	netdev_printk(KERN_DEBUG, __dev, format, ##args)
#else
#define netdev_dbg(__dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, __dev, format, ##args); \
	0;							\
})
#endif
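
/*
 * Note the "if (0)" form in the fallback above: the call is compiled out,
 * but the arguments are still type-checked against the format string, so a
 * debug-only printf bug cannot hide in non-debug builds.
 */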

#if defined(VERBOSE_DEBUG)
#define netdev_vdbg	netdev_dbg
#else

#define netdev_vdbg(dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, dev, format, ##args);	\
	0;							\
})
#endif
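/*
 * netdev_WARN() acts like dev_printk(), but with the key difference
 * of using a WARN/WARN_ON to get the message out, including the
 * file/line information and a backtrace.
 */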
#define netdev_WARN(dev, format, args...)			\
	WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args)
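/* netif printk helpers, similar to netdev_printk */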
#define netif_printk(priv, type, level, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_printk(level, (dev), fmt, ##args);	\
} while (0)

#define netif_level(level, priv, type, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_##level(dev, fmt, ##args);		\
} while (0)

#define netif_emerg(priv, type, dev, fmt, args...)		\
	netif_level(emerg, priv, type, dev, fmt, ##args)
#define netif_alert(priv, type, dev, fmt, args...)		\
	netif_level(alert, priv, type, dev, fmt, ##args)
#define netif_crit(priv, type, dev, fmt, args...)		\
	netif_level(crit, priv, type, dev, fmt, ##args)
#define netif_err(priv, type, dev, fmt, args...)		\
	netif_level(err, priv, type, dev, fmt, ##args)
#define netif_warn(priv, type, dev, fmt, args...)		\
	netif_level(warn, priv, type, dev, fmt, ##args)
#define netif_notice(priv, type, dev, fmt, args...)		\
	netif_level(notice, priv, type, dev, fmt, ##args)
#define netif_info(priv, type, dev, fmt, args...)		\
	netif_level(info, priv, type, dev, fmt, ##args)
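
/*
 * The netif_*() message macros gate output on a driver's msg_enable bitmap
 * (see the netif_msg_*() helpers and netif_msg_init() in this header).  A
 * hedged usage sketch, assuming a driver private struct 'priv' with a
 * msg_enable field and a local 'speed' variable:
 *
 *	netif_info(priv, link, dev, "link up, %u Mbps\n", speed);
 *	netif_err(priv, tx_err, dev, "TX ring full\n");
 */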

#if defined(CONFIG_DYNAMIC_DEBUG)
#define netif_dbg(priv, type, netdev, format, args...)		\
do {								\
	if (netif_msg_##type(priv))				\
		dynamic_netdev_dbg(netdev, format, ##args);	\
} while (0)
#elif defined(DEBUG)
#define netif_dbg(priv, type, dev, format, args...)		\
	netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
#else
#define netif_dbg(priv, type, dev, format, args...)		\
({								\
	if (0)							\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;							\
})
#endif

#if defined(VERBOSE_DEBUG)
#define netif_vdbg	netif_dbg
#else
#define netif_vdbg(priv, type, dev, format, args...)		\
({								\
	if (0)							\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;							\
})
#endif
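/*
 * Hash table for the list of packet types we will receive (as opposed to
 * discard).  The hash is taken from the low nibble of the protocol number,
 * hence 16 buckets.
 */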
#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

#endif	/* _LINUX_NETDEVICE_H */