/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/if_link.h>

#ifdef __KERNEL__
#include <linux/pm_qos_params.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <asm/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/device.h>
#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>

#include <linux/ethtool.h>
#include <net/net_namespace.h>
#include <net/dsa.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif

struct vlan_group;
struct netpoll_info;
struct phy_device;
/* 802.11 specific */
struct wireless_dev;
					/* source back-compat hooks */
#define SET_ETHTOOL_OPS(netdev, ops) \
	((netdev)->ethtool_ops = (ops))
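
/*
 * Example (illustrative sketch, not part of this header): a driver
 * normally wires up its ethtool_ops from its probe routine.  The
 * "foo" names below are hypothetical.
 *
 *	static const struct ethtool_ops foo_ethtool_ops = {
 *		.get_link	= ethtool_op_get_link,
 *	};
 *
 *	SET_ETHTOOL_OPS(netdev, &foo_ethtool_ops);
 */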

#define HAVE_ALLOC_NETDEV		/* feature macro: alloc_xxxdev
					   functions are available. */
#define HAVE_FREE_NETDEV		/* free_netdev() */
#define HAVE_NETDEV_PRIV		/* netdev_priv() */

/* hardware address assignment types */
#define NET_ADDR_PERM		0	/* address is permanent (default) */
#define NET_ADDR_RANDOM		1	/* address is generated randomly */
#define NET_ADDR_STOLEN		2	/* address is stolen from another device */

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */

/*
 * Transmit return codes: transmit return codes originate from three different
 * namespaces:
 *
 * - qdisc return codes
 * - driver transmit return codes
 * - errno values
 *
 * Drivers are allowed to return any one of those in their hard_start_xmit()
 * function. Real network devices commonly used with qdiscs should only return
 * the driver transmit return codes though - when qdiscs are used, the actual
 * transmission happens asynchronously, so the value is not propagated to
 * higher layers. Virtual network devices transmit synchronously, in this case
 * the driver transmit return codes are consumed by dev_queue_xmit(), all
 * others are propagated to higher layers.
 */
/* qdisc ->enqueue() return codes. */
#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_DROP		0x01	/* skb dropped			*/
#define NET_XMIT_CN		0x02	/* congestion notification	*/
#define NET_XMIT_POLICED	0x03	/* skb is shot by police	*/
#define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority, prompting us to send less aggressively. */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)
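
/*
 * Example (sketch): callers of dev_queue_xmit() commonly collapse the
 * congestion-notification code into success with net_xmit_eval(),
 * because NET_XMIT_CN does not mean this particular packet was lost:
 *
 *	int rc = dev_queue_xmit(skb);
 *	return net_xmit_eval(rc);	(0 for both SUCCESS and CN)
 */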

/* Driver transmit return codes */
#define NETDEV_TX_MASK		0xf0

enum netdev_tx {
	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy */
	NETDEV_TX_LOCKED = 0x20,	/* driver tx lock was already taken */
};
typedef enum netdev_tx netdev_tx_t;

/*
 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
 * hard_start_xmit() must not return NET_XMIT_* or negative errno values.
 */
static inline bool dev_xmit_complete(int rc)
{
	/*
	 * Positive cases with an skb consumed by a driver:
	 * - successful transmission (rc == NETDEV_TX_OK)
	 * - error while transmitting (rc < 0)
	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
	 */
	if (likely(rc < NET_XMIT_MASK))
		return true;

	return false;
}

#endif

#define MAX_ADDR_LEN	32		/* Largest hardware address length */

/* Initial net device group. All devices belong to group 0 by default. */
#define INIT_NETDEV_GROUP	0

#ifdef __KERNEL__
/*
 * Compute the worst case header length according to the protocols
 * used.
 */
#if defined(CONFIG_WLAN) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#elif defined(CONFIG_TR) || defined(CONFIG_TR_MODULE)
# define LL_MAX_HEADER 48
#else
# define LL_MAX_HEADER 32
#endif

#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \
    !defined(CONFIG_NET_IPGRE) && !defined(CONFIG_NET_IPGRE_MODULE) && \
    !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \
    !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

/*
 * Old network device statistics. Fields are native words
 * (unsigned long) so they can be read and written atomically.
 */
struct net_device_stats {
	unsigned long	rx_packets;
	unsigned long	tx_packets;
	unsigned long	rx_bytes;
	unsigned long	tx_bytes;
	unsigned long	rx_errors;
	unsigned long	tx_errors;
	unsigned long	rx_dropped;
	unsigned long	tx_dropped;
	unsigned long	multicast;
	unsigned long	collisions;
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;
	unsigned long	rx_crc_errors;
	unsigned long	rx_frame_errors;
	unsigned long	rx_fifo_errors;
	unsigned long	rx_missed_errors;
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};

#endif	/* __KERNEL__ */

/* Media selection options. */
enum {
	IF_PORT_UNKNOWN = 0,
	IF_PORT_10BASE2,
	IF_PORT_10BASET,
	IF_PORT_AUI,
	IF_PORT_100BASET,
	IF_PORT_100BASETX,
	IF_PORT_100BASEFX
};

#ifdef __KERNEL__

#include <linux/cache.h>
#include <linux/skbuff.h>

struct neighbour;
struct neigh_parms;
struct sk_buff;
struct netdev_hw_addr {
	struct list_head	list;
	unsigned char		addr[MAX_ADDR_LEN];
	unsigned char		type;
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_SLAVE		3
#define NETDEV_HW_ADDR_T_UNICAST	4
#define NETDEV_HW_ADDR_T_MULTICAST	5
	bool			synced;
	bool			global_use;
	int			refcount;
	struct rcu_head		rcu_head;
};

struct netdev_hw_addr_list {
	struct list_head	list;
	int			count;
};

#define netdev_hw_addr_list_count(l) ((l)->count)
#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
#define netdev_hw_addr_list_for_each(ha, l) \
	list_for_each_entry(ha, &(l)->list, list)

#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->uc)

#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->mc)
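
/*
 * Example (sketch): a driver's ndo_set_multicast_list() typically walks
 * the list with the helper above; foo_hw_add_mc_filter() is a
 * hypothetical device-specific helper.
 *
 *	struct netdev_hw_addr *ha;
 *
 *	netdev_for_each_mc_addr(ha, dev)
 *		foo_hw_add_mc_filter(priv, ha->addr);
 */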

struct hh_cache {
	struct hh_cache *hh_next;	/* Next entry			*/
	atomic_t	hh_refcnt;	/* number of users		*/
/*
 * We want hh_output, hh_len, hh_lock and hh_data to be in a separate
 * cache line on SMP: they are mostly read, but hh_refcnt may be
 * changed quite often, incurring cache line ping pongs.
 */
	__be16		hh_type ____cacheline_aligned_in_smp;
					/* protocol identifier, f.e ETH_P_IP
					 * NOTE: for VLANs, this will be the
					 * encapsulated type.
					 */
	u16		hh_len;		/* length of header */
	int		(*hh_output)(struct sk_buff *skb);
	seqlock_t	hh_lock;

	/* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

static inline void hh_cache_put(struct hh_cache *hh)
{
	if (atomic_dec_and_test(&hh->hh_refcnt))
		kfree(hh);
}

/*
 * Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that
 * much.  We must maintain the relationship HH alignment <= LL alignment.
 *
 * LL_ALLOCATED_SPACE additionally accounts for the tailroom the device
 * may need.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_ALLOCATED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(dev)->needed_tailroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
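
/*
 * Example (sketch): protocol code that builds its own link-layer header
 * sizes the allocation with LL_ALLOCATED_SPACE() and reserves
 * LL_RESERVED_SPACE() so the header can be pushed without reallocating:
 *
 *	skb = alloc_skb(LL_ALLOCATED_SPACE(dev) + len, GFP_ATOMIC);
 *	if (skb == NULL)
 *		return -ENOMEM;
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 */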

struct header_ops {
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*rebuild)(struct sk_buff *skb);
#define HAVE_HEADER_CACHE
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
};

/* These flag bits are private to the generic network queueing
 * layer, they may not be explicitly referenced by any other
 * code.
 */
enum netdev_state_t {
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
};

/*
 * This structure holds boot-time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

extern int __init netdev_boot_setup(char *str);

/*
 * Structure for NAPI scheduling, similar to tasklet but with weighting.
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit.  This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-cpu poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	spinlock_t		poll_lock;
	int			poll_owner;
#endif

	unsigned int		gro_count;

	struct net_device	*dev;
	struct list_head	dev_list;
	struct sk_buff		*gro_list;
	struct sk_buff		*skb;
};

enum {
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_DISABLE,	/* Disable pending */
	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
};

enum gro_result {
	GRO_MERGED,
	GRO_MERGED_FREE,
	GRO_HELD,
	GRO_NORMAL,
	GRO_DROP,
};
typedef enum gro_result gro_result_t;

/*
 * enum rx_handler_result - Possible return values for rx_handlers.
 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
 * further.
 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
 * case skb->dev was changed by rx_handler.
 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
 *
 * rx_handlers are functions called from inside __netif_receive_skb(), to do
 * special processing of the skb, prior to delivery to protocol handlers.
 *
 * Multiple rx_handlers cannot be registered for a single device: an
 * rx_handler is registered with netdev_rx_handler_register() and
 * unregistered with netdev_rx_handler_unregister().
 *
 * Upon return, the rx_handler tells __netif_receive_skb() what to do with
 * the skb by returning one of the values above.
 */
enum rx_handler_result {
	RX_HANDLER_CONSUMED,
	RX_HANDLER_ANOTHER,
	RX_HANDLER_EXACT,
	RX_HANDLER_PASS,
};
typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);

extern void __napi_schedule(struct napi_struct *n);

static inline int napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_schedule_prep - check if napi can be scheduled
 *	@n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
static inline int napi_schedule_prep(struct napi_struct *n)
{
	return !napi_disable_pending(n) &&
		!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}

/**
 *	napi_schedule - schedule NAPI poll
 *	@n: napi context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}

/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
static inline int napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return 1;
	}
	return 0;
}

/**
 *	napi_complete - NAPI processing complete
 *	@n: napi context
 *
 * Mark NAPI processing as complete.
 */
extern void __napi_complete(struct napi_struct *n);
extern void napi_complete(struct napi_struct *n);

/**
 *	napi_disable - prevent NAPI from scheduling
 *	@n: napi context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
static inline void napi_disable(struct napi_struct *n)
{
	set_bit(NAPI_STATE_DISABLE, &n->state);
	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	clear_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_enable - enable NAPI scheduling
 *	@n: napi context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}
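
/*
 * Example (sketch): the canonical shape of a driver poll routine built
 * on the helpers above; foo_priv, foo_rx() and foo_enable_irq() are
 * hypothetical.
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
 *		int work_done = foo_rx(priv, budget);
 *
 *		if (work_done < budget) {
 *			napi_complete(napi);
 *			foo_enable_irq(priv);
 *		}
 *		return work_done;
 *	}
 */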

#ifdef CONFIG_SMP
/**
 *	napi_synchronize - wait until NAPI is not running
 *	@n: napi context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	while (test_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
}
#else
# define napi_synchronize(n)	barrier()
#endif

enum netdev_queue_state_t {
	__QUEUE_STATE_XOFF,
	__QUEUE_STATE_FROZEN,
#define QUEUE_STATE_XOFF_OR_FROZEN ((1 << __QUEUE_STATE_XOFF)		| \
				    (1 << __QUEUE_STATE_FROZEN))
};

struct netdev_queue {
/*
 * read mostly part
 */
	struct net_device	*dev;
	struct Qdisc		*qdisc;
	unsigned long		state;
	struct Qdisc		*qdisc_sleeping;
#ifdef CONFIG_RPS
	struct kobject		kobj;
#endif
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	int			numa_node;
#endif
/*
 * write mostly part
 */
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	int			xmit_lock_owner;
	/*
	 * please use this field instead of dev->trans_start
	 */
	unsigned long		trans_start;
} ____cacheline_aligned_in_smp;

static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	return q->numa_node;
#else
	return NUMA_NO_NODE;
#endif
}

static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	q->numa_node = node;
#endif
}

#ifdef CONFIG_RPS
/*
 * This structure holds an RPS map which can be of variable length.  The
 * map is an array of CPUs.
 */
struct rps_map {
	unsigned int len;
	struct rcu_head rcu;
	u16 cpus[0];
};
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + (_num * sizeof(u16)))

/*
 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
 * tail pointer for that CPU's input queue at the time of last enqueue, and
 * a hardware filter index.
 */
struct rps_dev_flow {
	u16 cpu;
	u16 filter;
	unsigned int last_qtail;
};
#define RPS_NO_FILTER 0xffff

/*
 * The rps_dev_flow_table structure contains a table of flow mappings.
 */
struct rps_dev_flow_table {
	unsigned int mask;
	struct rcu_head rcu;
	struct work_struct free_work;
	struct rps_dev_flow flows[0];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
	(_num * sizeof(struct rps_dev_flow)))

/*
 * The rps_sock_flow_table contains mappings of flows to the last CPU
 * on which they were processed by the application (set in recvmsg).
 */
struct rps_sock_flow_table {
	unsigned int mask;
	u16 ents[0];
};
#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
	(_num * sizeof(u16)))

#define RPS_NO_CPU 0xffff

static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
					u32 hash)
{
	if (table && hash) {
		unsigned int cpu, index = hash & table->mask;

		/* We only give a hint, preemption can change cpu under us */
		cpu = raw_smp_processor_id();

		if (table->ents[index] != cpu)
			table->ents[index] = cpu;
	}
}

static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
				       u32 hash)
{
	if (table && hash)
		table->ents[hash & table->mask] = RPS_NO_CPU;
}

extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;

#ifdef CONFIG_RFS_ACCEL
extern bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
				u32 flow_id, u16 filter_id);
#endif

/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
	struct rps_map __rcu		*rps_map;
	struct rps_dev_flow_table __rcu	*rps_flow_table;
	struct kobject			kobj;
	struct net_device		*dev;
} ____cacheline_aligned_in_smp;
#endif /* CONFIG_RPS */

#ifdef CONFIG_XPS
/*
 * This structure holds an XPS map which can be of variable length.  The
 * map is an array of queues.
 */
struct xps_map {
	unsigned int len;
	unsigned int alloc_len;
	struct rcu_head rcu;
	u16 queues[0];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + (_num * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map))	\
    / sizeof(u16))

/*
 * This structure holds all XPS maps for device.  Maps are indexed by CPU.
 */
struct xps_dev_maps {
	struct rcu_head rcu;
	struct xps_map __rcu *cpu_map[0];
};
#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) +		\
    (nr_cpu_ids * sizeof(struct xps_map *)))
#endif /* CONFIG_XPS */

#define TC_MAX_QUEUE	16
#define TC_BITMASK	15
/* HW offloaded queuing disciplines txq count and offset maps */
struct netdev_tc_txq {
	u16 count;
	u16 offset;
};

/*
 * This structure defines the management hooks for network devices.
 * The following hooks can be defined; unless noted otherwise, they are
 * optional and can be filled with a null pointer.
 *
 * int (*ndo_init)(struct net_device *dev);
 *     This function is called once when a network device is registered.
 *     The network device can use this for any late stage initialization
 *     or semantic validation.  It can fail with an error code which will
 *     be propagated back to register_netdev.
 *
 * void (*ndo_uninit)(struct net_device *dev);
 *     This function is called when a device is unregistered or when
 *     registration fails.  It is not called if init fails.
 *
 * int (*ndo_open)(struct net_device *dev);
 *     This function is called when a network device transitions to the
 *     up state.
 *
 * int (*ndo_stop)(struct net_device *dev);
 *     This function is called when a network device transitions to the
 *     down state.
 *
 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb, struct net_device *dev);
 *     Called when a packet needs to be transmitted.  Must return
 *     NETDEV_TX_OK, NETDEV_TX_BUSY or NETDEV_TX_LOCKED.  Required;
 *     cannot be NULL.
 *
 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb);
 *     Called to decide which queue to use when the device supports
 *     multiple transmit queues.
 *
 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
 *     This function is called to allow the device receiver to make
 *     changes to configuration when multicast or promiscuous mode is
 *     changed.
 *
 * void (*ndo_set_rx_mode)(struct net_device *dev);
 *     This function is called when the receive mode of the device
 *     changes, e.g. when it enters promiscuous mode.
 *
 * void (*ndo_set_multicast_list)(struct net_device *dev);
 *     This function is called when the multicast address list changes.
 *
 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
 *     This function is called when the Media Access Control address
 *     needs to be changed.  If this interface is not defined, the
 *     MAC address cannot be changed.
 *
 * int (*ndo_validate_addr)(struct net_device *dev);
 *     Test if the Media Access Control address is valid for the device.
 *
 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *     Called when a user requests an ioctl which can't be handled by
 *     the generic interface code.
 *
 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
 *     Called when a user wants to change the Maximum Transfer Unit
 *     of a device.
 *
 * void (*ndo_tx_timeout)(struct net_device *dev);
 *     Callback used when the transmitter has not made any progress
 *     for dev->watchdog ticks.
 *
 * The remaining hooks cover statistics (ndo_get_stats64/ndo_get_stats),
 * VLAN acceleration (ndo_vlan_rx_*), netpoll, SR-IOV virtual function
 * configuration (ndo_set_vf_*, ndo_get_vf_*), traffic classes
 * (ndo_setup_tc), FCoE offloads (ndo_fcoe_*), accelerated RFS flow
 * steering (ndo_rx_flow_steer), slave management for bonding
 * (ndo_add_slave/ndo_del_slave) and device feature negotiation
 * (ndo_fix_features/ndo_set_features).
 */
#define HAVE_NET_DEVICE_OPS
struct net_device_ops {
	int			(*ndo_init)(struct net_device *dev);
	void			(*ndo_uninit)(struct net_device *dev);
	int			(*ndo_open)(struct net_device *dev);
	int			(*ndo_stop)(struct net_device *dev);
	netdev_tx_t		(*ndo_start_xmit) (struct sk_buff *skb,
						   struct net_device *dev);
	u16			(*ndo_select_queue)(struct net_device *dev,
						    struct sk_buff *skb);
	void			(*ndo_change_rx_flags)(struct net_device *dev,
						       int flags);
	void			(*ndo_set_rx_mode)(struct net_device *dev);
	void			(*ndo_set_multicast_list)(struct net_device *dev);
	int			(*ndo_set_mac_address)(struct net_device *dev,
						       void *addr);
	int			(*ndo_validate_addr)(struct net_device *dev);
	int			(*ndo_do_ioctl)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
	int			(*ndo_set_config)(struct net_device *dev,
						  struct ifmap *map);
	int			(*ndo_change_mtu)(struct net_device *dev,
						  int new_mtu);
	int			(*ndo_neigh_setup)(struct net_device *dev,
						   struct neigh_parms *);
	void			(*ndo_tx_timeout) (struct net_device *dev);

	struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
						     struct rtnl_link_stats64 *storage);
	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);

	void			(*ndo_vlan_rx_register)(struct net_device *dev,
							struct vlan_group *grp);
	void			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
						       unsigned short vid);
	void			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
							unsigned short vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*ndo_poll_controller)(struct net_device *dev);
	int			(*ndo_netpoll_setup)(struct net_device *dev,
						     struct netpoll_info *info);
	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
	int			(*ndo_set_vf_mac)(struct net_device *dev,
						  int queue, u8 *mac);
	int			(*ndo_set_vf_vlan)(struct net_device *dev,
						   int queue, u16 vlan, u8 qos);
	int			(*ndo_set_vf_tx_rate)(struct net_device *dev,
						      int vf, int rate);
	int			(*ndo_get_vf_config)(struct net_device *dev,
						     int vf,
						     struct ifla_vf_info *ivf);
	int			(*ndo_set_vf_port)(struct net_device *dev,
						   int vf,
						   struct nlattr *port[]);
	int			(*ndo_get_vf_port)(struct net_device *dev,
						   int vf, struct sk_buff *skb);
	int			(*ndo_setup_tc)(struct net_device *dev, u8 tc);
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
	int			(*ndo_fcoe_enable)(struct net_device *dev);
	int			(*ndo_fcoe_disable)(struct net_device *dev);
	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
						      u16 xid,
						      struct scatterlist *sgl,
						      unsigned int sgc);
	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
						     u16 xid);
	int			(*ndo_fcoe_ddp_target)(struct net_device *dev,
						       u16 xid,
						       struct scatterlist *sgl,
						       unsigned int sgc);
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
						    u64 *wwn, int type);
#endif
#ifdef CONFIG_RFS_ACCEL
	int			(*ndo_rx_flow_steer)(struct net_device *dev,
						     const struct sk_buff *skb,
						     u16 rxq_index,
						     u32 flow_id);
#endif
	int			(*ndo_add_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	int			(*ndo_del_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	u32			(*ndo_fix_features)(struct net_device *dev,
						    u32 features);
	int			(*ndo_set_features)(struct net_device *dev,
						    u32 features);
};
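
/*
 * Example (sketch): a minimal ops table.  Only the hooks a driver
 * implements need to be filled in; the rest stay NULL.  The foo_*
 * functions are hypothetical; eth_mac_addr/eth_validate_addr are the
 * stock Ethernet helpers.
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		.ndo_open		= foo_open,
 *		.ndo_stop		= foo_stop,
 *		.ndo_start_xmit		= foo_start_xmit,
 *		.ndo_set_mac_address	= eth_mac_addr,
 *		.ndo_validate_addr	= eth_validate_addr,
 *	};
 *
 * and in the probe routine:  dev->netdev_ops = &foo_netdev_ops;
 */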

/*
 *	The DEVICE structure.
 *	Actually, this whole structure is a big mistake.  It mixes I/O
 *	data with strictly "high-level" data, and it has to know about
 *	almost every data structure used in the INET module.
 *
 *	FIXME: cleanup struct net_device such that network protocol info
 *	moves out.
 */

struct net_device {

	/*
	 * This is the first field of the "visible" part of this structure
	 * (i.e. as seen by users in the "Space.c" file).  It is the name
	 * of the interface.
	 */
	char			name[IFNAMSIZ];

	struct pm_qos_request_list pm_qos_req;

	/* device name hash chain */
	struct hlist_node	name_hlist;
	/* snmp alias */
	char			*ifalias;

	/*
	 *	I/O specific fields
	 *	FIXME: Merge these and struct ifmap into one
	 */
	unsigned long		mem_end;	/* shared mem end	*/
	unsigned long		mem_start;	/* shared mem start	*/
	unsigned long		base_addr;	/* device I/O address	*/
	unsigned int		irq;		/* device IRQ number	*/

	/*
	 *	Some hardware also needs these fields, but they are not
	 *	part of the usual set specified in Space.c.
	 */
	unsigned char		if_port;	/* Selectable AUI, TP,..*/
	unsigned char		dma;		/* DMA channel		*/

	unsigned long		state;

	struct list_head	dev_list;
	struct list_head	napi_list;
	struct list_head	unreg_list;

	/* currently active device features */
	u32			features;
	/* user-changeable features */
	u32			hw_features;
	/* user-requested features */
	u32			wanted_features;
	/* mask of features inheritable by VLAN devices */
	u32			vlan_features;

	/* Net device feature bits; if you change something,
	 * also update netdev_features_strings[] in ethtool.c */

#define NETIF_F_SG		1	/* Scatter/gather IO. */
#define NETIF_F_IP_CSUM		2	/* Can checksum TCP/UDP over IPv4. */
#define NETIF_F_NO_CSUM		4	/* Does not require checksum. F.e. loopback. */
#define NETIF_F_HW_CSUM		8	/* Can checksum all the packets. */
#define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
#define NETIF_F_HIGHDMA		32	/* Can DMA to high memory. */
#define NETIF_F_FRAGLIST	64	/* Scatter/gather IO. */
#define NETIF_F_HW_VLAN_TX	128	/* Transmit VLAN hw acceleration */
#define NETIF_F_HW_VLAN_RX	256	/* Receive VLAN hw acceleration */
#define NETIF_F_HW_VLAN_FILTER	512	/* Receive filtering on VLAN */
#define NETIF_F_VLAN_CHALLENGED	1024	/* Device cannot handle VLAN packets */
#define NETIF_F_GSO		2048	/* Enable software GSO. */
#define NETIF_F_LLTX		4096	/* LockLess TX - deprecated. Please */
					/* do not use LLTX in new drivers */
#define NETIF_F_NETNS_LOCAL	8192	/* Does not change network namespaces */
#define NETIF_F_GRO		16384	/* Generic receive offload */
#define NETIF_F_LRO		32768	/* large receive offload */

/* the GSO_MASK reserves bits 16 through 23 */
#define NETIF_F_FCOE_CRC	(1 << 24) /* FCoE CRC32 */
#define NETIF_F_SCTP_CSUM	(1 << 25) /* SCTP checksum offload */
#define NETIF_F_FCOE_MTU	(1 << 26) /* Supports max FCoE MTU, 2158 bytes */
#define NETIF_F_NTUPLE		(1 << 27) /* N-tuple filters supported */
#define NETIF_F_RXHASH		(1 << 28) /* Receive hashing offload */
#define NETIF_F_RXCSUM		(1 << 29) /* Receive checksumming offload */

	/* Segmentation offload features */
#define NETIF_F_GSO_SHIFT	16
#define NETIF_F_GSO_MASK	0x00ff0000
#define NETIF_F_TSO		(SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
#define NETIF_F_UFO		(SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
#define NETIF_F_GSO_ROBUST	(SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO_ECN		(SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO6		(SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
#define NETIF_F_FSO		(SKB_GSO_FCOE << NETIF_F_GSO_SHIFT)

	/* Features valid for ethtool to change */
	/* = all defined minus driver/device-class-specific */
#define NETIF_F_NEVER_CHANGE	(NETIF_F_HIGHDMA | NETIF_F_VLAN_CHALLENGED | \
				 NETIF_F_LLTX | NETIF_F_NETNS_LOCAL)
#define NETIF_F_ETHTOOL_BITS	(0x3f3fffff & ~NETIF_F_NEVER_CHANGE)

	/* List of features with software fallbacks. */
#define NETIF_F_GSO_SOFTWARE	(NETIF_F_TSO | NETIF_F_TSO_ECN | \
				 NETIF_F_TSO6 | NETIF_F_UFO)

#define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
#define NETIF_F_V4_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
#define NETIF_F_V6_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
#define NETIF_F_ALL_CSUM	(NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)

#define NETIF_F_ALL_TSO		(NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)

#define NETIF_F_ALL_TX_OFFLOADS	(NETIF_F_ALL_CSUM | NETIF_F_SG | \
				 NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
				 NETIF_F_SCTP_CSUM | NETIF_F_FCOE_CRC)

	/*
	 * If one device supports one of these features, then enable them
	 * for all in netdev_increment_features.
	 */
#define NETIF_F_ONE_FOR_ALL	(NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
				 NETIF_F_SG | NETIF_F_HIGHDMA | \
				 NETIF_F_FRAGLIST)

	/* changeable features with no special hardware requirements */
#define NETIF_F_SOFT_FEATURES	(NETIF_F_GSO | NETIF_F_GRO)

	/* Interface index. Unique device identifier	*/
	int			ifindex;
	int			iflink;

	struct net_device_stats	stats;
	atomic_long_t		rx_dropped; /* dropped packets by core network
					     * Do not use this in drivers.
					     */

#ifdef CONFIG_WIRELESS_EXT
	/* List of functions to handle Wireless Extensions (instead of
	 * ioctl).  See <net/iw_handler.h> for details. */
	const struct iw_handler_def *	wireless_handlers;
	/* Instance data managed by the core of Wireless Extensions. */
	struct iw_public_data *	wireless_data;
#endif
	/* Management operations */
	const struct net_device_ops *netdev_ops;
	const struct ethtool_ops *ethtool_ops;

	/* Hardware header description */
	const struct header_ops *header_ops;

	unsigned int		flags;	/* interface flags (a la BSD)	*/
	unsigned short		gflags;
	unsigned int		priv_flags; /* Like 'flags' but invisible to userspace. */
	unsigned short		padded;	/* How much padding added by alloc_netdev() */

	unsigned char		operstate; /* RFC2863 operstate */
	unsigned char		link_mode; /* mapping policy to operstate */

	unsigned int		mtu;	/* interface MTU value		*/
	unsigned short		type;	/* interface hardware type	*/
	unsigned short		hard_header_len;	/* hardware hdr length	*/

	/* extra head- and tailroom the hardware may need, but not in all
	 * cases can this be guaranteed, especially tailroom.  Some cases
	 * also use LL_MAX_HEADER instead of the hardware provided ones.
	 */
	unsigned short		needed_headroom;
	unsigned short		needed_tailroom;

	/* Interface address info. */
	unsigned char		perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
	unsigned char		addr_assign_type; /* hw address assignment type */
	unsigned char		addr_len;	/* hardware address length	*/
	unsigned short		dev_id;		/* for shared network cards */

	spinlock_t		addr_list_lock;
	struct netdev_hw_addr_list	uc;	/* Unicast mac addresses */
	struct netdev_hw_addr_list	mc;	/* Multicast mac addresses */
	int			uc_promisc;
	unsigned int		promiscuity;
	unsigned int		allmulti;


	/* Protocol specific pointers */

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	struct vlan_group __rcu	*vlgrp;		/* VLAN group */
#endif
#ifdef CONFIG_NET_DSA
	void			*dsa_ptr;	/* dsa specific data */
#endif
	void			*atalk_ptr;	/* AppleTalk link	*/
	struct in_device __rcu	*ip_ptr;	/* IPv4 specific data	*/
	struct dn_dev __rcu	*dn_ptr;	/* DECnet specific data */
	struct inet6_dev __rcu	*ip6_ptr;	/* IPv6 specific data */
	void			*ec_ptr;	/* Econet specific data	*/
	void			*ax25_ptr;	/* AX.25 specific data */
	struct wireless_dev	*ieee80211_ptr;	/* IEEE 802.11 specific data,
						   assign before registering */

/*
 * Cache lines mostly used on receive path (including eth_type_trans())
 */
	unsigned long		last_rx;	/* Time of last Rx */

	/* Interface address info used in eth_type_trans() */
	struct net_device	*master; /* Pointer to master device of a group,
					  * which this device is member of.
					  */

	unsigned char		*dev_addr;	/* hw address, (before bcast
						   because most packets are
						   unicast) */

	struct netdev_hw_addr_list	dev_addrs; /* list of device
						      hw addresses */

	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast add	*/

#ifdef CONFIG_RPS
	struct kset		*queues_kset;

	struct netdev_rx_queue	*_rx;

	/* Number of RX queues allocated at register_netdev() time */
	unsigned int		num_rx_queues;

	/* Number of RX queues currently active in device */
	unsigned int		real_num_rx_queues;

#ifdef CONFIG_RFS_ACCEL
	/* CPU reverse-mapping for RX completion interrupts, indexed
	 * by RX queue number.  Assigned by driver.  This must only be
	 * set if the ndo_rx_flow_steer operation is defined. */
	struct cpu_rmap		*rx_cpu_rmap;
#endif
#endif

	rx_handler_func_t __rcu	*rx_handler;
	void __rcu		*rx_handler_data;

	struct netdev_queue __rcu *ingress_queue;

/*
 * Cache lines mostly used on transmit path
 */
	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;

	/* Number of TX queues allocated at alloc_netdev_mq() time  */
	unsigned int		num_tx_queues;

	/* Number of TX queues currently active in device  */
	unsigned int		real_num_tx_queues;

	/* root qdisc from userspace point of view */
	struct Qdisc		*qdisc;

	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
	spinlock_t		tx_global_lock;

#ifdef CONFIG_XPS
	struct xps_dev_maps __rcu *xps_maps;
#endif

	/* These may be needed for future network-power-down code. */

	/*
	 * trans_start here is expensive for high speed devices on SMP,
	 * please use netdev_queue->trans_start instead.
	 */
	unsigned long		trans_start;	/* Time (in jiffies) of last Tx	*/

	int			watchdog_timeo; /* used by dev_watchdog() */
	struct timer_list	watchdog_timer;

	/* Number of references to this device */
	int __percpu		*pcpu_refcnt;

	/* delayed register/unregister */
	struct list_head	todo_list;
	/* device index hash chain */
	struct hlist_node	index_hlist;

	struct list_head	link_watch_list;

	/* register/unregister state machine */
	enum { NETREG_UNINITIALIZED=0,
	       NETREG_REGISTERED,	/* completed register_netdevice */
	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
	       NETREG_UNREGISTERED,	/* completed unregister todo */
	       NETREG_RELEASED,		/* called free_netdev */
	       NETREG_DUMMY,		/* dummy device for NAPI poll */
	} reg_state:16;

	enum {
		RTNL_LINK_INITIALIZED,
		RTNL_LINK_INITIALIZING,
	} rtnl_link_state:16;

	/* Called from unregister, can be used to call free_netdev */
	void (*destructor)(struct net_device *dev);

#ifdef CONFIG_NETPOLL
	struct netpoll_info	*npinfo;
#endif

#ifdef CONFIG_NET_NS
	/* Network namespace this network device is inside */
	struct net		*nd_net;
#endif

	/* mid-layer private */
	union {
		void				*ml_priv;
		struct pcpu_lstats __percpu	*lstats; /* loopback stats */
		struct pcpu_tstats __percpu	*tstats; /* tunnel stats */
		struct pcpu_dstats __percpu	*dstats; /* dummy stats */
	};
	/* GARP */
	struct garp_port __rcu	*garp_port;

	/* class/net/name entry */
	struct device		dev;

	/* space for optional device, statistics, and wireless sysfs groups */
	const struct attribute_group *sysfs_groups[4];

	/* rtnetlink link ops */
	const struct rtnl_link_ops *rtnl_link_ops;

	/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE		65536
	unsigned int		gso_max_size;

#ifdef CONFIG_DCB
	/* Data Center Bridging netlink ops */
	const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
	u8 num_tc;
	struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
	u8 prio_tc_map[TC_BITMASK + 1];

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
	/* max exchange id for FCoE LRO by ddp */
	unsigned int		fcoe_ddp_xid;
#endif
	/* n-tuple filter list attached to this device */
	struct ethtool_rx_ntuple_list ethtool_ntuple_list;

	/* phy device may attach itself for hardware timestamping */
	struct phy_device *phydev;

	/* group the device belongs to */
	int group;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)

#define NETDEV_ALIGN		32

static inline
int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
{
	return dev->prio_tc_map[prio & TC_BITMASK];
}

static inline
int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

	dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
	return 0;
}

static inline
void netdev_reset_tc(struct net_device *dev)
{
	dev->num_tc = 0;
	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
}

static inline
int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

	dev->tc_to_txq[tc].count = count;
	dev->tc_to_txq[tc].offset = offset;
	return 0;
}

static inline
int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
{
	if (num_tc > TC_MAX_QUEUE)
		return -EINVAL;

	dev->num_tc = num_tc;
	return 0;
}

static inline
int netdev_get_num_tc(struct net_device *dev)
{
	return dev->num_tc;
}

static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
					 unsigned int index)
{
	return &dev->_tx[index];
}

static inline void netdev_for_each_tx_queue(struct net_device *dev,
					    void (*f)(struct net_device *,
						      struct netdev_queue *,
						      void *),
					    void *arg)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		f(dev, &dev->_tx[i], arg);
}

/*
 * Net namespace inlines
 */
static inline
struct net *dev_net(const struct net_device *dev)
{
	return read_pnet(&dev->nd_net);
}

static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
#ifdef CONFIG_NET_NS
	release_net(dev->nd_net);
	dev->nd_net = hold_net(net);
#endif
}

static inline bool netdev_uses_dsa_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_DSA
	if (dev->dsa_ptr != NULL)
		return dsa_uses_dsa_tags(dev->dsa_ptr);
#endif

	return 0;
}

#ifndef CONFIG_NET_NS
static inline void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
{
	skb->dev = dev;
}
#else /* CONFIG_NET_NS */
void skb_set_dev(struct sk_buff *skb, struct net_device *dev);
#endif

static inline bool netdev_uses_trailer_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_TRAILER
	if (dev->dsa_ptr != NULL)
		return dsa_uses_trailer_tags(dev->dsa_ptr);
#endif

	return 0;
}

/**
 *	netdev_priv - access network device private data
 *	@dev: network device
 *
 * Get network device private data
 */
static inline void *netdev_priv(const struct net_device *dev)
{
	return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
}
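
/*
 * Example (sketch): the private area is co-allocated behind struct
 * net_device by alloc_netdev() and retrieved with netdev_priv();
 * struct foo_priv and foo_setup are hypothetical.
 *
 *	struct net_device *dev = alloc_netdev(sizeof(struct foo_priv),
 *					      "foo%d", foo_setup);
 *	struct foo_priv *priv = netdev_priv(dev);
 */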

/* Set the sysfs physical device reference for the network logical device
 * if set prior to registration will cause a symlink during initialization.
 */
#define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))

/* Set the sysfs device type for the network logical device to allow
 * fine-grained identification of different network device types. For
 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
 */
#define SET_NETDEV_DEVTYPE(net, devtype)	((net)->dev.type = (devtype))

/**
 *	netif_napi_add - initialize a napi context
 *	@dev:  network device
 *	@napi: napi context
 *	@poll: polling function
 *	@weight: default weight
 *
 * netif_napi_add() must be used to initialize a napi context prior to calling
 * *any* of the other napi related functions.
 */
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight);

/**
 *	netif_napi_del - remove a napi context
 *	@napi: napi context
 *
 * netif_napi_del() removes a napi context from the network device napi list
 */
void netif_napi_del(struct napi_struct *napi);

struct napi_gro_cb {
	/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
	void *frag0;

	/* Length of frag0. */
	unsigned int frag0_len;

	/* This indicates where we are processing relative to skb->data. */
	int data_offset;

	/* This is non-zero if the packet may be of the same flow. */
	int same_flow;

	/* This is non-zero if the packet cannot be merged with the new skb. */
	int flush;

	/* Number of segments aggregated. */
	int count;

	/* Free the skb? */
	int free;
};

#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)

struct packet_type {
	__be16			type;	/* This is really htons(ether_type). */
	struct net_device	*dev;	/* NULL is wildcarded here	     */
	int			(*func) (struct sk_buff *,
					 struct net_device *,
					 struct packet_type *,
					 struct net_device *);
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						u32 features);
	int			(*gso_send_check)(struct sk_buff *skb);
	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
						 struct sk_buff *skb);
	int			(*gro_complete)(struct sk_buff *skb);
	void			*af_packet_priv;
	struct list_head	list;
};

#include <linux/interrupt.h>
#include <linux/notifier.h>

extern rwlock_t				dev_base_lock;		/* Device list lock */

#define for_each_netdev(net, d)		\
		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_reverse(net, d)	\
		list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_rcu(net, d)		\
		list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n)	\
		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d)		\
		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue_rcu(net, d)		\
	list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
#define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)

static inline struct net_device *next_net_device(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = dev->dev_list.next;
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *next_net_device_rcu(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = rcu_dereference(list_next_rcu(&dev->dev_list));
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *first_net_device(struct net *net)
{
	return list_empty(&net->dev_base_head) ? NULL :
		net_device_entry(net->dev_base_head.next);
}

static inline struct net_device *first_net_device_rcu(struct net *net)
{
	struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));

	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

extern int		netdev_boot_setup_check(struct net_device *dev);
extern unsigned long	netdev_boot_base(const char *prefix, int unit);
extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
					      const char *hwaddr);
extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern void		dev_add_pack(struct packet_type *pt);
extern void		dev_remove_pack(struct packet_type *pt);
extern void		__dev_remove_pack(struct packet_type *pt);

extern struct net_device	*dev_get_by_flags_rcu(struct net *net, unsigned short flags,
						      unsigned short mask);
extern struct net_device	*dev_get_by_name(struct net *net, const char *name);
extern struct net_device	*dev_get_by_name_rcu(struct net *net, const char *name);
extern struct net_device	*__dev_get_by_name(struct net *net, const char *name);
extern int		dev_alloc_name(struct net_device *dev, const char *name);
extern int		dev_open(struct net_device *dev);
extern int		dev_close(struct net_device *dev);
extern void		dev_disable_lro(struct net_device *dev);
extern int		dev_queue_xmit(struct sk_buff *skb);
extern int		register_netdevice(struct net_device *dev);
extern void		unregister_netdevice_queue(struct net_device *dev,
						   struct list_head *head);
extern void		unregister_netdevice_many(struct list_head *head);
static inline void unregister_netdevice(struct net_device *dev)
{
	unregister_netdevice_queue(dev, NULL);
}

extern int		netdev_refcnt_read(const struct net_device *dev);
extern void		free_netdev(struct net_device *dev);
extern void		synchronize_net(void);
extern int		register_netdevice_notifier(struct notifier_block *nb);
extern int		unregister_netdevice_notifier(struct notifier_block *nb);
extern int		init_dummy_netdev(struct net_device *dev);
extern void		netdev_resync_ops(struct net_device *dev);

extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
extern struct net_device	*dev_get_by_index(struct net *net, int ifindex);
extern struct net_device	*__dev_get_by_index(struct net *net, int ifindex);
extern struct net_device	*dev_get_by_index_rcu(struct net *net, int ifindex);
extern int		dev_restart(struct net_device *dev);
#ifdef CONFIG_NETPOLL_TRAP
extern int		netpoll_trap(void);
#endif
extern int		skb_gro_receive(struct sk_buff **head,
					struct sk_buff *skb);
extern void		skb_gro_reset_offset(struct sk_buff *skb);

static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->data_offset;
}

static inline unsigned int skb_gro_len(const struct sk_buff *skb)
{
	return skb->len - NAPI_GRO_CB(skb)->data_offset;
}

static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
{
	NAPI_GRO_CB(skb)->data_offset += len;
}

static inline void *skb_gro_header_fast(struct sk_buff *skb,
					unsigned int offset)
{
	return NAPI_GRO_CB(skb)->frag0 + offset;
}

static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
{
	return NAPI_GRO_CB(skb)->frag0_len < hlen;
}

static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
					unsigned int offset)
{
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;
	return pskb_may_pull(skb, hlen) ? skb->data + offset : NULL;
}

static inline void *skb_gro_mac_header(struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb);
}

static inline void *skb_gro_network_header(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
	       skb_network_offset(skb);
}

static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
				  unsigned short type,
				  const void *daddr, const void *saddr,
				  unsigned len)
{
	if (!dev->header_ops || !dev->header_ops->create)
		return 0;

	return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
}

static inline int dev_parse_header(const struct sk_buff *skb,
				   unsigned char *haddr)
{
	const struct net_device *dev = skb->dev;

	if (!dev->header_ops || !dev->header_ops->parse)
		return 0;
	return dev->header_ops->parse(skb, haddr);
}

typedef int gifconf_func_t(struct net_device *dev, char __user *bufptr, int len);
extern int		register_gifconf(unsigned int family, gifconf_func_t *gifconf);
static inline int unregister_gifconf(unsigned int family)
{
	return register_gifconf(family, NULL);
}

/*
 * Incoming packets are placed on per-cpu queues
 */
struct softnet_data {
	struct Qdisc		*output_queue;
	struct Qdisc		**output_queue_tailp;
	struct list_head	poll_list;
	struct sk_buff		*completion_queue;
	struct sk_buff_head	process_queue;

	/* stats */
	unsigned int		processed;
	unsigned int		time_squeeze;
	unsigned int		cpu_collision;
	unsigned int		received_rps;

#ifdef CONFIG_RPS
	struct softnet_data	*rps_ipi_list;

	/* Elements below can be accessed between CPUs for RPS */
	struct call_single_data	csd ____cacheline_aligned_in_smp;
	struct softnet_data	*rps_ipi_next;
	unsigned int		cpu;
	unsigned int		input_queue_head;
	unsigned int		input_queue_tail;
#endif
	unsigned		dropped;
	struct sk_buff_head	input_pkt_queue;
	struct napi_struct	backlog;
};

static inline void input_queue_head_incr(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	sd->input_queue_head++;
#endif
}

static inline void input_queue_tail_incr_save(struct softnet_data *sd,
					      unsigned int *qtail)
{
#ifdef CONFIG_RPS
	*qtail = ++sd->input_queue_tail;
#endif
}

DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);

#define HAVE_NETIF_QUEUE

extern void __netif_schedule(struct Qdisc *q);

static inline void netif_schedule_queue(struct netdev_queue *txq)
{
	if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
		__netif_schedule(txq->qdisc);
}

static inline void netif_tx_schedule_all(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		netif_schedule_queue(netdev_get_tx_queue(dev, i));
}

static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
	clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}

/**
 *	netif_start_queue - allow transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 */
static inline void netif_start_queue(struct net_device *dev)
{
	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_start_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_start_queue(txq);
	}
}
static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap()) {
		netif_tx_start_queue(dev_queue);
		return;
	}
#endif
	if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
		__netif_schedule(dev_queue->qdisc);
}

/**
 *	netif_wake_queue - restart transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are available.
 */
static inline void netif_wake_queue(struct net_device *dev)
{
	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_wake_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_wake_queue(txq);
	}
}

static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
	if (WARN_ON(!dev_queue)) {
		pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
		return;
	}
	set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}

/**
 *	netif_stop_queue - stop transmitted packets
 *	@dev: network device
 *
 *	Stop upper layers calling the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are unavailable.
 */
static inline void netif_stop_queue(struct net_device *dev)
{
	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_stop_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_stop_queue(txq);
	}
}

static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
	return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}

/**
 *	netif_queue_stopped - test if transmit queue is flowblocked
 *	@dev: network device
 *
 *	Test if transmit queue on device is currently unable to send.
 */
static inline int netif_queue_stopped(const struct net_device *dev)
{
	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}

static inline int netif_tx_queue_frozen_or_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_XOFF_OR_FROZEN;
}

/**
 *	netif_running - test if up
 *	@dev: network device
 *
 *	Test if the device has been brought up.
 */
static inline int netif_running(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}

/*
 * Routines to manage the subqueues on a device.  We only need start,
 * stop, and a check if it's stopped.  All other device management is
 * done at the overall netdevice level.
 * Also test the device if we're multiqueue.
 */

/**
 *	netif_start_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Start individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	netif_tx_start_queue(txq);
}

/**
 *	netif_stop_subqueue - stop sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Stop individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	netif_tx_stop_queue(txq);
}

/**
 *	__netif_subqueue_stopped - test status of subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Check individual transmit queue of a device with multiple transmit queues.
 */
static inline int __netif_subqueue_stopped(const struct net_device *dev,
					   u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	return netif_tx_queue_stopped(txq);
}

static inline int netif_subqueue_stopped(const struct net_device *dev,
					 struct sk_buff *skb)
{
	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}

/**
 *	netif_wake_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Resume individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state))
		__netif_schedule(txq->qdisc);
}

/*
 * Returns a Tx hash for the given packet when dev->real_num_tx_queues
 * is used as the distribution range.
 */
static inline u16 skb_tx_hash(const struct net_device *dev,
			      const struct sk_buff *skb)
{
	return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
}

/**
 *	netif_is_multiqueue - test if device has multiple transmit queues
 *	@dev: network device
 *
 * Check if device has multiple transmit queues
 */
static inline int netif_is_multiqueue(const struct net_device *dev)
{
	return dev->num_tx_queues > 1;
}

extern int netif_set_real_num_tx_queues(struct net_device *dev,
					unsigned int txq);

#ifdef CONFIG_RPS
extern int netif_set_real_num_rx_queues(struct net_device *dev,
					unsigned int rxq);
#else
static inline int netif_set_real_num_rx_queues(struct net_device *dev,
					       unsigned int rxq)
{
	return 0;
}
#endif

static inline int netif_copy_real_num_queues(struct net_device *to_dev,
					     const struct net_device *from_dev)
{
	netif_set_real_num_tx_queues(to_dev, from_dev->real_num_tx_queues);
#ifdef CONFIG_RPS
	return netif_set_real_num_rx_queues(to_dev,
					    from_dev->real_num_rx_queues);
#else
	return 0;
#endif
}

/* Use this variant when it is known for sure that it
 * is executing from hardware interrupt context or with hardware interrupts
 * disabled.
 */
extern void dev_kfree_skb_irq(struct sk_buff *skb);

/* Use this variant in places where it could be invoked
 * from either hardware interrupt or other context, with hardware interrupts
 * either disabled or enabled.
 */
extern void dev_kfree_skb_any(struct sk_buff *skb);

#define HAVE_NETIF_RX 1
extern int		netif_rx(struct sk_buff *skb);
extern int		netif_rx_ni(struct sk_buff *skb);
#define HAVE_NETIF_RECEIVE_SKB 1
extern int		netif_receive_skb(struct sk_buff *skb);
extern gro_result_t	dev_gro_receive(struct napi_struct *napi,
					struct sk_buff *skb);
extern gro_result_t	napi_skb_finish(gro_result_t ret, struct sk_buff *skb);
extern gro_result_t	napi_gro_receive(struct napi_struct *napi,
					 struct sk_buff *skb);
extern void		napi_gro_flush(struct napi_struct *napi);
extern struct sk_buff *	napi_get_frags(struct napi_struct *napi);
extern gro_result_t	napi_frags_finish(struct napi_struct *napi,
					  struct sk_buff *skb,
					  gro_result_t ret);
extern struct sk_buff *	napi_frags_skb(struct napi_struct *napi);
extern gro_result_t	napi_gro_frags(struct napi_struct *napi);

static inline void napi_free_frags(struct napi_struct *napi)
{
	kfree_skb(napi->skb);
	napi->skb = NULL;
}

extern int netdev_rx_handler_register(struct net_device *dev,
				      rx_handler_func_t *rx_handler,
				      void *rx_handler_data);
extern void netdev_rx_handler_unregister(struct net_device *dev);

extern int		dev_valid_name(const char *name);
extern int		dev_ioctl(struct net *net, unsigned int cmd, void __user *);
extern int		dev_ethtool(struct net *net, struct ifreq *);
extern unsigned		dev_get_flags(const struct net_device *);
extern int		__dev_change_flags(struct net_device *, unsigned int flags);
extern int		dev_change_flags(struct net_device *, unsigned);
extern void		__dev_notify_flags(struct net_device *, unsigned int old_flags);
extern int		dev_change_name(struct net_device *, const char *);
extern int		dev_set_alias(struct net_device *, const char *, size_t);
extern int		dev_change_net_namespace(struct net_device *,
						 struct net *, const char *);
extern int		dev_set_mtu(struct net_device *, int);
extern void		dev_set_group(struct net_device *, int);
extern int		dev_set_mac_address(struct net_device *,
					    struct sockaddr *);
extern int		dev_hard_start_xmit(struct sk_buff *skb,
					    struct net_device *dev,
					    struct netdev_queue *txq);
extern int		dev_forward_skb(struct net_device *dev,
					struct sk_buff *skb);

extern int		netdev_budget;

/* Called by rtnetlink.c:rtnl_unlock() */
extern void netdev_run_todo(void);

/**
 *	dev_put - release reference to device
 *	@dev: network device
 *
 * Release reference to device to allow it to be freed.
 */
static inline void dev_put(struct net_device *dev)
{
	irqsafe_cpu_dec(*dev->pcpu_refcnt);
}

/**
 *	dev_hold - get reference to device
 *	@dev: network device
 *
 * Hold reference to device to keep it from being freed.
 */
static inline void dev_hold(struct net_device *dev)
{
	irqsafe_cpu_inc(*dev->pcpu_refcnt);
}

/* Carrier loss detection, dial on demand. The functions netif_carrier_on
 * and _off may be called from IRQ context, but it is the caller who is
 * responsible for serialization of these calls.
 *
 * The name carrier is inappropriate; these functions should really be
 * called netif_lowerlayer_*() because they represent the state of any
 * kind of lower layer, not just hardware media.
 */
extern void linkwatch_fire_event(struct net_device *dev);
extern void linkwatch_forget_dev(struct net_device *dev);

/**
 *	netif_carrier_ok - test if carrier present
 *	@dev: network device
 *
 * Check if carrier is present on device
 */
static inline int netif_carrier_ok(const struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}

extern unsigned long dev_trans_start(struct net_device *dev);

extern void __netdev_watchdog_up(struct net_device *dev);

extern void netif_carrier_on(struct net_device *dev);

extern void netif_carrier_off(struct net_device *dev);

extern void netif_notify_peers(struct net_device *dev);

/**
 *	netif_dormant_on - mark device as dormant.
 *	@dev: network device
 *
 * Mark device as dormant (as per RFC2863).
 *
 * The dormant state indicates that the relevant interface is not
 * actually in a condition to pass packets (i.e., it is not 'up') but is
 * in a "pending" state, waiting for some external event.  For "on-
 * demand" interfaces, this new state identifies the situation where the
 * interface is waiting for events to place it in the up state.
 */
static inline void netif_dormant_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant_off - set device as not dormant.
 *	@dev: network device
 *
 * Device is not in dormant state.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant - test if device is dormant
 *	@dev: network device
 *
 * Check if device is dormant.
 */
static inline int netif_dormant(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_DORMANT, &dev->state);
}

/**
 *	netif_oper_up - test if device is operational
 *	@dev: network device
 *
 * Check if carrier is operational
 */
static inline int netif_oper_up(const struct net_device *dev)
{
	return (dev->operstate == IF_OPER_UP ||
		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}

/**
 *	netif_device_present - is device available or removed
 *	@dev: network device
 *
 * Check if device has not been removed from system.
 */
static inline int netif_device_present(struct net_device *dev)
{
	return test_bit(__LINK_STATE_PRESENT, &dev->state);
}

extern void netif_device_detach(struct net_device *dev);

extern void netif_device_attach(struct net_device *dev);

/*
 * Network interface message level settings
 */
#define HAVE_NETIF_MSG 1

enum {
	NETIF_MSG_DRV		= 0x0001,
	NETIF_MSG_PROBE		= 0x0002,
	NETIF_MSG_LINK		= 0x0004,
	NETIF_MSG_TIMER		= 0x0008,
	NETIF_MSG_IFDOWN	= 0x0010,
	NETIF_MSG_IFUP		= 0x0020,
	NETIF_MSG_RX_ERR	= 0x0040,
	NETIF_MSG_TX_ERR	= 0x0080,
	NETIF_MSG_TX_QUEUED	= 0x0100,
	NETIF_MSG_INTR		= 0x0200,
	NETIF_MSG_TX_DONE	= 0x0400,
	NETIF_MSG_RX_STATUS	= 0x0800,
	NETIF_MSG_PKTDATA	= 0x1000,
	NETIF_MSG_HW		= 0x2000,
	NETIF_MSG_WOL		= 0x4000,
};

#define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)

static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
	/* use default */
	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
		return default_msg_enable_bits;
	if (debug_value == 0)	/* no output */
		return 0;
	/* set low N bits */
	return (1 << debug_value) - 1;
}
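
/*
 * Example (sketch): drivers usually seed msg_enable from a module
 * parameter; "debug" and the default mask are hypothetical.
 *
 *	static int debug = -1;		(-1 selects the default mask)
 *	module_param(debug, int, 0);
 *
 *	priv->msg_enable = netif_msg_init(debug,
 *			NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK);
 */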

static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
	spin_lock(&txq->_xmit_lock);
	txq->xmit_lock_owner = cpu;
}

static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
	spin_lock_bh(&txq->_xmit_lock);
	txq->xmit_lock_owner = smp_processor_id();
}

static inline int __netif_tx_trylock(struct netdev_queue *txq)
{
	int ok = spin_trylock(&txq->_xmit_lock);
	if (likely(ok))
		txq->xmit_lock_owner = smp_processor_id();
	return ok;
}

static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock(&txq->_xmit_lock);
}

static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock_bh(&txq->_xmit_lock);
}

static inline void txq_trans_update(struct netdev_queue *txq)
{
	if (txq->xmit_lock_owner != -1)
		txq->trans_start = jiffies;
}

/**
 *	netif_tx_lock - grab network device transmit lock
 *	@dev: network device
 *
 * Get network device transmit lock
 */
static inline void netif_tx_lock(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	spin_lock(&dev->tx_global_lock);
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* We are the only thread of execution doing a
		 * freeze, but we have to grab the _xmit_lock in
		 * order to synchronize with threads which are in
		 * the ->hard_start_xmit() handler and already
		 * checked the frozen bit.
		 */
		__netif_tx_lock(txq, cpu);
		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
		__netif_tx_unlock(txq);
	}
}

static inline void netif_tx_lock_bh(struct net_device *dev)
{
	local_bh_disable();
	netif_tx_lock(dev);
}

static inline void netif_tx_unlock(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* No need to grab the _xmit_lock here.  If the
		 * queue is not stopped for another reason, we
		 * force a schedule.
		 */
		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
		netif_schedule_queue(txq);
	}
	spin_unlock(&dev->tx_global_lock);
}

static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	netif_tx_unlock(dev);
	local_bh_enable();
}

#define HARD_TX_LOCK(dev, txq, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_lock(txq, cpu);		\
	}						\
}

#define HARD_TX_UNLOCK(dev, txq) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_unlock(txq);			\
	}						\
}

static inline void netif_tx_disable(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	local_bh_disable();
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		__netif_tx_lock(txq, cpu);
		netif_tx_stop_queue(txq);
		__netif_tx_unlock(txq);
	}
	local_bh_enable();
}

static inline void netif_addr_lock(struct net_device *dev)
{
	spin_lock(&dev->addr_list_lock);
}

static inline void netif_addr_lock_bh(struct net_device *dev)
{
	spin_lock_bh(&dev->addr_list_lock);
}

static inline void netif_addr_unlock(struct net_device *dev)
{
	spin_unlock(&dev->addr_list_lock);
}

static inline void netif_addr_unlock_bh(struct net_device *dev)
{
	spin_unlock_bh(&dev->addr_list_lock);
}

/*
 * dev_addrs walker. Should be used only for read access. Call with
 * rcu_read_lock held.
 */
#define for_each_dev_addr(dev, ha) \
		list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)

/* These functions live elsewhere (drivers/net/net_init.c, but related) */

extern void		ether_setup(struct net_device *dev);

/* Support for loadable net-drivers */
extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
				       void (*setup)(struct net_device *),
				       unsigned int txqs, unsigned int rxqs);
#define alloc_netdev(sizeof_priv, name, setup) \
	alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)

#define alloc_netdev_mq(sizeof_priv, name, setup, count) \
	alloc_netdev_mqs(sizeof_priv, name, setup, count, count)

extern int		register_netdev(struct net_device *dev);
extern void		unregister_netdev(struct net_device *dev);
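
/*
 * Example (sketch): the usual lifecycle of a loadable driver, error
 * handling trimmed; foo_priv and foo_setup are hypothetical.
 * register_netdev() takes the rtnl lock internally, so it must not be
 * called with it held.
 *
 *	dev = alloc_netdev(sizeof(struct foo_priv), "foo%d", foo_setup);
 *	err = register_netdev(dev);
 *	...
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 */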

/* General hardware address lists handling functions */
extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
				  struct netdev_hw_addr_list *from_list,
				  int addr_len, unsigned char addr_type);
extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
				   struct netdev_hw_addr_list *from_list,
				   int addr_len, unsigned char addr_type);
extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
			  struct netdev_hw_addr_list *from_list,
			  int addr_len);
extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
			     struct netdev_hw_addr_list *from_list,
			     int addr_len);
extern void __hw_addr_flush(struct netdev_hw_addr_list *list);
extern void __hw_addr_init(struct netdev_hw_addr_list *list);

/* Functions used for device addresses handling */
extern int dev_addr_add(struct net_device *dev, unsigned char *addr,
			unsigned char addr_type);
extern int dev_addr_del(struct net_device *dev, unsigned char *addr,
			unsigned char addr_type);
extern int dev_addr_add_multiple(struct net_device *to_dev,
				 struct net_device *from_dev,
				 unsigned char addr_type);
extern int dev_addr_del_multiple(struct net_device *to_dev,
				 struct net_device *from_dev,
				 unsigned char addr_type);
extern void dev_addr_flush(struct net_device *dev);
extern int dev_addr_init(struct net_device *dev);

/* Functions used for unicast addresses handling */
extern int dev_uc_add(struct net_device *dev, unsigned char *addr);
extern int dev_uc_del(struct net_device *dev, unsigned char *addr);
extern int dev_uc_sync(struct net_device *to, struct net_device *from);
extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
extern void dev_uc_flush(struct net_device *dev);
extern void dev_uc_init(struct net_device *dev);

/* Functions used for multicast addresses handling */
extern int dev_mc_add(struct net_device *dev, unsigned char *addr);
extern int dev_mc_add_global(struct net_device *dev, unsigned char *addr);
extern int dev_mc_del(struct net_device *dev, unsigned char *addr);
extern int dev_mc_del_global(struct net_device *dev, unsigned char *addr);
extern int dev_mc_sync(struct net_device *to, struct net_device *from);
extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
extern void dev_mc_flush(struct net_device *dev);
extern void dev_mc_init(struct net_device *dev);

/* Functions used for secondary unicast and multicast support */
extern void		dev_set_rx_mode(struct net_device *dev);
extern void		__dev_set_rx_mode(struct net_device *dev);
extern int		dev_set_promiscuity(struct net_device *dev, int inc);
extern int		dev_set_allmulti(struct net_device *dev, int inc);
extern void		netdev_state_change(struct net_device *dev);
extern int		netdev_bonding_change(struct net_device *dev,
					      unsigned long event);
extern void		netdev_features_change(struct net_device *dev);
/* Load a device via the kmod */
extern void		dev_load(struct net *net, const char *name);
extern void		dev_mcast_init(void);
extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					       struct rtnl_link_stats64 *storage);

extern int		netdev_max_backlog;
extern int		netdev_tstamp_prequeue;
extern int		weight_p;
extern int		netdev_set_master(struct net_device *dev, struct net_device *master);
extern int		netdev_set_bond_master(struct net_device *dev,
					       struct net_device *master);
extern int skb_checksum_help(struct sk_buff *skb);
extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features);
#ifdef CONFIG_BUG
extern void netdev_rx_csum_fault(struct net_device *dev);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
}
#endif
/* rx skb timestamps */
extern void		net_enable_timestamp(void);
extern void		net_disable_timestamp(void);

#ifdef CONFIG_PROC_FS
extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
extern void dev_seq_stop(struct seq_file *seq, void *v);
#endif

extern int netdev_class_create_file(struct class_attribute *class_attr);
extern void netdev_class_remove_file(struct class_attribute *class_attr);

extern struct kobj_ns_type_operations net_ns_type_operations;

extern char *netdev_drivername(const struct net_device *dev, char *buffer, int len);

extern void linkwatch_run_queue(void);

static inline u32 netdev_get_wanted_features(struct net_device *dev)
{
	return (dev->features & ~dev->hw_features) | dev->wanted_features;
}
u32 netdev_increment_features(u32 all, u32 one, u32 mask);
u32 netdev_fix_features(struct net_device *dev, u32 features);
void netdev_update_features(struct net_device *dev);

void netif_stacked_transfer_operstate(const struct net_device *rootdev,
				      struct net_device *dev);

u32 netif_skb_features(struct sk_buff *skb);

static inline int net_gso_ok(u32 features, int gso_type)
{
	int feature = gso_type << NETIF_F_GSO_SHIFT;
	return (features & feature) == feature;
}

static inline int skb_gso_ok(struct sk_buff *skb, u32 features)
{
	return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
	       (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
}

static inline int netif_needs_gso(struct sk_buff *skb, int features)
{
	return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
		unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
}

static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	dev->gso_max_size = size;
}

static inline int netif_is_bond_slave(struct net_device *dev)
{
	return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
}

extern struct pernet_operations __net_initdata loopback_net_ops;

static inline int dev_ethtool_get_settings(struct net_device *dev,
					   struct ethtool_cmd *cmd)
{
	if (!dev->ethtool_ops || !dev->ethtool_ops->get_settings)
		return -EOPNOTSUPP;
	return dev->ethtool_ops->get_settings(dev, cmd);
}

static inline u32 dev_ethtool_get_rx_csum(struct net_device *dev)
{
	if (dev->features & NETIF_F_RXCSUM)
		return 1;
	if (!dev->ethtool_ops || !dev->ethtool_ops->get_rx_csum)
		return 0;
	return dev->ethtool_ops->get_rx_csum(dev);
}

static inline u32 dev_ethtool_get_flags(struct net_device *dev)
{
	if (!dev->ethtool_ops || !dev->ethtool_ops->get_flags)
		return 0;
	return dev->ethtool_ops->get_flags(dev);
}

/* Logging, debugging and troubleshooting/diagnostic helpers. */

/* netdev_printk helpers, similar to dev_printk */

static inline const char *netdev_name(const struct net_device *dev)
{
	if (dev->reg_state != NETREG_REGISTERED)
		return "(unregistered net_device)";
	return dev->name;
}

extern int netdev_printk(const char *level, const struct net_device *dev,
			 const char *format, ...)
	__attribute__ ((format (printf, 3, 4)));
extern int netdev_emerg(const struct net_device *dev, const char *format, ...)
	__attribute__ ((format (printf, 2, 3)));
extern int netdev_alert(const struct net_device *dev, const char *format, ...)
	__attribute__ ((format (printf, 2, 3)));
extern int netdev_crit(const struct net_device *dev, const char *format, ...)
	__attribute__ ((format (printf, 2, 3)));
extern int netdev_err(const struct net_device *dev, const char *format, ...)
	__attribute__ ((format (printf, 2, 3)));
extern int netdev_warn(const struct net_device *dev, const char *format, ...)
	__attribute__ ((format (printf, 2, 3)));
extern int netdev_notice(const struct net_device *dev, const char *format, ...)
	__attribute__ ((format (printf, 2, 3)));
extern int netdev_info(const struct net_device *dev, const char *format, ...)
	__attribute__ ((format (printf, 2, 3)));

#define MODULE_ALIAS_NETDEV(device) \
	MODULE_ALIAS("netdev-" device)

#if defined(DEBUG)
#define netdev_dbg(__dev, format, args...)			\
	netdev_printk(KERN_DEBUG, __dev, format, ##args)
#elif defined(CONFIG_DYNAMIC_DEBUG)
#define netdev_dbg(__dev, format, args...)			\
do {								\
	dynamic_dev_dbg((__dev)->dev.parent, "%s: " format,	\
			netdev_name(__dev), ##args);		\
} while (0)
#else
#define netdev_dbg(__dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, __dev, format, ##args); \
	0;							\
})
#endif

#if defined(VERBOSE_DEBUG)
#define netdev_vdbg	netdev_dbg
#else

#define netdev_vdbg(dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, dev, format, ##args);	\
	0;							\
})
#endif

/*
 * netdev_WARN() acts like dev_printk(), but with the key difference
 * of using a WARN/WARN_ON to get the message out, including the
 * file/line information and a backtrace.
 */
#define netdev_WARN(dev, format, args...)			\
	WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args);

/* netif printk helpers, similar to netdev_printk */

#define netif_printk(priv, type, level, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_printk(level, (dev), fmt, ##args);	\
} while (0)

#define netif_level(level, priv, type, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_##level(dev, fmt, ##args);		\
} while (0)

#define netif_emerg(priv, type, dev, fmt, args...)		\
	netif_level(emerg, priv, type, dev, fmt, ##args)
#define netif_alert(priv, type, dev, fmt, args...)		\
	netif_level(alert, priv, type, dev, fmt, ##args)
#define netif_crit(priv, type, dev, fmt, args...)		\
	netif_level(crit, priv, type, dev, fmt, ##args)
#define netif_err(priv, type, dev, fmt, args...)		\
	netif_level(err, priv, type, dev, fmt, ##args)
#define netif_warn(priv, type, dev, fmt, args...)		\
	netif_level(warn, priv, type, dev, fmt, ##args)
#define netif_notice(priv, type, dev, fmt, args...)		\
	netif_level(notice, priv, type, dev, fmt, ##args)
#define netif_info(priv, type, dev, fmt, args...)		\
	netif_level(info, priv, type, dev, fmt, ##args)
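
/*
 * Example (sketch): conditional logging gated on the driver's
 * msg_enable bits; the two spellings below are equivalent:
 *
 *	if (netif_msg_link(priv))
 *		netdev_info(dev, "link up, %d Mbps\n", speed);
 *
 *	netif_info(priv, link, dev, "link up, %d Mbps\n", speed);
 */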

#if defined(DEBUG)
#define netif_dbg(priv, type, dev, format, args...)		\
	netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
#elif defined(CONFIG_DYNAMIC_DEBUG)
#define netif_dbg(priv, type, netdev, format, args...)		\
do {								\
	if (netif_msg_##type(priv))				\
		dynamic_dev_dbg((netdev)->dev.parent,		\
				"%s: " format,			\
				netdev_name(netdev), ##args);	\
} while (0)
#else
#define netif_dbg(priv, type, dev, format, args...)			\
({									\
	if (0)								\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;								\
})
#endif

#if defined(VERBOSE_DEBUG)
#define netif_vdbg	netif_dbg
#else
#define netif_vdbg(priv, type, dev, format, args...)		\
({								\
	if (0)							\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;							\
})
#endif

#endif /* __KERNEL__ */

#endif	/* _LINUX_NETDEVICE_H */