#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/if_link.h>

#ifdef __KERNEL__
#include <linux/pm_qos_params.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <asm/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/device.h>
#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>

#include <linux/ethtool.h>
#include <net/net_namespace.h>
#include <net/dsa.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif

struct vlan_group;
struct netpoll_info;
struct phy_device;
/* 802.11 specific */
struct wireless_dev;
#define SET_ETHTOOL_OPS(netdev,ops) \
	( (netdev)->ethtool_ops = (ops) )

#define HAVE_ALLOC_NETDEV		/* feature macro: alloc_xxxdev
					   functions are available. */
#define HAVE_FREE_NETDEV		/* free_netdev() */
#define HAVE_NETDEV_PRIV		/* netdev_priv() */

/* hardware address assignment types */
#define NET_ADDR_PERM		0	/* address is permanent (default) */
#define NET_ADDR_RANDOM		1	/* address is generated randomly */
#define NET_ADDR_STOLEN		2	/* address is stolen from another device */

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */

/*
 * Transmit return codes: they originate from three different namespaces --
 * qdisc return codes, driver transmit return codes and errno values.
 *
 * Drivers are allowed to return any one of those in their hard_start_xmit()
 * function.  Real network devices commonly used with qdiscs should only
 * return the driver transmit return codes, since with qdiscs the actual
 * transmission happens asynchronously and the value is not propagated to
 * higher layers.  Virtual devices transmit synchronously; there the driver
 * transmit return codes are consumed by dev_queue_xmit() and all others are
 * propagated to higher layers.
 */

/* qdisc ->enqueue() return codes. */
#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_DROP		0x01	/* skb dropped			*/
#define NET_XMIT_CN		0x02	/* congestion notification	*/
#define NET_XMIT_POLICED	0x03	/* skb is shot by police	*/
#define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority, prompting us to send less aggressively. */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)

/* Driver transmit return codes */
#define NETDEV_TX_MASK		0xf0

enum netdev_tx {
	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy */
	NETDEV_TX_LOCKED = 0x20,	/* driver tx lock was already taken */
};
typedef enum netdev_tx netdev_tx_t;

/*
 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
 * hard_start_xmit() must not return NET_XMIT_* indexes to netif_rx().
 */
static inline bool dev_xmit_complete(int rc)
{
	/*
	 * Positive cases with an skb consumed by a driver:
	 * - successful transmission (rc == NETDEV_TX_OK)
	 * - error while transmitting (rc < 0)
	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
	 */
	if (likely(rc < NET_XMIT_MASK))
		return true;

	return false;
}

#endif	/* __KERNEL__ */
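
/*
 * Illustrative sketch (not part of this header): how core code that calls a
 * driver's hard_start_xmit routine can use dev_xmit_complete() to decide
 * whether the skb was consumed.  The variable names below are hypothetical;
 * only dev_xmit_complete() and the NETDEV_TX_* codes come from this file.
 *
 *	rc = ops->ndo_start_xmit(skb, dev);
 *	if (dev_xmit_complete(rc)) {
 *		// skb was consumed by the driver (sent, dropped, or handed
 *		// to another device); it must not be touched again.
 *		return rc;
 *	}
 *	// rc is NETDEV_TX_BUSY or NETDEV_TX_LOCKED: the skb is still ours
 *	// and may be requeued and retried later.
 */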

#define MAX_ADDR_LEN	32		/* Largest hardware address length */

#ifdef __KERNEL__
/*
 *	Compute the worst case header length according to the protocols
 *	used.
 */

#if defined(CONFIG_WLAN) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#elif defined(CONFIG_TR) || defined(CONFIG_TR_MODULE)
# define LL_MAX_HEADER 48
#else
# define LL_MAX_HEADER 32
#endif

#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \
    !defined(CONFIG_NET_IPGRE) && !defined(CONFIG_NET_IPGRE_MODULE) && \
    !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \
    !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

/*
 *	Old network device statistics. Fields are native words
 *	(unsigned long) so they can be read and updated atomically.
 */

struct net_device_stats {
	unsigned long	rx_packets;
	unsigned long	tx_packets;
	unsigned long	rx_bytes;
	unsigned long	tx_bytes;
	unsigned long	rx_errors;
	unsigned long	tx_errors;
	unsigned long	rx_dropped;
	unsigned long	tx_dropped;
	unsigned long	multicast;
	unsigned long	collisions;
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;
	unsigned long	rx_crc_errors;
	unsigned long	rx_frame_errors;
	unsigned long	rx_fifo_errors;
	unsigned long	rx_missed_errors;
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};

#endif	/* __KERNEL__ */

/* Media selection options. */
enum {
	IF_PORT_UNKNOWN = 0,
	IF_PORT_10BASE2,
	IF_PORT_10BASET,
	IF_PORT_AUI,
	IF_PORT_100BASET,
	IF_PORT_100BASETX,
	IF_PORT_100BASEFX
};

#ifdef __KERNEL__

#include <linux/cache.h>
#include <linux/skbuff.h>

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netdev_hw_addr {
	struct list_head	list;
	unsigned char		addr[MAX_ADDR_LEN];
	unsigned char		type;
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_SLAVE		3
#define NETDEV_HW_ADDR_T_UNICAST	4
#define NETDEV_HW_ADDR_T_MULTICAST	5
	bool			synced;
	bool			global_use;
	int			refcount;
	struct rcu_head		rcu_head;
};

struct netdev_hw_addr_list {
	struct list_head list;
	int count;
};

#define netdev_hw_addr_list_count(l) ((l)->count)
#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
#define netdev_hw_addr_list_for_each(ha, l) \
	list_for_each_entry(ha, &(l)->list, list)

#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->uc)

#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->mc)
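
/*
 * Illustrative sketch (assumed driver code, not part of this header): a
 * driver's ndo_set_rx_mode()/ndo_set_multicast_list() handler typically walks
 * the device's multicast list with netdev_for_each_mc_addr() while the core
 * holds the address-list lock.  The "foo_*" names and FOO_MAX_MC_FILTERS are
 * hypothetical hardware helpers.
 *
 *	static void foo_set_rx_mode(struct net_device *dev)
 *	{
 *		struct netdev_hw_addr *ha;
 *
 *		if ((dev->flags & IFF_ALLMULTI) ||
 *		    netdev_mc_count(dev) > FOO_MAX_MC_FILTERS) {
 *			foo_hw_accept_all_multicast(dev);
 *			return;
 *		}
 *		netdev_for_each_mc_addr(ha, dev)
 *			foo_hw_add_mc_filter(dev, ha->addr);
 *	}
 */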

struct hh_cache {
	struct hh_cache *hh_next;	/* Next entry */
	atomic_t	hh_refcnt;	/* number of users */
/*
 * We want hh_output, hh_len, hh_lock and hh_data in a separate
 * cache line on SMP.  They are mostly read, but hh_refcnt may be
 * changed quite frequently, incurring cache line ping pongs.
 */
	__be16		hh_type ____cacheline_aligned_in_smp;
					/* protocol identifier, f.e ETH_P_IP
					 * NOTE: for VLANs, this will be the
					 * encapsulated type.
					 */
	u16		hh_len;		/* length of header */
	int		(*hh_output)(struct sk_buff *skb);
	seqlock_t	hh_lock;

	/* cached hardware header, HH_DATA_MOD aligned */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

static inline void hh_cache_put(struct hh_cache *hh)
{
	if (atomic_dec_and_test(&hh->hh_refcnt))
		kfree(hh);
}

/*
 * Reserve HH_DATA_MOD-aligned headroom for the hard header: at least
 * hard_header_len plus any extra headroom the device asked for, rounded
 * up so that the cached header copy stays aligned.  LL_ALLOCATED_SPACE
 * additionally accounts for the tailroom the device may need.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_ALLOCATED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(dev)->needed_tailroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
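
/*
 * Illustrative sketch (assumed caller code, not part of this header): a
 * protocol or tunnel that builds an outgoing frame usually sizes its
 * allocation with LL_ALLOCATED_SPACE() and reserves LL_RESERVED_SPACE()
 * before adding payload, so the device's hard_header_len, needed_headroom
 * and needed_tailroom are honoured.
 *
 *	skb = alloc_skb(LL_ALLOCATED_SPACE(dev) + payload_len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 *	skb_put(skb, payload_len);	// then build headers, e.g. via dev_hard_header()
 */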

struct header_ops {
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*rebuild)(struct sk_buff *skb);
#define HAVE_HEADER_CACHE
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
};

/* These flag bits are private to the generic network queueing
 * layer; they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t {
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
};

/*
 * This structure holds at boot time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

extern int __init netdev_boot_setup(char *str);

/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit.  This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-cpu poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	spinlock_t		poll_lock;
	int			poll_owner;
#endif

	unsigned int		gro_count;

	struct net_device	*dev;
	struct list_head	dev_list;
	struct sk_buff		*gro_list;
	struct sk_buff		*skb;
};

enum {
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_DISABLE,	/* Disable pending */
	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
};

enum gro_result {
	GRO_MERGED,
	GRO_MERGED_FREE,
	GRO_HELD,
	GRO_NORMAL,
	GRO_DROP,
};
typedef enum gro_result gro_result_t;

typedef struct sk_buff *rx_handler_func_t(struct sk_buff *skb);

extern void __napi_schedule(struct napi_struct *n);

static inline int napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_schedule_prep - check if napi can be scheduled
 *	@n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * insure only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
static inline int napi_schedule_prep(struct napi_struct *n)
{
	return !napi_disable_pending(n) &&
		!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}

/**
 *	napi_schedule - schedule NAPI poll
 *	@n: napi context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}

/* Try to reschedule poll. Called by the poll routine after napi_complete(). */
static inline int napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return 1;
	}
	return 0;
}

/**
 *	napi_complete - NAPI processing complete
 *	@n: napi context
 *
 * Mark NAPI processing as complete.
 */
extern void __napi_complete(struct napi_struct *n);
extern void napi_complete(struct napi_struct *n);
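
/*
 * Illustrative sketch (assumed driver code, not part of this header): the
 * usual NAPI pattern masks device receive interrupts and schedules the napi
 * context from the interrupt handler, then completes it from the poll
 * routine once less than the budget was consumed.  All "foo_*" names are
 * hypothetical.
 *
 *	static irqreturn_t foo_interrupt(int irq, void *data)
 *	{
 *		struct foo_priv *priv = data;
 *
 *		if (napi_schedule_prep(&priv->napi)) {
 *			foo_disable_rx_irq(priv);
 *			__napi_schedule(&priv->napi);
 *		}
 *		return IRQ_HANDLED;
 *	}
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
 *		int work = foo_clean_rx_ring(priv, budget);
 *
 *		if (work < budget) {
 *			napi_complete(napi);
 *			foo_enable_rx_irq(priv);
 *		}
 *		return work;
 *	}
 */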

/**
 *	napi_disable - prevent NAPI from scheduling
 *	@n: napi context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
static inline void napi_disable(struct napi_struct *n)
{
	set_bit(NAPI_STATE_DISABLE, &n->state);
	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	clear_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_enable - enable NAPI scheduling
 *	@n: napi context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}

#ifdef CONFIG_SMP
/**
 *	napi_synchronize - wait until NAPI is not running
 *	@n: napi context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	while (test_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
}
#else
# define napi_synchronize(n)	barrier()
#endif

enum netdev_queue_state_t {
	__QUEUE_STATE_XOFF,
	__QUEUE_STATE_FROZEN,
#define QUEUE_STATE_XOFF_OR_FROZEN ((1 << __QUEUE_STATE_XOFF)		| \
				    (1 << __QUEUE_STATE_FROZEN))
};

struct netdev_queue {
/*
 * read mostly part
 */
	struct net_device	*dev;
	struct Qdisc		*qdisc;
	unsigned long		state;
	struct Qdisc		*qdisc_sleeping;
#ifdef CONFIG_RPS
	struct kobject		kobj;
#endif
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	int			numa_node;
#endif
/*
 * write mostly part
 */
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	int			xmit_lock_owner;
	/*
	 * please use this field instead of dev->trans_start
	 */
	unsigned long		trans_start;
} ____cacheline_aligned_in_smp;

static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	return q->numa_node;
#else
	return NUMA_NO_NODE;
#endif
}

static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	q->numa_node = node;
#endif
}
#ifdef CONFIG_RPS
/*
 * This structure holds an RPS map which can be of variable length.  The
 * map is an array of CPUs.
 */
struct rps_map {
	unsigned int len;
	struct rcu_head rcu;
	u16 cpus[0];
};
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + (_num * sizeof(u16)))

/*
 * The rps_dev_flow structure contains the mapping of a flow to a CPU and the
 * tail pointer for that CPU's input queue at the time of last enqueue.
 */
struct rps_dev_flow {
	u16 cpu;
	u16 fill;
	unsigned int last_qtail;
};

/*
 * The rps_dev_flow_table structure contains a table of flow mappings.
 */
struct rps_dev_flow_table {
	unsigned int mask;
	struct rcu_head rcu;
	struct work_struct free_work;
	struct rps_dev_flow flows[0];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
    (_num * sizeof(struct rps_dev_flow)))

/*
 * The rps_sock_flow_table contains mappings of flows to the last CPU
 * on which they were processed by the application (set in recvmsg).
 */
struct rps_sock_flow_table {
	unsigned int mask;
	u16 ents[0];
};
#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
    (_num * sizeof(u16)))

#define RPS_NO_CPU 0xffff

static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
					u32 hash)
{
	if (table && hash) {
		unsigned int cpu, index = hash & table->mask;

		/* We only give a hint, preemption can change cpu under us */
		cpu = raw_smp_processor_id();

		if (table->ents[index] != cpu)
			table->ents[index] = cpu;
	}
}

static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
				       u32 hash)
{
	if (table && hash)
		table->ents[hash & table->mask] = RPS_NO_CPU;
}

extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;

/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
	struct rps_map __rcu		*rps_map;
	struct rps_dev_flow_table __rcu	*rps_flow_table;
	struct kobject			kobj;
	struct net_device		*dev;
} ____cacheline_aligned_in_smp;
#endif /* CONFIG_RPS */

#ifdef CONFIG_XPS
/*
 * This structure holds an XPS map which can be of variable length.  The
 * map is an array of queues.
 */
struct xps_map {
	unsigned int len;
	unsigned int alloc_len;
	struct rcu_head rcu;
	u16 queues[0];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + (_num * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map))	\
    / sizeof(u16))

/*
 * This structure holds all XPS maps for device.  Maps are indexed by CPU.
 */
struct xps_dev_maps {
	struct rcu_head rcu;
	struct xps_map __rcu *cpu_map[0];
};
#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) +		\
    (nr_cpu_ids * sizeof(struct xps_map *)))
#endif /* CONFIG_XPS */

/*
 * This structure defines the management hooks for network devices.
 * The following hooks can be defined; unless noted otherwise, they are
 * optional and may be left as a null pointer.
 *
 * ndo_init/ndo_uninit: called when a device is registered/removed.
 * ndo_open/ndo_stop: called when a device transitions to/from the up state.
 * ndo_start_xmit: called when a packet needs to be transmitted; must return
 *	one of the NETDEV_TX_* codes (required).
 * ndo_select_queue: chooses a transmit queue on multiqueue devices.
 * ndo_set_rx_mode/ndo_set_multicast_list/ndo_change_rx_flags: push the
 *	receive filtering state (address lists, promiscuity) to the hardware.
 * ndo_set_mac_address/ndo_validate_addr: change and validate the station
 *	address.
 * ndo_do_ioctl/ndo_set_config/ndo_change_mtu: device-private ioctls,
 *	interface map changes and MTU changes.
 * ndo_tx_timeout: called when the transmit queue watchdog fires.
 * ndo_get_stats64/ndo_get_stats: return device statistics.
 * ndo_vlan_rx_*: VLAN group registration and per-VID filtering.
 * ndo_poll_controller/ndo_netpoll_*: netpoll/netconsole support.
 * ndo_set_vf_*/ndo_get_vf_*: SR-IOV virtual function configuration.
 * ndo_fcoe_*: FCoE offload hooks.
 */
#define HAVE_NET_DEVICE_OPS
struct net_device_ops {
	int			(*ndo_init)(struct net_device *dev);
	void			(*ndo_uninit)(struct net_device *dev);
	int			(*ndo_open)(struct net_device *dev);
	int			(*ndo_stop)(struct net_device *dev);
	netdev_tx_t		(*ndo_start_xmit) (struct sk_buff *skb,
						   struct net_device *dev);
	u16			(*ndo_select_queue)(struct net_device *dev,
						    struct sk_buff *skb);
	void			(*ndo_change_rx_flags)(struct net_device *dev,
						       int flags);
	void			(*ndo_set_rx_mode)(struct net_device *dev);
	void			(*ndo_set_multicast_list)(struct net_device *dev);
	int			(*ndo_set_mac_address)(struct net_device *dev,
						       void *addr);
	int			(*ndo_validate_addr)(struct net_device *dev);
	int			(*ndo_do_ioctl)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
	int			(*ndo_set_config)(struct net_device *dev,
						  struct ifmap *map);
	int			(*ndo_change_mtu)(struct net_device *dev,
						  int new_mtu);
	int			(*ndo_neigh_setup)(struct net_device *dev,
						   struct neigh_parms *);
	void			(*ndo_tx_timeout) (struct net_device *dev);

	struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
						     struct rtnl_link_stats64 *storage);
	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);

	void			(*ndo_vlan_rx_register)(struct net_device *dev,
							struct vlan_group *grp);
	void			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
						       unsigned short vid);
	void			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
							unsigned short vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*ndo_poll_controller)(struct net_device *dev);
	int			(*ndo_netpoll_setup)(struct net_device *dev,
						     struct netpoll_info *info);
	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
	int			(*ndo_set_vf_mac)(struct net_device *dev,
						  int queue, u8 *mac);
	int			(*ndo_set_vf_vlan)(struct net_device *dev,
						   int queue, u16 vlan, u8 qos);
	int			(*ndo_set_vf_tx_rate)(struct net_device *dev,
						      int vf, int rate);
	int			(*ndo_get_vf_config)(struct net_device *dev,
						     int vf,
						     struct ifla_vf_info *ivf);
	int			(*ndo_set_vf_port)(struct net_device *dev,
						   int vf,
						   struct nlattr *port[]);
	int			(*ndo_get_vf_port)(struct net_device *dev,
						   int vf, struct sk_buff *skb);
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
	int			(*ndo_fcoe_enable)(struct net_device *dev);
	int			(*ndo_fcoe_disable)(struct net_device *dev);
	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
						      u16 xid,
						      struct scatterlist *sgl,
						      unsigned int sgc);
	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
						     u16 xid);
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
						    u64 *wwn, int type);
#endif
};
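
/*
 * Illustrative sketch (assumed driver code, not part of this header): a
 * minimal driver instantiates a net_device_ops table with at least open,
 * stop and start_xmit and points dev->netdev_ops at it before registering
 * the device.  The "foo_*" functions are hypothetical.
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		.ndo_open		= foo_open,
 *		.ndo_stop		= foo_close,
 *		.ndo_start_xmit		= foo_start_xmit,
 *		.ndo_set_multicast_list	= foo_set_rx_mode,
 *		.ndo_set_mac_address	= eth_mac_addr,
 *		.ndo_validate_addr	= eth_validate_addr,
 *		.ndo_change_mtu		= eth_change_mtu,
 *	};
 *
 *	dev->netdev_ops = &foo_netdev_ops;
 */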

/*
 *	The DEVICE structure.
 *	Actually, this whole structure is a big mistake.  It mixes I/O
 *	data with strictly "high-level" data, and it has to know about
 *	almost every data structure used in the INET module.
 *
 *	FIXME: cleanup struct net_device such that network protocol info
 *	moves out.
 */
840struct net_device {
841
842
843
844
845
846
847 char name[IFNAMSIZ];
848
849 struct pm_qos_request_list pm_qos_req;
850
851
852 struct hlist_node name_hlist;
853
854 char *ifalias;
855
856
857
858
859
860 unsigned long mem_end;
861 unsigned long mem_start;
862 unsigned long base_addr;
863 unsigned int irq;
864
865
866
867
868
869
870 unsigned char if_port;
871 unsigned char dma;
872
873 unsigned long state;
874
875 struct list_head dev_list;
876 struct list_head napi_list;
877 struct list_head unreg_list;
878
879
880 unsigned long features;
881#define NETIF_F_SG 1
882#define NETIF_F_IP_CSUM 2
883#define NETIF_F_NO_CSUM 4
884#define NETIF_F_HW_CSUM 8
885#define NETIF_F_IPV6_CSUM 16
886#define NETIF_F_HIGHDMA 32
887#define NETIF_F_FRAGLIST 64
888#define NETIF_F_HW_VLAN_TX 128
889#define NETIF_F_HW_VLAN_RX 256
890#define NETIF_F_HW_VLAN_FILTER 512
891#define NETIF_F_VLAN_CHALLENGED 1024
892#define NETIF_F_GSO 2048
893#define NETIF_F_LLTX 4096
894
895#define NETIF_F_NETNS_LOCAL 8192
896#define NETIF_F_GRO 16384
897#define NETIF_F_LRO 32768
898
899
900#define NETIF_F_FCOE_CRC (1 << 24)
901#define NETIF_F_SCTP_CSUM (1 << 25)
902#define NETIF_F_FCOE_MTU (1 << 26)
903#define NETIF_F_NTUPLE (1 << 27)
904#define NETIF_F_RXHASH (1 << 28)
905
906
907#define NETIF_F_GSO_SHIFT 16
908#define NETIF_F_GSO_MASK 0x00ff0000
909#define NETIF_F_TSO (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
910#define NETIF_F_UFO (SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
911#define NETIF_F_GSO_ROBUST (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
912#define NETIF_F_TSO_ECN (SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
913#define NETIF_F_TSO6 (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
914#define NETIF_F_FSO (SKB_GSO_FCOE << NETIF_F_GSO_SHIFT)
915
916
917#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | \
918 NETIF_F_TSO6 | NETIF_F_UFO)
919
920
921#define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
922#define NETIF_F_V4_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
923#define NETIF_F_V6_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
924#define NETIF_F_ALL_CSUM (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)
925
926
927
928
929
930#define NETIF_F_ONE_FOR_ALL (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
931 NETIF_F_SG | NETIF_F_HIGHDMA | \
932 NETIF_F_FRAGLIST)
933
934
935 int ifindex;
936 int iflink;
937
938 struct net_device_stats stats;
939 atomic_long_t rx_dropped;
940
941
942
943#ifdef CONFIG_WIRELESS_EXT
944
945
946 const struct iw_handler_def * wireless_handlers;
947
948 struct iw_public_data * wireless_data;
949#endif
950
951 const struct net_device_ops *netdev_ops;
952 const struct ethtool_ops *ethtool_ops;
953
954
955 const struct header_ops *header_ops;
956
957 unsigned int flags;
958 unsigned short gflags;
959 unsigned int priv_flags;
960 unsigned short padded;
961
962 unsigned char operstate;
963 unsigned char link_mode;
964
965 unsigned int mtu;
966 unsigned short type;
967 unsigned short hard_header_len;
968
969
970
971
972
973 unsigned short needed_headroom;
974 unsigned short needed_tailroom;
975
976
977 unsigned char perm_addr[MAX_ADDR_LEN];
978 unsigned char addr_assign_type;
979 unsigned char addr_len;
980 unsigned short dev_id;
981
982 spinlock_t addr_list_lock;
983 struct netdev_hw_addr_list uc;
984 struct netdev_hw_addr_list mc;
985 int uc_promisc;
986 unsigned int promiscuity;
987 unsigned int allmulti;
988
989
990
991
992#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
993 struct vlan_group __rcu *vlgrp;
994#endif
995#ifdef CONFIG_NET_DSA
996 void *dsa_ptr;
997#endif
998 void *atalk_ptr;
999 struct in_device __rcu *ip_ptr;
1000 struct dn_dev __rcu *dn_ptr;
1001 struct inet6_dev __rcu *ip6_ptr;
1002 void *ec_ptr;
1003 void *ax25_ptr;
1004 struct wireless_dev *ieee80211_ptr;
1005
1006
1007
1008
1009
1010 unsigned long last_rx;
1011
1012
1013
1014
1015
1016
1017
1018 struct net_device *master;
1019
1020
1021
1022
1023 unsigned char *dev_addr;
1024
1025
1026
1027 struct netdev_hw_addr_list dev_addrs;
1028
1029
1030 unsigned char broadcast[MAX_ADDR_LEN];
1031
1032#ifdef CONFIG_RPS
1033 struct kset *queues_kset;
1034
1035 struct netdev_rx_queue *_rx;
1036
1037
1038 unsigned int num_rx_queues;
1039
1040
1041 unsigned int real_num_rx_queues;
1042#endif
1043
1044 rx_handler_func_t __rcu *rx_handler;
1045 void __rcu *rx_handler_data;
1046
1047 struct netdev_queue __rcu *ingress_queue;
1048
1049
1050
1051
1052 struct netdev_queue *_tx ____cacheline_aligned_in_smp;
1053
1054
1055 unsigned int num_tx_queues;
1056
1057
1058 unsigned int real_num_tx_queues;
1059
1060
1061 struct Qdisc *qdisc;
1062
1063 unsigned long tx_queue_len;
1064 spinlock_t tx_global_lock;
1065
1066#ifdef CONFIG_XPS
1067 struct xps_dev_maps __rcu *xps_maps;
1068#endif
1069
1070
1071
1072
1073
1074
1075
1076 unsigned long trans_start;
1077
1078 int watchdog_timeo;
1079 struct timer_list watchdog_timer;
1080
1081
1082 int __percpu *pcpu_refcnt;
1083
1084
1085 struct list_head todo_list;
1086
1087 struct hlist_node index_hlist;
1088
1089 struct list_head link_watch_list;
1090
1091
1092 enum { NETREG_UNINITIALIZED=0,
1093 NETREG_REGISTERED,
1094 NETREG_UNREGISTERING,
1095 NETREG_UNREGISTERED,
1096 NETREG_RELEASED,
1097 NETREG_DUMMY,
1098 } reg_state:16;
1099
1100 enum {
1101 RTNL_LINK_INITIALIZED,
1102 RTNL_LINK_INITIALIZING,
1103 } rtnl_link_state:16;
1104
1105
1106 void (*destructor)(struct net_device *dev);
1107
1108#ifdef CONFIG_NETPOLL
1109 struct netpoll_info *npinfo;
1110#endif
1111
1112#ifdef CONFIG_NET_NS
1113
1114 struct net *nd_net;
1115#endif
1116
1117
1118 union {
1119 void *ml_priv;
1120 struct pcpu_lstats __percpu *lstats;
1121 struct pcpu_tstats __percpu *tstats;
1122 struct pcpu_dstats __percpu *dstats;
1123 };
1124
1125 struct garp_port __rcu *garp_port;
1126
1127
1128 struct device dev;
1129
1130 const struct attribute_group *sysfs_groups[4];
1131
1132
1133 const struct rtnl_link_ops *rtnl_link_ops;
1134
1135
1136 unsigned long vlan_features;
1137
1138
1139#define GSO_MAX_SIZE 65536
1140 unsigned int gso_max_size;
1141
1142#ifdef CONFIG_DCB
1143
1144 const struct dcbnl_rtnl_ops *dcbnl_ops;
1145#endif
1146
1147#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
1148
1149 unsigned int fcoe_ddp_xid;
1150#endif
1151
1152 struct ethtool_rx_ntuple_list ethtool_ntuple_list;
1153
1154
1155 struct phy_device *phydev;
1156};
1157#define to_net_dev(d) container_of(d, struct net_device, dev)
1158
1159#define NETDEV_ALIGN 32
1160
1161static inline
1162struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
1163 unsigned int index)
1164{
1165 return &dev->_tx[index];
1166}
1167
1168static inline void netdev_for_each_tx_queue(struct net_device *dev,
1169 void (*f)(struct net_device *,
1170 struct netdev_queue *,
1171 void *),
1172 void *arg)
1173{
1174 unsigned int i;
1175
1176 for (i = 0; i < dev->num_tx_queues; i++)
1177 f(dev, &dev->_tx[i], arg);
1178}
1179
1180
1181
1182
1183static inline
1184struct net *dev_net(const struct net_device *dev)
1185{
1186 return read_pnet(&dev->nd_net);
1187}
1188
1189static inline
1190void dev_net_set(struct net_device *dev, struct net *net)
1191{
1192#ifdef CONFIG_NET_NS
1193 release_net(dev->nd_net);
1194 dev->nd_net = hold_net(net);
1195#endif
1196}
1197
1198static inline bool netdev_uses_dsa_tags(struct net_device *dev)
1199{
1200#ifdef CONFIG_NET_DSA_TAG_DSA
1201 if (dev->dsa_ptr != NULL)
1202 return dsa_uses_dsa_tags(dev->dsa_ptr);
1203#endif
1204
1205 return 0;
1206}
1207
1208#ifndef CONFIG_NET_NS
1209static inline void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
1210{
1211 skb->dev = dev;
1212}
1213#else
1214void skb_set_dev(struct sk_buff *skb, struct net_device *dev);
1215#endif
1216
1217static inline bool netdev_uses_trailer_tags(struct net_device *dev)
1218{
1219#ifdef CONFIG_NET_DSA_TAG_TRAILER
1220 if (dev->dsa_ptr != NULL)
1221 return dsa_uses_trailer_tags(dev->dsa_ptr);
1222#endif
1223
1224 return 0;
1225}
1226
1227
1228
1229
1230
1231
1232
1233static inline void *netdev_priv(const struct net_device *dev)
1234{
1235 return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
1236}
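
/*
 * Illustrative sketch (assumed driver code, not part of this header): the
 * private area requested from alloc_netdev() is reached with netdev_priv();
 * drivers do not embed a struct net_device inside their own structure.
 * "struct foo_priv" and foo_setup are hypothetical.
 *
 *	dev = alloc_netdev(sizeof(struct foo_priv), "foo%d", foo_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	priv = netdev_priv(dev);
 *	priv->dev = dev;
 */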
1237
1238
1239
1240
1241#define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
1242
1243
1244
1245
1246
1247#define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))

/**
 *	netif_napi_add - initialize a napi context
 *	@dev:  network device
 *	@napi: napi context
 *	@poll: polling function
 *	@weight: default weight
 *
 * netif_napi_add() must be used to initialize a napi context prior to calling
 * *any* of the other napi related functions.
 */
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight);

/**
 *	netif_napi_del - remove a napi context
 *	@napi: napi context
 *
 * netif_napi_del() removes a napi context from the network device napi list
 */
void netif_napi_del(struct napi_struct *napi);
1269
1270struct napi_gro_cb {
1271
1272 void *frag0;
1273
1274
1275 unsigned int frag0_len;
1276
1277
1278 int data_offset;
1279
1280
1281 int same_flow;
1282
1283
1284 int flush;
1285
1286
1287 int count;
1288
1289
1290 int free;
1291};
1292
1293#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
1294
1295struct packet_type {
1296 __be16 type;
1297 struct net_device *dev;
1298 int (*func) (struct sk_buff *,
1299 struct net_device *,
1300 struct packet_type *,
1301 struct net_device *);
1302 struct sk_buff *(*gso_segment)(struct sk_buff *skb,
1303 int features);
1304 int (*gso_send_check)(struct sk_buff *skb);
1305 struct sk_buff **(*gro_receive)(struct sk_buff **head,
1306 struct sk_buff *skb);
1307 int (*gro_complete)(struct sk_buff *skb);
1308 void *af_packet_priv;
1309 struct list_head list;
1310};
1311
1312#include <linux/interrupt.h>
1313#include <linux/notifier.h>
1314
1315extern rwlock_t dev_base_lock;
1316
1317
1318#define for_each_netdev(net, d) \
1319 list_for_each_entry(d, &(net)->dev_base_head, dev_list)
1320#define for_each_netdev_reverse(net, d) \
1321 list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
1322#define for_each_netdev_rcu(net, d) \
1323 list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
1324#define for_each_netdev_safe(net, d, n) \
1325 list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
1326#define for_each_netdev_continue(net, d) \
1327 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
1328#define for_each_netdev_continue_rcu(net, d) \
1329 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
1330#define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
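
/*
 * Illustrative sketch (not part of this header): walking the per-namespace
 * device list.  The plain iterators require the RTNL or dev_base_lock; the
 * _rcu variant may be used from an RCU read-side critical section.
 * do_something() is a hypothetical helper.
 *
 *	rcu_read_lock();
 *	for_each_netdev_rcu(&init_net, dev) {
 *		if (netif_running(dev))
 *			do_something(dev);
 *	}
 *	rcu_read_unlock();
 */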
1331
1332static inline struct net_device *next_net_device(struct net_device *dev)
1333{
1334 struct list_head *lh;
1335 struct net *net;
1336
1337 net = dev_net(dev);
1338 lh = dev->dev_list.next;
1339 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
1340}
1341
1342static inline struct net_device *next_net_device_rcu(struct net_device *dev)
1343{
1344 struct list_head *lh;
1345 struct net *net;
1346
1347 net = dev_net(dev);
1348 lh = rcu_dereference(dev->dev_list.next);
1349 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
1350}
1351
1352static inline struct net_device *first_net_device(struct net *net)
1353{
1354 return list_empty(&net->dev_base_head) ? NULL :
1355 net_device_entry(net->dev_base_head.next);
1356}
1357
1358extern int netdev_boot_setup_check(struct net_device *dev);
1359extern unsigned long netdev_boot_base(const char *prefix, int unit);
1360extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
1361 const char *hwaddr);
1362extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
1363extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
1364extern void dev_add_pack(struct packet_type *pt);
1365extern void dev_remove_pack(struct packet_type *pt);
1366extern void __dev_remove_pack(struct packet_type *pt);
1367
1368extern struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
1369 unsigned short mask);
1370extern struct net_device *dev_get_by_name(struct net *net, const char *name);
1371extern struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
1372extern struct net_device *__dev_get_by_name(struct net *net, const char *name);
1373extern int dev_alloc_name(struct net_device *dev, const char *name);
1374extern int dev_open(struct net_device *dev);
1375extern int dev_close(struct net_device *dev);
1376extern void dev_disable_lro(struct net_device *dev);
1377extern int dev_queue_xmit(struct sk_buff *skb);
1378extern int register_netdevice(struct net_device *dev);
1379extern void unregister_netdevice_queue(struct net_device *dev,
1380 struct list_head *head);
1381extern void unregister_netdevice_many(struct list_head *head);
1382static inline void unregister_netdevice(struct net_device *dev)
1383{
1384 unregister_netdevice_queue(dev, NULL);
1385}
1386
1387extern int netdev_refcnt_read(const struct net_device *dev);
1388extern void free_netdev(struct net_device *dev);
1389extern void synchronize_net(void);
1390extern int register_netdevice_notifier(struct notifier_block *nb);
1391extern int unregister_netdevice_notifier(struct notifier_block *nb);
1392extern int init_dummy_netdev(struct net_device *dev);
1393extern void netdev_resync_ops(struct net_device *dev);
1394
1395extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
1396extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
1397extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
1398extern struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
1399extern int dev_restart(struct net_device *dev);
1400#ifdef CONFIG_NETPOLL_TRAP
1401extern int netpoll_trap(void);
1402#endif
1403extern int skb_gro_receive(struct sk_buff **head,
1404 struct sk_buff *skb);
1405extern void skb_gro_reset_offset(struct sk_buff *skb);
1406
1407static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
1408{
1409 return NAPI_GRO_CB(skb)->data_offset;
1410}
1411
1412static inline unsigned int skb_gro_len(const struct sk_buff *skb)
1413{
1414 return skb->len - NAPI_GRO_CB(skb)->data_offset;
1415}
1416
1417static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
1418{
1419 NAPI_GRO_CB(skb)->data_offset += len;
1420}
1421
1422static inline void *skb_gro_header_fast(struct sk_buff *skb,
1423 unsigned int offset)
1424{
1425 return NAPI_GRO_CB(skb)->frag0 + offset;
1426}
1427
1428static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
1429{
1430 return NAPI_GRO_CB(skb)->frag0_len < hlen;
1431}
1432
1433static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
1434 unsigned int offset)
1435{
1436 NAPI_GRO_CB(skb)->frag0 = NULL;
1437 NAPI_GRO_CB(skb)->frag0_len = 0;
1438 return pskb_may_pull(skb, hlen) ? skb->data + offset : NULL;
1439}
1440
1441static inline void *skb_gro_mac_header(struct sk_buff *skb)
1442{
1443 return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb);
1444}
1445
1446static inline void *skb_gro_network_header(struct sk_buff *skb)
1447{
1448 return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
1449 skb_network_offset(skb);
1450}
1451
1452static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
1453 unsigned short type,
1454 const void *daddr, const void *saddr,
1455 unsigned len)
1456{
1457 if (!dev->header_ops || !dev->header_ops->create)
1458 return 0;
1459
1460 return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
1461}
1462
1463static inline int dev_parse_header(const struct sk_buff *skb,
1464 unsigned char *haddr)
1465{
1466 const struct net_device *dev = skb->dev;
1467
1468 if (!dev->header_ops || !dev->header_ops->parse)
1469 return 0;
1470 return dev->header_ops->parse(skb, haddr);
1471}
1472
1473typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
1474extern int register_gifconf(unsigned int family, gifconf_func_t * gifconf);
1475static inline int unregister_gifconf(unsigned int family)
1476{
1477 return register_gifconf(family, NULL);
1478}
1479
1480
1481
1482
1483struct softnet_data {
1484 struct Qdisc *output_queue;
1485 struct Qdisc **output_queue_tailp;
1486 struct list_head poll_list;
1487 struct sk_buff *completion_queue;
1488 struct sk_buff_head process_queue;
1489
1490
1491 unsigned int processed;
1492 unsigned int time_squeeze;
1493 unsigned int cpu_collision;
1494 unsigned int received_rps;
1495
1496#ifdef CONFIG_RPS
1497 struct softnet_data *rps_ipi_list;
1498
1499
1500 struct call_single_data csd ____cacheline_aligned_in_smp;
1501 struct softnet_data *rps_ipi_next;
1502 unsigned int cpu;
1503 unsigned int input_queue_head;
1504 unsigned int input_queue_tail;
1505#endif
1506 unsigned dropped;
1507 struct sk_buff_head input_pkt_queue;
1508 struct napi_struct backlog;
1509};
1510
1511static inline void input_queue_head_incr(struct softnet_data *sd)
1512{
1513#ifdef CONFIG_RPS
1514 sd->input_queue_head++;
1515#endif
1516}
1517
1518static inline void input_queue_tail_incr_save(struct softnet_data *sd,
1519 unsigned int *qtail)
1520{
1521#ifdef CONFIG_RPS
1522 *qtail = ++sd->input_queue_tail;
1523#endif
1524}
1525
1526DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
1527
1528#define HAVE_NETIF_QUEUE
1529
1530extern void __netif_schedule(struct Qdisc *q);
1531
1532static inline void netif_schedule_queue(struct netdev_queue *txq)
1533{
1534 if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
1535 __netif_schedule(txq->qdisc);
1536}
1537
1538static inline void netif_tx_schedule_all(struct net_device *dev)
1539{
1540 unsigned int i;
1541
1542 for (i = 0; i < dev->num_tx_queues; i++)
1543 netif_schedule_queue(netdev_get_tx_queue(dev, i));
1544}
1545
1546static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
1547{
1548 clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
1549}
1550
1551
1552
1553
1554
1555
1556
1557static inline void netif_start_queue(struct net_device *dev)
1558{
1559 netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
1560}
1561
1562static inline void netif_tx_start_all_queues(struct net_device *dev)
1563{
1564 unsigned int i;
1565
1566 for (i = 0; i < dev->num_tx_queues; i++) {
1567 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
1568 netif_tx_start_queue(txq);
1569 }
1570}
1571
1572static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
1573{
1574#ifdef CONFIG_NETPOLL_TRAP
1575 if (netpoll_trap()) {
1576 netif_tx_start_queue(dev_queue);
1577 return;
1578 }
1579#endif
1580 if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
1581 __netif_schedule(dev_queue->qdisc);
1582}
1583
1584
1585
1586
1587
1588
1589
1590
1591static inline void netif_wake_queue(struct net_device *dev)
1592{
1593 netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
1594}
1595
1596static inline void netif_tx_wake_all_queues(struct net_device *dev)
1597{
1598 unsigned int i;
1599
1600 for (i = 0; i < dev->num_tx_queues; i++) {
1601 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
1602 netif_tx_wake_queue(txq);
1603 }
1604}
1605
static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
	if (WARN_ON(!dev_queue)) {
		printk(KERN_INFO "netif_stop_queue() cannot be called before "
		       "register_netdev()\n");
		return;
	}
	set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}
1615
1616
1617
1618
1619
1620
1621
1622
1623static inline void netif_stop_queue(struct net_device *dev)
1624{
1625 netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
1626}
1627
1628static inline void netif_tx_stop_all_queues(struct net_device *dev)
1629{
1630 unsigned int i;
1631
1632 for (i = 0; i < dev->num_tx_queues; i++) {
1633 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
1634 netif_tx_stop_queue(txq);
1635 }
1636}
1637
1638static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
1639{
1640 return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
1641}
1642
1643
1644
1645
1646
1647
1648
1649static inline int netif_queue_stopped(const struct net_device *dev)
1650{
1651 return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
1652}
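
/*
 * Illustrative sketch (assumed driver code, not part of this header): the
 * classic single-queue flow-control pattern stops the queue from
 * ndo_start_xmit() when the TX ring is nearly full and wakes it from the TX
 * completion path once descriptors have been reclaimed.  The "foo_*" names
 * and FOO_TX_WAKE_THRESHOLD are hypothetical.
 *
 *	// in foo_start_xmit(), after queueing the skb to the hardware ring:
 *	if (foo_tx_ring_free(priv) < MAX_SKB_FRAGS + 1)
 *		netif_stop_queue(dev);
 *	return NETDEV_TX_OK;
 *
 *	// in the TX completion handler, after reclaiming descriptors:
 *	if (netif_queue_stopped(dev) &&
 *	    foo_tx_ring_free(priv) > FOO_TX_WAKE_THRESHOLD)
 *		netif_wake_queue(dev);
 */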
1653
1654static inline int netif_tx_queue_frozen_or_stopped(const struct netdev_queue *dev_queue)
1655{
1656 return dev_queue->state & QUEUE_STATE_XOFF_OR_FROZEN;
1657}
1658
1659
1660
1661
1662
1663
1664
1665static inline int netif_running(const struct net_device *dev)
1666{
1667 return test_bit(__LINK_STATE_START, &dev->state);
1668}

/*
 * Routines to manage the subqueues on a device.  We only need start,
 * stop, and a couple of housekeeping functions.
 */

/**
 *	netif_start_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Allow sending packets on the specified sub queue.
 */
1684static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
1685{
1686 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
1687
1688 netif_tx_start_queue(txq);
1689}
1690
1691
1692
1693
1694
1695
1696
1697
1698static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
1699{
1700 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
1701#ifdef CONFIG_NETPOLL_TRAP
1702 if (netpoll_trap())
1703 return;
1704#endif
1705 netif_tx_stop_queue(txq);
1706}
1707
1708
1709
1710
1711
1712
1713
1714
1715static inline int __netif_subqueue_stopped(const struct net_device *dev,
1716 u16 queue_index)
1717{
1718 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
1719
1720 return netif_tx_queue_stopped(txq);
1721}
1722
1723static inline int netif_subqueue_stopped(const struct net_device *dev,
1724 struct sk_buff *skb)
1725{
1726 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
1727}
1728
1729
1730
1731
1732
1733
1734
1735
1736static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
1737{
1738 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
1739#ifdef CONFIG_NETPOLL_TRAP
1740 if (netpoll_trap())
1741 return;
1742#endif
1743 if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state))
1744 __netif_schedule(txq->qdisc);
1745}
1746
1747
1748
1749
1750
1751static inline u16 skb_tx_hash(const struct net_device *dev,
1752 const struct sk_buff *skb)
1753{
1754 return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
1755}
1756
1757
1758
1759
1760
1761
1762
1763static inline int netif_is_multiqueue(const struct net_device *dev)
1764{
1765 return dev->num_tx_queues > 1;
1766}
1767
1768extern int netif_set_real_num_tx_queues(struct net_device *dev,
1769 unsigned int txq);
1770
1771#ifdef CONFIG_RPS
1772extern int netif_set_real_num_rx_queues(struct net_device *dev,
1773 unsigned int rxq);
1774#else
1775static inline int netif_set_real_num_rx_queues(struct net_device *dev,
1776 unsigned int rxq)
1777{
1778 return 0;
1779}
1780#endif
1781
1782static inline int netif_copy_real_num_queues(struct net_device *to_dev,
1783 const struct net_device *from_dev)
1784{
1785 netif_set_real_num_tx_queues(to_dev, from_dev->real_num_tx_queues);
1786#ifdef CONFIG_RPS
1787 return netif_set_real_num_rx_queues(to_dev,
1788 from_dev->real_num_rx_queues);
1789#else
1790 return 0;
1791#endif
1792}
1793
1794
1795
1796
1797
1798extern void dev_kfree_skb_irq(struct sk_buff *skb);
1799
1800
1801
1802
1803
1804extern void dev_kfree_skb_any(struct sk_buff *skb);
1805
1806#define HAVE_NETIF_RX 1
1807extern int netif_rx(struct sk_buff *skb);
1808extern int netif_rx_ni(struct sk_buff *skb);
1809#define HAVE_NETIF_RECEIVE_SKB 1
1810extern int netif_receive_skb(struct sk_buff *skb);
1811extern gro_result_t dev_gro_receive(struct napi_struct *napi,
1812 struct sk_buff *skb);
1813extern gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb);
1814extern gro_result_t napi_gro_receive(struct napi_struct *napi,
1815 struct sk_buff *skb);
1816extern void napi_gro_flush(struct napi_struct *napi);
1817extern struct sk_buff * napi_get_frags(struct napi_struct *napi);
1818extern gro_result_t napi_frags_finish(struct napi_struct *napi,
1819 struct sk_buff *skb,
1820 gro_result_t ret);
1821extern struct sk_buff * napi_frags_skb(struct napi_struct *napi);
1822extern gro_result_t napi_gro_frags(struct napi_struct *napi);
1823
1824static inline void napi_free_frags(struct napi_struct *napi)
1825{
1826 kfree_skb(napi->skb);
1827 napi->skb = NULL;
1828}
1829
1830extern int netdev_rx_handler_register(struct net_device *dev,
1831 rx_handler_func_t *rx_handler,
1832 void *rx_handler_data);
1833extern void netdev_rx_handler_unregister(struct net_device *dev);
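
/*
 * Illustrative sketch (not part of this header): an upper device such as a
 * bridge or macvlan attaches itself to a lower device's receive path with
 * netdev_rx_handler_register(); the handler then sees every packet received
 * on that device.  Registration and unregistration are done under the RTNL.
 * foo_handle_frame and foo_port are hypothetical.
 *
 *	err = netdev_rx_handler_register(lower_dev, foo_handle_frame, foo_port);
 *	if (err)
 *		goto err_unwind;
 *	...
 *	netdev_rx_handler_unregister(lower_dev);	// on teardown
 */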
1834
1835extern int dev_valid_name(const char *name);
1836extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
1837extern int dev_ethtool(struct net *net, struct ifreq *);
1838extern unsigned dev_get_flags(const struct net_device *);
1839extern int __dev_change_flags(struct net_device *, unsigned int flags);
1840extern int dev_change_flags(struct net_device *, unsigned);
1841extern void __dev_notify_flags(struct net_device *, unsigned int old_flags);
1842extern int dev_change_name(struct net_device *, const char *);
1843extern int dev_set_alias(struct net_device *, const char *, size_t);
1844extern int dev_change_net_namespace(struct net_device *,
1845 struct net *, const char *);
1846extern int dev_set_mtu(struct net_device *, int);
1847extern int dev_set_mac_address(struct net_device *,
1848 struct sockaddr *);
1849extern int dev_hard_start_xmit(struct sk_buff *skb,
1850 struct net_device *dev,
1851 struct netdev_queue *txq);
1852extern int dev_forward_skb(struct net_device *dev,
1853 struct sk_buff *skb);
1854
1855extern int netdev_budget;
1856
1857
1858extern void netdev_run_todo(void);
1859
1860
1861
1862
1863
1864
1865
1866static inline void dev_put(struct net_device *dev)
1867{
1868 irqsafe_cpu_dec(*dev->pcpu_refcnt);
1869}
1870
1871
1872
1873
1874
1875
1876
1877static inline void dev_hold(struct net_device *dev)
1878{
1879 irqsafe_cpu_inc(*dev->pcpu_refcnt);
1880}
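
/*
 * Illustrative sketch (not part of this header): code that looks a device up
 * outside an RCU section and keeps the pointer must hold a reference.
 * dev_get_by_name()/dev_get_by_index() already take one on success, and the
 * caller pairs it with dev_put() when done.
 *
 *	dev = dev_get_by_index(net, ifindex);
 *	if (!dev)
 *		return -ENODEV;
 *	...use dev...
 *	dev_put(dev);
 */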
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891extern void linkwatch_fire_event(struct net_device *dev);
1892extern void linkwatch_forget_dev(struct net_device *dev);
1893
1894
1895
1896
1897
1898
1899
1900static inline int netif_carrier_ok(const struct net_device *dev)
1901{
1902 return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
1903}
1904
1905extern unsigned long dev_trans_start(struct net_device *dev);
1906
1907extern void __netdev_watchdog_up(struct net_device *dev);
1908
1909extern void netif_carrier_on(struct net_device *dev);
1910
1911extern void netif_carrier_off(struct net_device *dev);
1912
1913extern void netif_notify_peers(struct net_device *dev);

/**
 *	netif_dormant_on - mark device as dormant.
 *	@dev: network device
 *
 * Mark device as dormant (as per RFC2863).
 *
 * The dormant state indicates that the relevant interface is not
 * actually in a condition to pass packets (i.e., it is not 'up') but is
 * in a "pending" state, waiting for some external event.  For "on-demand"
 * interfaces, this new state identifies the situation where the interface
 * is waiting for events to place it in the up state.
 */
1928static inline void netif_dormant_on(struct net_device *dev)
1929{
1930 if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
1931 linkwatch_fire_event(dev);
1932}
1933
1934
1935
1936
1937
1938
1939
1940static inline void netif_dormant_off(struct net_device *dev)
1941{
1942 if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
1943 linkwatch_fire_event(dev);
1944}
1945
1946
1947
1948
1949
1950
1951
1952static inline int netif_dormant(const struct net_device *dev)
1953{
1954 return test_bit(__LINK_STATE_DORMANT, &dev->state);
1955}
1956
1957
1958
1959
1960
1961
1962
1963
static inline int netif_oper_up(const struct net_device *dev)
{
	return (dev->operstate == IF_OPER_UP ||
		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}
1969
1970
1971
1972
1973
1974
1975
1976static inline int netif_device_present(struct net_device *dev)
1977{
1978 return test_bit(__LINK_STATE_PRESENT, &dev->state);
1979}
1980
1981extern void netif_device_detach(struct net_device *dev);
1982
1983extern void netif_device_attach(struct net_device *dev);
1984
1985
1986
1987
1988#define HAVE_NETIF_MSG 1
1989
1990enum {
1991 NETIF_MSG_DRV = 0x0001,
1992 NETIF_MSG_PROBE = 0x0002,
1993 NETIF_MSG_LINK = 0x0004,
1994 NETIF_MSG_TIMER = 0x0008,
1995 NETIF_MSG_IFDOWN = 0x0010,
1996 NETIF_MSG_IFUP = 0x0020,
1997 NETIF_MSG_RX_ERR = 0x0040,
1998 NETIF_MSG_TX_ERR = 0x0080,
1999 NETIF_MSG_TX_QUEUED = 0x0100,
2000 NETIF_MSG_INTR = 0x0200,
2001 NETIF_MSG_TX_DONE = 0x0400,
2002 NETIF_MSG_RX_STATUS = 0x0800,
2003 NETIF_MSG_PKTDATA = 0x1000,
2004 NETIF_MSG_HW = 0x2000,
2005 NETIF_MSG_WOL = 0x4000,
2006};
2007
2008#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
2009#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
2010#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
2011#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
2012#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
2013#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
2014#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
2015#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
2016#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
2017#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
2018#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
2019#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
2020#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
2021#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
2022#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
2023
2024static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
2025{
2026
2027 if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
2028 return default_msg_enable_bits;
2029 if (debug_value == 0)
2030 return 0;
2031
2032 return (1 << debug_value) - 1;
2033}
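
/*
 * Illustrative sketch (assumed driver code, not part of this header): drivers
 * typically feed a module parameter through netif_msg_init() once at probe
 * time and then gate their diagnostics on the netif_msg_*() tests (or the
 * netif_err()/netif_info() wrappers defined further down).  "debug", "priv"
 * and FOO_DEF_MSG_ENABLE are hypothetical.
 *
 *	static int debug = -1;		// -1 means "use the driver default"
 *	module_param(debug, int, 0);
 *
 *	priv->msg_enable = netif_msg_init(debug, FOO_DEF_MSG_ENABLE);
 *	...
 *	if (netif_msg_link(priv))
 *		netdev_info(dev, "link is up\n");
 */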
2034
2035static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
2036{
2037 spin_lock(&txq->_xmit_lock);
2038 txq->xmit_lock_owner = cpu;
2039}
2040
2041static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
2042{
2043 spin_lock_bh(&txq->_xmit_lock);
2044 txq->xmit_lock_owner = smp_processor_id();
2045}
2046
2047static inline int __netif_tx_trylock(struct netdev_queue *txq)
2048{
2049 int ok = spin_trylock(&txq->_xmit_lock);
2050 if (likely(ok))
2051 txq->xmit_lock_owner = smp_processor_id();
2052 return ok;
2053}
2054
2055static inline void __netif_tx_unlock(struct netdev_queue *txq)
2056{
2057 txq->xmit_lock_owner = -1;
2058 spin_unlock(&txq->_xmit_lock);
2059}
2060
2061static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
2062{
2063 txq->xmit_lock_owner = -1;
2064 spin_unlock_bh(&txq->_xmit_lock);
2065}
2066
2067static inline void txq_trans_update(struct netdev_queue *txq)
2068{
2069 if (txq->xmit_lock_owner != -1)
2070 txq->trans_start = jiffies;
2071}
2072
2073
2074
2075
2076
2077
2078
2079static inline void netif_tx_lock(struct net_device *dev)
2080{
2081 unsigned int i;
2082 int cpu;
2083
2084 spin_lock(&dev->tx_global_lock);
2085 cpu = smp_processor_id();
2086 for (i = 0; i < dev->num_tx_queues; i++) {
2087 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2088
2089
2090
2091
2092
2093
2094
2095 __netif_tx_lock(txq, cpu);
2096 set_bit(__QUEUE_STATE_FROZEN, &txq->state);
2097 __netif_tx_unlock(txq);
2098 }
2099}
2100
2101static inline void netif_tx_lock_bh(struct net_device *dev)
2102{
2103 local_bh_disable();
2104 netif_tx_lock(dev);
2105}
2106
2107static inline void netif_tx_unlock(struct net_device *dev)
2108{
2109 unsigned int i;
2110
2111 for (i = 0; i < dev->num_tx_queues; i++) {
2112 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2113
2114
2115
2116
2117
2118 clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
2119 netif_schedule_queue(txq);
2120 }
2121 spin_unlock(&dev->tx_global_lock);
2122}
2123
2124static inline void netif_tx_unlock_bh(struct net_device *dev)
2125{
2126 netif_tx_unlock(dev);
2127 local_bh_enable();
2128}
2129
2130#define HARD_TX_LOCK(dev, txq, cpu) { \
2131 if ((dev->features & NETIF_F_LLTX) == 0) { \
2132 __netif_tx_lock(txq, cpu); \
2133 } \
2134}
2135
2136#define HARD_TX_UNLOCK(dev, txq) { \
2137 if ((dev->features & NETIF_F_LLTX) == 0) { \
2138 __netif_tx_unlock(txq); \
2139 } \
2140}
2141
2142static inline void netif_tx_disable(struct net_device *dev)
2143{
2144 unsigned int i;
2145 int cpu;
2146
2147 local_bh_disable();
2148 cpu = smp_processor_id();
2149 for (i = 0; i < dev->num_tx_queues; i++) {
2150 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2151
2152 __netif_tx_lock(txq, cpu);
2153 netif_tx_stop_queue(txq);
2154 __netif_tx_unlock(txq);
2155 }
2156 local_bh_enable();
2157}
2158
2159static inline void netif_addr_lock(struct net_device *dev)
2160{
2161 spin_lock(&dev->addr_list_lock);
2162}
2163
2164static inline void netif_addr_lock_bh(struct net_device *dev)
2165{
2166 spin_lock_bh(&dev->addr_list_lock);
2167}
2168
2169static inline void netif_addr_unlock(struct net_device *dev)
2170{
2171 spin_unlock(&dev->addr_list_lock);
2172}
2173
2174static inline void netif_addr_unlock_bh(struct net_device *dev)
2175{
2176 spin_unlock_bh(&dev->addr_list_lock);
2177}
2178
2179
2180
2181
2182
2183#define for_each_dev_addr(dev, ha) \
2184 list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
2185
2186
2187
2188extern void ether_setup(struct net_device *dev);
2189
2190
2191extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
2192 void (*setup)(struct net_device *),
2193 unsigned int txqs, unsigned int rxqs);
2194#define alloc_netdev(sizeof_priv, name, setup) \
2195 alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)
2196
2197#define alloc_netdev_mq(sizeof_priv, name, setup, count) \
2198 alloc_netdev_mqs(sizeof_priv, name, setup, count, count)
2199
2200extern int register_netdev(struct net_device *dev);
2201extern void unregister_netdev(struct net_device *dev);
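
/*
 * Illustrative sketch (assumed driver code, not part of this header): typical
 * device lifecycle using the allocation and registration helpers above.
 * "struct foo_priv" and foo_netdev_ops are hypothetical.
 *
 *	dev = alloc_netdev(sizeof(struct foo_priv), "foo%d", ether_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	dev->netdev_ops = &foo_netdev_ops;
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 *	...
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 */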
2202
2203
2204extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
2205 struct netdev_hw_addr_list *from_list,
2206 int addr_len, unsigned char addr_type);
2207extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
2208 struct netdev_hw_addr_list *from_list,
2209 int addr_len, unsigned char addr_type);
2210extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
2211 struct netdev_hw_addr_list *from_list,
2212 int addr_len);
2213extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
2214 struct netdev_hw_addr_list *from_list,
2215 int addr_len);
2216extern void __hw_addr_flush(struct netdev_hw_addr_list *list);
2217extern void __hw_addr_init(struct netdev_hw_addr_list *list);
2218
2219
2220extern int dev_addr_add(struct net_device *dev, unsigned char *addr,
2221 unsigned char addr_type);
2222extern int dev_addr_del(struct net_device *dev, unsigned char *addr,
2223 unsigned char addr_type);
2224extern int dev_addr_add_multiple(struct net_device *to_dev,
2225 struct net_device *from_dev,
2226 unsigned char addr_type);
2227extern int dev_addr_del_multiple(struct net_device *to_dev,
2228 struct net_device *from_dev,
2229 unsigned char addr_type);
2230extern void dev_addr_flush(struct net_device *dev);
2231extern int dev_addr_init(struct net_device *dev);
2232
2233
2234extern int dev_uc_add(struct net_device *dev, unsigned char *addr);
2235extern int dev_uc_del(struct net_device *dev, unsigned char *addr);
2236extern int dev_uc_sync(struct net_device *to, struct net_device *from);
2237extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
2238extern void dev_uc_flush(struct net_device *dev);
2239extern void dev_uc_init(struct net_device *dev);
2240
2241
2242extern int dev_mc_add(struct net_device *dev, unsigned char *addr);
2243extern int dev_mc_add_global(struct net_device *dev, unsigned char *addr);
2244extern int dev_mc_del(struct net_device *dev, unsigned char *addr);
2245extern int dev_mc_del_global(struct net_device *dev, unsigned char *addr);
2246extern int dev_mc_sync(struct net_device *to, struct net_device *from);
2247extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
2248extern void dev_mc_flush(struct net_device *dev);
2249extern void dev_mc_init(struct net_device *dev);
2250
2251
2252extern void dev_set_rx_mode(struct net_device *dev);
2253extern void __dev_set_rx_mode(struct net_device *dev);
2254extern int dev_set_promiscuity(struct net_device *dev, int inc);
2255extern int dev_set_allmulti(struct net_device *dev, int inc);
2256extern void netdev_state_change(struct net_device *dev);
2257extern int netdev_bonding_change(struct net_device *dev,
2258 unsigned long event);
2259extern void netdev_features_change(struct net_device *dev);
2260
2261extern void dev_load(struct net *net, const char *name);
2262extern void dev_mcast_init(void);
2263extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
2264 struct rtnl_link_stats64 *storage);
2265
2266extern int netdev_max_backlog;
2267extern int netdev_tstamp_prequeue;
2268extern int weight_p;
2269extern int netdev_set_master(struct net_device *dev, struct net_device *master);
2270extern int skb_checksum_help(struct sk_buff *skb);
2271extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features);
2272#ifdef CONFIG_BUG
2273extern void netdev_rx_csum_fault(struct net_device *dev);
2274#else
2275static inline void netdev_rx_csum_fault(struct net_device *dev)
2276{
2277}
2278#endif
2279
2280extern void net_enable_timestamp(void);
2281extern void net_disable_timestamp(void);
2282
2283#ifdef CONFIG_PROC_FS
2284extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
2285extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
2286extern void dev_seq_stop(struct seq_file *seq, void *v);
2287#endif
2288
2289extern int netdev_class_create_file(struct class_attribute *class_attr);
2290extern void netdev_class_remove_file(struct class_attribute *class_attr);
2291
2292extern struct kobj_ns_type_operations net_ns_type_operations;
2293
2294extern char *netdev_drivername(const struct net_device *dev, char *buffer, int len);
2295
2296extern void linkwatch_run_queue(void);
2297
2298unsigned long netdev_increment_features(unsigned long all, unsigned long one,
2299 unsigned long mask);
2300unsigned long netdev_fix_features(unsigned long features, const char *name);
2301
2302void netif_stacked_transfer_operstate(const struct net_device *rootdev,
2303 struct net_device *dev);
2304
2305int netif_skb_features(struct sk_buff *skb);
2306
2307static inline int net_gso_ok(int features, int gso_type)
2308{
2309 int feature = gso_type << NETIF_F_GSO_SHIFT;
2310 return (features & feature) == feature;
2311}
2312
2313static inline int skb_gso_ok(struct sk_buff *skb, int features)
2314{
2315 return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
2316 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
2317}
2318
2319static inline int netif_needs_gso(struct sk_buff *skb, int features)
2320{
2321 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
2322 unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
2323}
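
/*
 * Illustrative sketch (not part of this header): a transmit path that cannot
 * hand a GSO skb straight to the device checks netif_needs_gso() against the
 * features it computed for this skb and falls back to software segmentation
 * with skb_gso_segment() (declared above) when needed.
 *
 *	features = netif_skb_features(skb);
 *	if (netif_needs_gso(skb, features)) {
 *		segs = skb_gso_segment(skb, features);
 *		if (IS_ERR(segs))
 *			goto drop;
 *		// transmit each segment in turn
 *	}
 */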
2324
2325static inline void netif_set_gso_max_size(struct net_device *dev,
2326 unsigned int size)
2327{
2328 dev->gso_max_size = size;
2329}
2330
2331extern int __skb_bond_should_drop(struct sk_buff *skb,
2332 struct net_device *master);
2333
2334static inline int skb_bond_should_drop(struct sk_buff *skb,
2335 struct net_device *master)
2336{
2337 if (master)
2338 return __skb_bond_should_drop(skb, master);
2339 return 0;
2340}
2341
2342extern struct pernet_operations __net_initdata loopback_net_ops;
2343
2344static inline int dev_ethtool_get_settings(struct net_device *dev,
2345 struct ethtool_cmd *cmd)
2346{
2347 if (!dev->ethtool_ops || !dev->ethtool_ops->get_settings)
2348 return -EOPNOTSUPP;
2349 return dev->ethtool_ops->get_settings(dev, cmd);
2350}
2351
2352static inline u32 dev_ethtool_get_rx_csum(struct net_device *dev)
2353{
2354 if (!dev->ethtool_ops || !dev->ethtool_ops->get_rx_csum)
2355 return 0;
2356 return dev->ethtool_ops->get_rx_csum(dev);
2357}
2358
2359static inline u32 dev_ethtool_get_flags(struct net_device *dev)
2360{
2361 if (!dev->ethtool_ops || !dev->ethtool_ops->get_flags)
2362 return 0;
2363 return dev->ethtool_ops->get_flags(dev);
2364}
2365
2366
2367
2368
2369
2370static inline const char *netdev_name(const struct net_device *dev)
2371{
2372 if (dev->reg_state != NETREG_REGISTERED)
2373 return "(unregistered net_device)";
2374 return dev->name;
2375}
2376
2377extern int netdev_printk(const char *level, const struct net_device *dev,
2378 const char *format, ...)
2379 __attribute__ ((format (printf, 3, 4)));
2380extern int netdev_emerg(const struct net_device *dev, const char *format, ...)
2381 __attribute__ ((format (printf, 2, 3)));
2382extern int netdev_alert(const struct net_device *dev, const char *format, ...)
2383 __attribute__ ((format (printf, 2, 3)));
2384extern int netdev_crit(const struct net_device *dev, const char *format, ...)
2385 __attribute__ ((format (printf, 2, 3)));
2386extern int netdev_err(const struct net_device *dev, const char *format, ...)
2387 __attribute__ ((format (printf, 2, 3)));
2388extern int netdev_warn(const struct net_device *dev, const char *format, ...)
2389 __attribute__ ((format (printf, 2, 3)));
2390extern int netdev_notice(const struct net_device *dev, const char *format, ...)
2391 __attribute__ ((format (printf, 2, 3)));
2392extern int netdev_info(const struct net_device *dev, const char *format, ...)
2393 __attribute__ ((format (printf, 2, 3)));
2394
2395#define MODULE_ALIAS_NETDEV(device) \
2396 MODULE_ALIAS("netdev-" device)
2397
2398#if defined(DEBUG)
2399#define netdev_dbg(__dev, format, args...) \
2400 netdev_printk(KERN_DEBUG, __dev, format, ##args)
2401#elif defined(CONFIG_DYNAMIC_DEBUG)
2402#define netdev_dbg(__dev, format, args...) \
2403do { \
2404 dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \
2405 netdev_name(__dev), ##args); \
2406} while (0)
2407#else
2408#define netdev_dbg(__dev, format, args...) \
2409({ \
2410 if (0) \
2411 netdev_printk(KERN_DEBUG, __dev, format, ##args); \
2412 0; \
2413})
2414#endif
2415
2416#if defined(VERBOSE_DEBUG)
2417#define netdev_vdbg netdev_dbg
2418#else
2419
2420#define netdev_vdbg(dev, format, args...) \
2421({ \
2422 if (0) \
2423 netdev_printk(KERN_DEBUG, dev, format, ##args); \
2424 0; \
2425})
2426#endif
2427
2428
2429
2430
2431
2432
2433#define netdev_WARN(dev, format, args...) \
2434 WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args);
2435
2436
2437
2438#define netif_printk(priv, type, level, dev, fmt, args...) \
2439do { \
2440 if (netif_msg_##type(priv)) \
2441 netdev_printk(level, (dev), fmt, ##args); \
2442} while (0)
2443
2444#define netif_level(level, priv, type, dev, fmt, args...) \
2445do { \
2446 if (netif_msg_##type(priv)) \
2447 netdev_##level(dev, fmt, ##args); \
2448} while (0)
2449
2450#define netif_emerg(priv, type, dev, fmt, args...) \
2451 netif_level(emerg, priv, type, dev, fmt, ##args)
2452#define netif_alert(priv, type, dev, fmt, args...) \
2453 netif_level(alert, priv, type, dev, fmt, ##args)
2454#define netif_crit(priv, type, dev, fmt, args...) \
2455 netif_level(crit, priv, type, dev, fmt, ##args)
2456#define netif_err(priv, type, dev, fmt, args...) \
2457 netif_level(err, priv, type, dev, fmt, ##args)
2458#define netif_warn(priv, type, dev, fmt, args...) \
2459 netif_level(warn, priv, type, dev, fmt, ##args)
2460#define netif_notice(priv, type, dev, fmt, args...) \
2461 netif_level(notice, priv, type, dev, fmt, ##args)
2462#define netif_info(priv, type, dev, fmt, args...) \
2463 netif_level(info, priv, type, dev, fmt, ##args)
2464
2465#if defined(DEBUG)
2466#define netif_dbg(priv, type, dev, format, args...) \
2467 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
2468#elif defined(CONFIG_DYNAMIC_DEBUG)
2469#define netif_dbg(priv, type, netdev, format, args...) \
2470do { \
2471 if (netif_msg_##type(priv)) \
2472 dynamic_dev_dbg((netdev)->dev.parent, \
2473 "%s: " format, \
2474 netdev_name(netdev), ##args); \
2475} while (0)
2476#else
2477#define netif_dbg(priv, type, dev, format, args...) \
2478({ \
2479 if (0) \
2480 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
2481 0; \
2482})
2483#endif
2484
2485#if defined(VERBOSE_DEBUG)
2486#define netif_vdbg netif_dbg
2487#else
2488#define netif_vdbg(priv, type, dev, format, args...) \
2489({ \
2490 if (0) \
2491 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
2492 0; \
2493})
2494#endif
2495
2496#endif
2497
2498#endif
2499