#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

#ifdef __KERNEL__
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <asm/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/device.h>
#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>

#include <linux/ethtool.h>
#include <net/net_namespace.h>
#include <net/dsa.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif

struct vlan_group;
struct netpoll_info;

struct wireless_dev;

#define SET_ETHTOOL_OPS(netdev, ops) \
        ((netdev)->ethtool_ops = (ops))

#define HAVE_ALLOC_NETDEV
#define HAVE_FREE_NETDEV
#define HAVE_NETDEV_PRIV

#define NET_XMIT_SUCCESS        0
#define NET_XMIT_DROP           1
#define NET_XMIT_CN             2
#define NET_XMIT_POLICED        3
#define NET_XMIT_MASK           0xFFFF

#define NET_RX_SUCCESS          0
#define NET_RX_DROP             1

#define net_xmit_eval(e)        ((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)       ((e) != NET_XMIT_CN ? -ENOBUFS : 0)

enum netdev_tx {
        NETDEV_TX_OK = 0,
        NETDEV_TX_BUSY,
        NETDEV_TX_LOCKED = -1,
};
typedef enum netdev_tx netdev_tx_t;

#endif

#define MAX_ADDR_LEN    32

#ifdef __KERNEL__

#if defined(CONFIG_WLAN_80211) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#elif defined(CONFIG_TR) || defined(CONFIG_TR_MODULE)
# define LL_MAX_HEADER 48
#else
# define LL_MAX_HEADER 32
#endif

#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \
    !defined(CONFIG_NET_IPGRE) && !defined(CONFIG_NET_IPGRE_MODULE) && \
    !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \
    !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

#endif

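/*
 * Old-style interface statistics, filled in by the driver (typically
 * returned from its ndo_get_stats hook) and exported to user space,
 * e.g. through /proc/net/dev.
 */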
struct net_device_stats
{
        unsigned long rx_packets;
        unsigned long tx_packets;
        unsigned long rx_bytes;
        unsigned long tx_bytes;
        unsigned long rx_errors;
        unsigned long tx_errors;
        unsigned long rx_dropped;
        unsigned long tx_dropped;
        unsigned long multicast;
        unsigned long collisions;

        unsigned long rx_length_errors;
        unsigned long rx_over_errors;
        unsigned long rx_crc_errors;
        unsigned long rx_frame_errors;
        unsigned long rx_fifo_errors;
        unsigned long rx_missed_errors;

        unsigned long tx_aborted_errors;
        unsigned long tx_carrier_errors;
        unsigned long tx_fifo_errors;
        unsigned long tx_heartbeat_errors;
        unsigned long tx_window_errors;

        unsigned long rx_compressed;
        unsigned long tx_compressed;
};

enum {
        IF_PORT_UNKNOWN = 0,
        IF_PORT_10BASE2,
        IF_PORT_10BASET,
        IF_PORT_AUI,
        IF_PORT_100BASET,
        IF_PORT_100BASETX,
        IF_PORT_100BASEFX
};

#ifdef __KERNEL__

#include <linux/cache.h>
#include <linux/skbuff.h>

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netif_rx_stats
{
        unsigned total;
        unsigned dropped;
        unsigned time_squeeze;
        unsigned cpu_collision;
};

DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);

struct dev_addr_list
{
        struct dev_addr_list *next;
        u8 da_addr[MAX_ADDR_LEN];
        u8 da_addrlen;
        u8 da_synced;
        int da_users;
        int da_gusers;
};

#define dev_mc_list     dev_addr_list
#define dmi_addr        da_addr
#define dmi_addrlen     da_addrlen
#define dmi_users       da_users
#define dmi_gusers      da_gusers

struct netdev_hw_addr {
        struct list_head list;
        unsigned char addr[MAX_ADDR_LEN];
        unsigned char type;
#define NETDEV_HW_ADDR_T_LAN            1
#define NETDEV_HW_ADDR_T_SAN            2
#define NETDEV_HW_ADDR_T_SLAVE          3
#define NETDEV_HW_ADDR_T_UNICAST        4
        int refcount;
        bool synced;
        struct rcu_head rcu_head;
};

struct netdev_hw_addr_list {
        struct list_head list;
        int count;
};

struct hh_cache
{
        struct hh_cache *hh_next;
        atomic_t hh_refcnt;

        __be16 hh_type ____cacheline_aligned_in_smp;

        u16 hh_len;
        int (*hh_output)(struct sk_buff *skb);
        seqlock_t hh_lock;

#define HH_DATA_MOD     16
#define HH_DATA_OFF(__len) \
        (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
        (((__len) + (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1))
        unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

#define LL_RESERVED_SPACE(dev) \
        ((((dev)->hard_header_len + (dev)->needed_headroom) & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev, extra) \
        ((((dev)->hard_header_len + (dev)->needed_headroom + (extra)) & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_ALLOCATED_SPACE(dev) \
        ((((dev)->hard_header_len + (dev)->needed_headroom + (dev)->needed_tailroom) & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)

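/*
 * Illustrative sketch (not from the original header): when a protocol
 * builds an output skb it reserves LL_RESERVED_SPACE(dev) bytes of
 * headroom so the link-layer header fits without reallocation, and
 * sizes the allocation with LL_ALLOCATED_SPACE(dev) when tailroom is
 * also needed.  Using dev->mtu as the payload size is only an example.
 *
 *      struct sk_buff *skb;
 *
 *      skb = alloc_skb(LL_ALLOCATED_SPACE(dev) + dev->mtu, GFP_ATOMIC);
 *      if (!skb)
 *              return -ENOMEM;
 *      skb_reserve(skb, LL_RESERVED_SPACE(dev));
 *      skb_reset_network_header(skb);
 *      ... fill in the payload, then let dev_hard_header() prepend
 *      the device's link-layer header ...
 */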
struct header_ops {
        int (*create)(struct sk_buff *skb, struct net_device *dev,
                      unsigned short type, const void *daddr,
                      const void *saddr, unsigned len);
        int (*parse)(const struct sk_buff *skb, unsigned char *haddr);
        int (*rebuild)(struct sk_buff *skb);
#define HAVE_HEADER_CACHE
        int (*cache)(const struct neighbour *neigh, struct hh_cache *hh);
        void (*cache_update)(struct hh_cache *hh,
                             const struct net_device *dev,
                             const unsigned char *haddr);
};

enum netdev_state_t
{
        __LINK_STATE_START,
        __LINK_STATE_PRESENT,
        __LINK_STATE_NOCARRIER,
        __LINK_STATE_LINKWATCH_PENDING,
        __LINK_STATE_DORMANT,
};

struct netdev_boot_setup {
        char name[IFNAMSIZ];
        struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

extern int __init netdev_boot_setup(char *str);

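/*
 * struct napi_struct - per-device (or per-queue) NAPI polling context.
 * Drivers schedule it from their interrupt handler; the networking core
 * then calls ->poll() with a weight-limited budget in softirq context.
 */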
struct napi_struct {
        struct list_head poll_list;

        unsigned long state;
        int weight;
        int (*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
        spinlock_t poll_lock;
        int poll_owner;
#endif

        unsigned int gro_count;

        struct net_device *dev;
        struct list_head dev_list;
        struct sk_buff *gro_list;
        struct sk_buff *skb;
};

enum
{
        NAPI_STATE_SCHED,
        NAPI_STATE_DISABLE,
        NAPI_STATE_NPSVC,
};

enum {
        GRO_MERGED,
        GRO_MERGED_FREE,
        GRO_HELD,
        GRO_NORMAL,
        GRO_DROP,
};

extern void __napi_schedule(struct napi_struct *n);

static inline int napi_disable_pending(struct napi_struct *n)
{
        return test_bit(NAPI_STATE_DISABLE, &n->state);
}

static inline int napi_schedule_prep(struct napi_struct *n)
{
        return !napi_disable_pending(n) &&
                !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}

static inline void napi_schedule(struct napi_struct *n)
{
        if (napi_schedule_prep(n))
                __napi_schedule(n);
}

static inline int napi_reschedule(struct napi_struct *napi)
{
        if (napi_schedule_prep(napi)) {
                __napi_schedule(napi);
                return 1;
        }
        return 0;
}

extern void __napi_complete(struct napi_struct *n);
extern void napi_complete(struct napi_struct *n);

static inline void napi_disable(struct napi_struct *n)
{
        set_bit(NAPI_STATE_DISABLE, &n->state);
        while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
                msleep(1);
        clear_bit(NAPI_STATE_DISABLE, &n->state);
}

static inline void napi_enable(struct napi_struct *n)
{
        BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
        smp_mb__before_clear_bit();
        clear_bit(NAPI_STATE_SCHED, &n->state);
}

#ifdef CONFIG_SMP
static inline void napi_synchronize(const struct napi_struct *n)
{
        while (test_bit(NAPI_STATE_SCHED, &n->state))
                msleep(1);
}
#else
# define napi_synchronize(n)    barrier()
#endif
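
/*
 * Illustrative NAPI usage sketch (not part of the original header).
 * The "mydev_*" names are hypothetical driver helpers.  The pattern is:
 * mask device RX interrupts and schedule the NAPI context from the hard
 * IRQ, process at most "budget" packets in ->poll(), and only call
 * napi_complete() and re-enable interrupts once the work is done.
 *
 *      static irqreturn_t mydev_interrupt(int irq, void *data)
 *      {
 *              struct mydev_priv *priv = data;
 *
 *              mydev_disable_rx_irq(priv);
 *              napi_schedule(&priv->napi);
 *              return IRQ_HANDLED;
 *      }
 *
 *      static int mydev_poll(struct napi_struct *napi, int budget)
 *      {
 *              struct mydev_priv *priv =
 *                      container_of(napi, struct mydev_priv, napi);
 *              int work_done = mydev_clean_rx(priv, budget);
 *
 *              if (work_done < budget) {
 *                      napi_complete(napi);
 *                      mydev_enable_rx_irq(priv);
 *              }
 *              return work_done;
 *      }
 */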

enum netdev_queue_state_t
{
        __QUEUE_STATE_XOFF,
        __QUEUE_STATE_FROZEN,
};

struct netdev_queue {
        struct net_device *dev;
        struct Qdisc *qdisc;
        unsigned long state;
        struct Qdisc *qdisc_sleeping;

        spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
        int xmit_lock_owner;

        unsigned long trans_start;
        unsigned long tx_bytes;
        unsigned long tx_packets;
        unsigned long tx_dropped;
} ____cacheline_aligned_in_smp;

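/*
 * This structure defines the management hooks for network devices:
 * ndo_open and ndo_stop are called when the interface is brought up and
 * down, ndo_start_xmit transmits a packet, and the remaining optional
 * callbacks cover address, MTU, VLAN, multicast and other per-device
 * operations.
 */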
#define HAVE_NET_DEVICE_OPS
struct net_device_ops {
        int (*ndo_init)(struct net_device *dev);
        void (*ndo_uninit)(struct net_device *dev);
        int (*ndo_open)(struct net_device *dev);
        int (*ndo_stop)(struct net_device *dev);
        netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
                                      struct net_device *dev);
        u16 (*ndo_select_queue)(struct net_device *dev,
                                struct sk_buff *skb);
#define HAVE_CHANGE_RX_FLAGS
        void (*ndo_change_rx_flags)(struct net_device *dev,
                                    int flags);
#define HAVE_SET_RX_MODE
        void (*ndo_set_rx_mode)(struct net_device *dev);
#define HAVE_MULTICAST
        void (*ndo_set_multicast_list)(struct net_device *dev);
#define HAVE_SET_MAC_ADDR
        int (*ndo_set_mac_address)(struct net_device *dev,
                                   void *addr);
#define HAVE_VALIDATE_ADDR
        int (*ndo_validate_addr)(struct net_device *dev);
#define HAVE_PRIVATE_IOCTL
        int (*ndo_do_ioctl)(struct net_device *dev,
                            struct ifreq *ifr, int cmd);
#define HAVE_SET_CONFIG
        int (*ndo_set_config)(struct net_device *dev,
                              struct ifmap *map);
#define HAVE_CHANGE_MTU
        int (*ndo_change_mtu)(struct net_device *dev,
                              int new_mtu);
        int (*ndo_neigh_setup)(struct net_device *dev,
                               struct neigh_parms *);
#define HAVE_TX_TIMEOUT
        void (*ndo_tx_timeout)(struct net_device *dev);

        struct net_device_stats *(*ndo_get_stats)(struct net_device *dev);

        void (*ndo_vlan_rx_register)(struct net_device *dev,
                                     struct vlan_group *grp);
        void (*ndo_vlan_rx_add_vid)(struct net_device *dev,
                                    unsigned short vid);
        void (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
                                     unsigned short vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
#define HAVE_NETDEV_POLL
        void (*ndo_poll_controller)(struct net_device *dev);
#endif
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
        int (*ndo_fcoe_enable)(struct net_device *dev);
        int (*ndo_fcoe_disable)(struct net_device *dev);
        int (*ndo_fcoe_ddp_setup)(struct net_device *dev,
                                  u16 xid,
                                  struct scatterlist *sgl,
                                  unsigned int sgc);
        int (*ndo_fcoe_ddp_done)(struct net_device *dev,
                                 u16 xid);
#endif
};
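
/*
 * Illustrative sketch (not from the original header): a driver exposes
 * its callbacks through one static ops table and points dev->netdev_ops
 * at it before register_netdev().  The "mydev_*" handlers below are
 * hypothetical names; eth_mac_addr, eth_change_mtu and eth_validate_addr
 * are the generic Ethernet helpers from <linux/etherdevice.h>.
 *
 *      static const struct net_device_ops mydev_netdev_ops = {
 *              .ndo_open               = mydev_open,
 *              .ndo_stop               = mydev_close,
 *              .ndo_start_xmit         = mydev_start_xmit,
 *              .ndo_set_multicast_list = mydev_set_rx_mode,
 *              .ndo_set_mac_address    = eth_mac_addr,
 *              .ndo_change_mtu         = eth_change_mtu,
 *              .ndo_validate_addr      = eth_validate_addr,
 *              .ndo_tx_timeout         = mydev_tx_timeout,
 *      };
 *
 *      dev->netdev_ops = &mydev_netdev_ops;
 */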
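/*
 * The device structure itself: one instance per network interface.  It
 * mixes generic state (name, flags, statistics, queues) with
 * protocol-specific pointers, and is normally allocated together with
 * the driver's private area by alloc_netdev()/alloc_netdev_mq().
 */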
struct net_device
{
        char name[IFNAMSIZ];

        struct hlist_node name_hlist;

        char *ifalias;

        unsigned long mem_end;
        unsigned long mem_start;
        unsigned long base_addr;
        unsigned int irq;

        unsigned char if_port;
        unsigned char dma;

        unsigned long state;

        struct list_head dev_list;
        struct list_head napi_list;

        unsigned long features;
#define NETIF_F_SG              1
#define NETIF_F_IP_CSUM         2
#define NETIF_F_NO_CSUM         4
#define NETIF_F_HW_CSUM         8
#define NETIF_F_IPV6_CSUM       16
#define NETIF_F_HIGHDMA         32
#define NETIF_F_FRAGLIST        64
#define NETIF_F_HW_VLAN_TX      128
#define NETIF_F_HW_VLAN_RX      256
#define NETIF_F_HW_VLAN_FILTER  512
#define NETIF_F_VLAN_CHALLENGED 1024
#define NETIF_F_GSO             2048
#define NETIF_F_LLTX            4096

#define NETIF_F_NETNS_LOCAL     8192
#define NETIF_F_GRO             16384
#define NETIF_F_LRO             32768

#define NETIF_F_FCOE_CRC        (1 << 24)
#define NETIF_F_SCTP_CSUM       (1 << 25)
#define NETIF_F_FCOE_MTU        (1 << 26)

#define NETIF_F_GSO_SHIFT       16
#define NETIF_F_GSO_MASK        0x00ff0000
#define NETIF_F_TSO             (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
#define NETIF_F_UFO             (SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
#define NETIF_F_GSO_ROBUST      (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO_ECN         (SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO6            (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
#define NETIF_F_FSO             (SKB_GSO_FCOE << NETIF_F_GSO_SHIFT)

#define NETIF_F_GSO_SOFTWARE    (NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)

#define NETIF_F_GEN_CSUM        (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
#define NETIF_F_V4_CSUM         (NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
#define NETIF_F_V6_CSUM         (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
#define NETIF_F_ALL_CSUM        (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)

#define NETIF_F_ONE_FOR_ALL     (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
                                 NETIF_F_SG | NETIF_F_HIGHDMA | \
                                 NETIF_F_FRAGLIST)

        int ifindex;
        int iflink;

        struct net_device_stats stats;

#ifdef CONFIG_WIRELESS_EXT
        const struct iw_handler_def *wireless_handlers;
        struct iw_public_data *wireless_data;
#endif

        const struct net_device_ops *netdev_ops;
        const struct ethtool_ops *ethtool_ops;

        const struct header_ops *header_ops;

        unsigned int flags;
        unsigned short gflags;
        unsigned short priv_flags;
        unsigned short padded;

        unsigned char operstate;
        unsigned char link_mode;

        unsigned mtu;
        unsigned short type;
        unsigned short hard_header_len;

        unsigned short needed_headroom;
        unsigned short needed_tailroom;

        struct net_device *master;

        unsigned char perm_addr[MAX_ADDR_LEN];
        unsigned char addr_len;
        unsigned short dev_id;

        struct netdev_hw_addr_list uc;
        int uc_promisc;
        spinlock_t addr_list_lock;
        struct dev_addr_list *mc_list;
        int mc_count;
        unsigned int promiscuity;
        unsigned int allmulti;

#ifdef CONFIG_NET_DSA
        void *dsa_ptr;
#endif
        void *atalk_ptr;
        void *ip_ptr;
        void *dn_ptr;
        void *ip6_ptr;
        void *ec_ptr;
        void *ax25_ptr;
        struct wireless_dev *ieee80211_ptr;

        unsigned long last_rx;

        unsigned char *dev_addr;

        struct netdev_hw_addr_list dev_addrs;

        unsigned char broadcast[MAX_ADDR_LEN];

        struct netdev_queue rx_queue;

        struct netdev_queue *_tx ____cacheline_aligned_in_smp;

        unsigned int num_tx_queues;

        unsigned int real_num_tx_queues;

        struct Qdisc *qdisc;

        unsigned long tx_queue_len;
        spinlock_t tx_global_lock;

        unsigned long trans_start;

        int watchdog_timeo;
        struct timer_list watchdog_timer;

        atomic_t refcnt ____cacheline_aligned_in_smp;

        struct list_head todo_list;

        struct hlist_node index_hlist;

        struct net_device *link_watch_next;

        enum { NETREG_UNINITIALIZED = 0,
               NETREG_REGISTERED,
               NETREG_UNREGISTERING,
               NETREG_UNREGISTERED,
               NETREG_RELEASED,
               NETREG_DUMMY,
        } reg_state;

        void (*destructor)(struct net_device *dev);

#ifdef CONFIG_NETPOLL
        struct netpoll_info *npinfo;
#endif

#ifdef CONFIG_NET_NS
        struct net *nd_net;
#endif

        void *ml_priv;

        struct net_bridge_port *br_port;

        struct macvlan_port *macvlan_port;

        struct garp_port *garp_port;

        struct device dev;

        const struct attribute_group *sysfs_groups[3];

        const struct rtnl_link_ops *rtnl_link_ops;

        unsigned long vlan_features;

#define GSO_MAX_SIZE            65536
        unsigned int gso_max_size;

#ifdef CONFIG_DCB
        struct dcbnl_rtnl_ops *dcbnl_ops;
#endif

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
        unsigned int fcoe_ddp_xid;
#endif
};
#define to_net_dev(d) container_of(d, struct net_device, dev)

#define NETDEV_ALIGN    32

static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
                                         unsigned int index)
{
        return &dev->_tx[index];
}

static inline void netdev_for_each_tx_queue(struct net_device *dev,
                                            void (*f)(struct net_device *,
                                                      struct netdev_queue *,
                                                      void *),
                                            void *arg)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++)
                f(dev, &dev->_tx[i], arg);
}

static inline
struct net *dev_net(const struct net_device *dev)
{
#ifdef CONFIG_NET_NS
        return dev->nd_net;
#else
        return &init_net;
#endif
}

static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
#ifdef CONFIG_NET_NS
        release_net(dev->nd_net);
        dev->nd_net = hold_net(net);
#endif
}

static inline bool netdev_uses_dsa_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_DSA
        if (dev->dsa_ptr != NULL)
                return dsa_uses_dsa_tags(dev->dsa_ptr);
#endif

        return 0;
}

static inline bool netdev_uses_trailer_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_TRAILER
        if (dev->dsa_ptr != NULL)
                return dsa_uses_trailer_tags(dev->dsa_ptr);
#endif

        return 0;
}

static inline void *netdev_priv(const struct net_device *dev)
{
        return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
}
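
/*
 * Illustrative sketch (not from the original header): a driver embeds
 * its private state by passing its size to alloc_netdev() and then
 * reaches it again through netdev_priv().  "struct mydev_priv" and the
 * surrounding error handling are hypothetical.
 *
 *      struct mydev_priv {
 *              struct napi_struct napi;
 *              spinlock_t lock;
 *      };
 *
 *      struct net_device *dev;
 *      struct mydev_priv *priv;
 *      int err;
 *
 *      dev = alloc_netdev(sizeof(struct mydev_priv), "mydev%d", ether_setup);
 *      if (!dev)
 *              return -ENOMEM;
 *      priv = netdev_priv(dev);
 *      spin_lock_init(&priv->lock);
 *
 *      err = register_netdev(dev);
 *      if (err)
 *              free_netdev(dev);
 */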

#define SET_NETDEV_DEV(net, pdev)       ((net)->dev.parent = (pdev))

#define SET_NETDEV_DEVTYPE(net, devtype)        ((net)->dev.type = (devtype))

void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
                    int (*poll)(struct napi_struct *, int), int weight);

void netif_napi_del(struct napi_struct *napi);

struct napi_gro_cb {
        void *frag0;

        unsigned int frag0_len;

        int data_offset;

        int same_flow;

        int flush;

        int count;

        int free;
};

#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)

struct packet_type {
        __be16 type;
        struct net_device *dev;
        int (*func)(struct sk_buff *,
                    struct net_device *,
                    struct packet_type *,
                    struct net_device *);
        struct sk_buff *(*gso_segment)(struct sk_buff *skb,
                                       int features);
        int (*gso_send_check)(struct sk_buff *skb);
        struct sk_buff **(*gro_receive)(struct sk_buff **head,
                                        struct sk_buff *skb);
        int (*gro_complete)(struct sk_buff *skb);
        void *af_packet_priv;
        struct list_head list;
};

#include <linux/interrupt.h>
#include <linux/notifier.h>

extern rwlock_t dev_base_lock;

#define for_each_netdev(net, d) \
        list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n) \
        list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d) \
        list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define net_device_entry(lh)    list_entry(lh, struct net_device, dev_list)

static inline struct net_device *next_net_device(struct net_device *dev)
{
        struct list_head *lh;
        struct net *net;

        net = dev_net(dev);
        lh = dev->dev_list.next;
        return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *first_net_device(struct net *net)
{
        return list_empty(&net->dev_base_head) ? NULL :
                net_device_entry(net->dev_base_head.next);
}

extern int netdev_boot_setup_check(struct net_device *dev);
extern unsigned long netdev_boot_base(const char *prefix, int unit);
extern struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *hwaddr);
extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern void dev_add_pack(struct packet_type *pt);
extern void dev_remove_pack(struct packet_type *pt);
extern void __dev_remove_pack(struct packet_type *pt);

extern struct net_device *dev_get_by_flags(struct net *net, unsigned short flags,
                                           unsigned short mask);
extern struct net_device *dev_get_by_name(struct net *net, const char *name);
extern struct net_device *__dev_get_by_name(struct net *net, const char *name);
extern int dev_alloc_name(struct net_device *dev, const char *name);
extern int dev_open(struct net_device *dev);
extern int dev_close(struct net_device *dev);
extern void dev_disable_lro(struct net_device *dev);
extern int dev_queue_xmit(struct sk_buff *skb);
extern int register_netdevice(struct net_device *dev);
extern void unregister_netdevice(struct net_device *dev);
extern void free_netdev(struct net_device *dev);
extern void synchronize_net(void);
extern int register_netdevice_notifier(struct notifier_block *nb);
extern int unregister_netdevice_notifier(struct notifier_block *nb);
extern int init_dummy_netdev(struct net_device *dev);
extern void netdev_resync_ops(struct net_device *dev);

extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
extern int dev_restart(struct net_device *dev);
#ifdef CONFIG_NETPOLL_TRAP
extern int netpoll_trap(void);
#endif
extern int skb_gro_receive(struct sk_buff **head,
                           struct sk_buff *skb);
extern void skb_gro_reset_offset(struct sk_buff *skb);

static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
        return NAPI_GRO_CB(skb)->data_offset;
}

static inline unsigned int skb_gro_len(const struct sk_buff *skb)
{
        return skb->len - NAPI_GRO_CB(skb)->data_offset;
}

static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
{
        NAPI_GRO_CB(skb)->data_offset += len;
}

static inline void *skb_gro_header_fast(struct sk_buff *skb,
                                        unsigned int offset)
{
        return NAPI_GRO_CB(skb)->frag0 + offset;
}

static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
{
        return NAPI_GRO_CB(skb)->frag0_len < hlen;
}

static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
                                        unsigned int offset)
{
        NAPI_GRO_CB(skb)->frag0 = NULL;
        NAPI_GRO_CB(skb)->frag0_len = 0;
        return pskb_may_pull(skb, hlen) ? skb->data + offset : NULL;
}

static inline void *skb_gro_mac_header(struct sk_buff *skb)
{
        return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb);
}

static inline void *skb_gro_network_header(struct sk_buff *skb)
{
        return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
               skb_network_offset(skb);
}

static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
                                  unsigned short type,
                                  const void *daddr, const void *saddr,
                                  unsigned len)
{
        if (!dev->header_ops || !dev->header_ops->create)
                return 0;

        return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
}

static inline int dev_parse_header(const struct sk_buff *skb,
                                   unsigned char *haddr)
{
        const struct net_device *dev = skb->dev;

        if (!dev->header_ops || !dev->header_ops->parse)
                return 0;
        return dev->header_ops->parse(skb, haddr);
}

typedef int gifconf_func_t(struct net_device *dev, char __user *bufptr, int len);
extern int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
static inline int unregister_gifconf(unsigned int family)
{
        return register_gifconf(family, NULL);
}

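/*
 * Per-CPU bookkeeping for the receive and transmit-completion softirqs:
 * queued input packets, the NAPI poll list, and skbs waiting to be
 * freed.
 */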
struct softnet_data
{
        struct Qdisc *output_queue;
        struct sk_buff_head input_pkt_queue;
        struct list_head poll_list;
        struct sk_buff *completion_queue;

        struct napi_struct backlog;
};

DECLARE_PER_CPU(struct softnet_data, softnet_data);

#define HAVE_NETIF_QUEUE

extern void __netif_schedule(struct Qdisc *q);

static inline void netif_schedule_queue(struct netdev_queue *txq)
{
        if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
                __netif_schedule(txq->qdisc);
}

static inline void netif_tx_schedule_all(struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++)
                netif_schedule_queue(netdev_get_tx_queue(dev, i));
}

static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
        clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}

static inline void netif_start_queue(struct net_device *dev)
{
        netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_start_all_queues(struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
                netif_tx_start_queue(txq);
        }
}

static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_NETPOLL_TRAP
        if (netpoll_trap()) {
                netif_tx_start_queue(dev_queue);
                return;
        }
#endif
        if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
                __netif_schedule(dev_queue->qdisc);
}

static inline void netif_wake_queue(struct net_device *dev)
{
        netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_wake_all_queues(struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
                netif_tx_wake_queue(txq);
        }
}

static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
        set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}

static inline void netif_stop_queue(struct net_device *dev)
{
        netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_stop_all_queues(struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
                netif_tx_stop_queue(txq);
        }
}

static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
        return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}

static inline int netif_queue_stopped(const struct net_device *dev)
{
        return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}
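
/*
 * Illustrative flow-control sketch (not from the original header): a
 * driver stops its queue from ndo_start_xmit when the hardware ring is
 * full and wakes it again from the TX-completion path.  The ring
 * helpers "mydev_ring_full()" and "mydev_ring_has_room()" are
 * hypothetical.
 *
 *      static netdev_tx_t mydev_start_xmit(struct sk_buff *skb,
 *                                          struct net_device *dev)
 *      {
 *              struct mydev_priv *priv = netdev_priv(dev);
 *
 *              if (mydev_ring_full(priv)) {
 *                      netif_stop_queue(dev);
 *                      return NETDEV_TX_BUSY;
 *              }
 *              mydev_queue_frame(priv, skb);
 *              return NETDEV_TX_OK;
 *      }
 *
 *      Then, in the TX interrupt / completion handler:
 *
 *      if (netif_queue_stopped(dev) && mydev_ring_has_room(priv))
 *              netif_wake_queue(dev);
 */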

static inline int netif_tx_queue_frozen(const struct netdev_queue *dev_queue)
{
        return test_bit(__QUEUE_STATE_FROZEN, &dev_queue->state);
}

static inline int netif_running(const struct net_device *dev)
{
        return test_bit(__LINK_STATE_START, &dev->state);
}

static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
        struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

        netif_tx_start_queue(txq);
}

static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
        struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
        if (netpoll_trap())
                return;
#endif
        netif_tx_stop_queue(txq);
}

static inline int __netif_subqueue_stopped(const struct net_device *dev,
                                           u16 queue_index)
{
        struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

        return netif_tx_queue_stopped(txq);
}

static inline int netif_subqueue_stopped(const struct net_device *dev,
                                         struct sk_buff *skb)
{
        return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}

static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
        struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
        if (netpoll_trap())
                return;
#endif
        if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state))
                __netif_schedule(txq->qdisc);
}

static inline int netif_is_multiqueue(const struct net_device *dev)
{
        return (dev->num_tx_queues > 1);
}

extern void dev_kfree_skb_irq(struct sk_buff *skb);

extern void dev_kfree_skb_any(struct sk_buff *skb);

#define HAVE_NETIF_RX 1
extern int netif_rx(struct sk_buff *skb);
extern int netif_rx_ni(struct sk_buff *skb);
#define HAVE_NETIF_RECEIVE_SKB 1
extern int netif_receive_skb(struct sk_buff *skb);
extern void napi_gro_flush(struct napi_struct *napi);
extern int dev_gro_receive(struct napi_struct *napi,
                           struct sk_buff *skb);
extern int napi_skb_finish(int ret, struct sk_buff *skb);
extern int napi_gro_receive(struct napi_struct *napi,
                            struct sk_buff *skb);
extern void napi_reuse_skb(struct napi_struct *napi,
                           struct sk_buff *skb);
extern struct sk_buff *napi_get_frags(struct napi_struct *napi);
extern int napi_frags_finish(struct napi_struct *napi,
                             struct sk_buff *skb, int ret);
extern struct sk_buff *napi_frags_skb(struct napi_struct *napi);
extern int napi_gro_frags(struct napi_struct *napi);

static inline void napi_free_frags(struct napi_struct *napi)
{
        kfree_skb(napi->skb);
        napi->skb = NULL;
}

extern void netif_nit_deliver(struct sk_buff *skb);
extern int dev_valid_name(const char *name);
extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
extern int dev_ethtool(struct net *net, struct ifreq *);
extern unsigned dev_get_flags(const struct net_device *);
extern int dev_change_flags(struct net_device *, unsigned);
extern int dev_change_name(struct net_device *, const char *);
extern int dev_set_alias(struct net_device *, const char *, size_t);
extern int dev_change_net_namespace(struct net_device *,
                                    struct net *, const char *);
extern int dev_set_mtu(struct net_device *, int);
extern int dev_set_mac_address(struct net_device *,
                               struct sockaddr *);
extern int dev_hard_start_xmit(struct sk_buff *skb,
                               struct net_device *dev,
                               struct netdev_queue *txq);

extern int netdev_budget;

extern void netdev_run_todo(void);

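/*
 * dev_put()/dev_hold() drop and take a reference on the device.  An
 * unregistered device is only released once its reference count has
 * fallen back to zero (see netdev_run_todo()).
 */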
static inline void dev_put(struct net_device *dev)
{
        atomic_dec(&dev->refcnt);
}

static inline void dev_hold(struct net_device *dev)
{
        atomic_inc(&dev->refcnt);
}

extern void linkwatch_fire_event(struct net_device *dev);

static inline int netif_carrier_ok(const struct net_device *dev)
{
        return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}

extern unsigned long dev_trans_start(struct net_device *dev);

extern void __netdev_watchdog_up(struct net_device *dev);

extern void netif_carrier_on(struct net_device *dev);

extern void netif_carrier_off(struct net_device *dev);

static inline void netif_dormant_on(struct net_device *dev)
{
        if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
                linkwatch_fire_event(dev);
}

static inline void netif_dormant_off(struct net_device *dev)
{
        if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
                linkwatch_fire_event(dev);
}

static inline int netif_dormant(const struct net_device *dev)
{
        return test_bit(__LINK_STATE_DORMANT, &dev->state);
}

static inline int netif_oper_up(const struct net_device *dev)
{
        return (dev->operstate == IF_OPER_UP ||
                dev->operstate == IF_OPER_UNKNOWN);
}

static inline int netif_device_present(struct net_device *dev)
{
        return test_bit(__LINK_STATE_PRESENT, &dev->state);
}

extern void netif_device_detach(struct net_device *dev);

extern void netif_device_attach(struct net_device *dev);

#define HAVE_NETIF_MSG 1

enum {
        NETIF_MSG_DRV           = 0x0001,
        NETIF_MSG_PROBE         = 0x0002,
        NETIF_MSG_LINK          = 0x0004,
        NETIF_MSG_TIMER         = 0x0008,
        NETIF_MSG_IFDOWN        = 0x0010,
        NETIF_MSG_IFUP          = 0x0020,
        NETIF_MSG_RX_ERR        = 0x0040,
        NETIF_MSG_TX_ERR        = 0x0080,
        NETIF_MSG_TX_QUEUED     = 0x0100,
        NETIF_MSG_INTR          = 0x0200,
        NETIF_MSG_TX_DONE       = 0x0400,
        NETIF_MSG_RX_STATUS     = 0x0800,
        NETIF_MSG_PKTDATA       = 0x1000,
        NETIF_MSG_HW            = 0x2000,
        NETIF_MSG_WOL           = 0x4000,
};

#define netif_msg_drv(p)        ((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)      ((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)       ((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)      ((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)     ((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)       ((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)     ((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)     ((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)  ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)       ((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)    ((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)  ((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)    ((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)         ((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)        ((p)->msg_enable & NETIF_MSG_WOL)

static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
        if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
                return default_msg_enable_bits;
        if (debug_value == 0)
                return 0;

        return (1 << debug_value) - 1;
}
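
/*
 * Illustrative sketch (not from the original header): drivers usually
 * feed a module parameter into netif_msg_init() at probe time and then
 * gate their diagnostics with the netif_msg_*() helpers.  The "debug"
 * parameter and the "priv->msg_enable" field are hypothetical driver
 * state; -1 means "use the driver's default mask".
 *
 *      static int debug = -1;
 *      module_param(debug, int, 0);
 *
 *      priv->msg_enable = netif_msg_init(debug,
 *                                        NETIF_MSG_DRV | NETIF_MSG_PROBE |
 *                                        NETIF_MSG_LINK);
 *
 *      if (netif_msg_link(priv))
 *              printk(KERN_INFO "%s: link is up\n", dev->name);
 */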

static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
        spin_lock(&txq->_xmit_lock);
        txq->xmit_lock_owner = cpu;
}

static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
        spin_lock_bh(&txq->_xmit_lock);
        txq->xmit_lock_owner = smp_processor_id();
}

static inline int __netif_tx_trylock(struct netdev_queue *txq)
{
        int ok = spin_trylock(&txq->_xmit_lock);
        if (likely(ok))
                txq->xmit_lock_owner = smp_processor_id();
        return ok;
}

static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
        txq->xmit_lock_owner = -1;
        spin_unlock(&txq->_xmit_lock);
}

static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
        txq->xmit_lock_owner = -1;
        spin_unlock_bh(&txq->_xmit_lock);
}

static inline void txq_trans_update(struct netdev_queue *txq)
{
        if (txq->xmit_lock_owner != -1)
                txq->trans_start = jiffies;
}

static inline void netif_tx_lock(struct net_device *dev)
{
        unsigned int i;
        int cpu;

        spin_lock(&dev->tx_global_lock);
        cpu = smp_processor_id();
        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

                __netif_tx_lock(txq, cpu);
                set_bit(__QUEUE_STATE_FROZEN, &txq->state);
                __netif_tx_unlock(txq);
        }
}

static inline void netif_tx_lock_bh(struct net_device *dev)
{
        local_bh_disable();
        netif_tx_lock(dev);
}

static inline void netif_tx_unlock(struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

                clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
                netif_schedule_queue(txq);
        }
        spin_unlock(&dev->tx_global_lock);
}

static inline void netif_tx_unlock_bh(struct net_device *dev)
{
        netif_tx_unlock(dev);
        local_bh_enable();
}

#define HARD_TX_LOCK(dev, txq, cpu) {                   \
        if ((dev->features & NETIF_F_LLTX) == 0) {      \
                __netif_tx_lock(txq, cpu);              \
        }                                               \
}

#define HARD_TX_UNLOCK(dev, txq) {                      \
        if ((dev->features & NETIF_F_LLTX) == 0) {      \
                __netif_tx_unlock(txq);                 \
        }                                               \
}

static inline void netif_tx_disable(struct net_device *dev)
{
        unsigned int i;
        int cpu;

        local_bh_disable();
        cpu = smp_processor_id();
        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

                __netif_tx_lock(txq, cpu);
                netif_tx_stop_queue(txq);
                __netif_tx_unlock(txq);
        }
        local_bh_enable();
}

static inline void netif_addr_lock(struct net_device *dev)
{
        spin_lock(&dev->addr_list_lock);
}

static inline void netif_addr_lock_bh(struct net_device *dev)
{
        spin_lock_bh(&dev->addr_list_lock);
}

static inline void netif_addr_unlock(struct net_device *dev)
{
        spin_unlock(&dev->addr_list_lock);
}

static inline void netif_addr_unlock_bh(struct net_device *dev)
{
        spin_unlock_bh(&dev->addr_list_lock);
}

#define for_each_dev_addr(dev, ha) \
        list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)

extern void ether_setup(struct net_device *dev);

extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
                                          void (*setup)(struct net_device *),
                                          unsigned int queue_count);
#define alloc_netdev(sizeof_priv, name, setup) \
        alloc_netdev_mq(sizeof_priv, name, setup, 1)
extern int register_netdev(struct net_device *dev);
extern void unregister_netdev(struct net_device *dev);

extern int dev_addr_add(struct net_device *dev, unsigned char *addr,
                        unsigned char addr_type);
extern int dev_addr_del(struct net_device *dev, unsigned char *addr,
                        unsigned char addr_type);
extern int dev_addr_add_multiple(struct net_device *to_dev,
                                 struct net_device *from_dev,
                                 unsigned char addr_type);
extern int dev_addr_del_multiple(struct net_device *to_dev,
                                 struct net_device *from_dev,
                                 unsigned char addr_type);

extern void dev_set_rx_mode(struct net_device *dev);
extern void __dev_set_rx_mode(struct net_device *dev);
extern int dev_unicast_delete(struct net_device *dev, void *addr);
extern int dev_unicast_add(struct net_device *dev, void *addr);
extern int dev_unicast_sync(struct net_device *to, struct net_device *from);
extern void dev_unicast_unsync(struct net_device *to, struct net_device *from);
extern int dev_mc_delete(struct net_device *dev, void *addr, int alen, int all);
extern int dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly);
extern int dev_mc_sync(struct net_device *to, struct net_device *from);
extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
extern int __dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all);
extern int __dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly);
extern int __dev_addr_sync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
extern void __dev_addr_unsync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
extern int dev_set_promiscuity(struct net_device *dev, int inc);
extern int dev_set_allmulti(struct net_device *dev, int inc);
extern void netdev_state_change(struct net_device *dev);
extern void netdev_bonding_change(struct net_device *dev,
                                  unsigned long event);
extern void netdev_features_change(struct net_device *dev);

extern void dev_load(struct net *net, const char *name);
extern void dev_mcast_init(void);
extern const struct net_device_stats *dev_get_stats(struct net_device *dev);

extern int netdev_max_backlog;
extern int weight_p;
extern int netdev_set_master(struct net_device *dev, struct net_device *master);
extern int skb_checksum_help(struct sk_buff *skb);
extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features);
#ifdef CONFIG_BUG
extern void netdev_rx_csum_fault(struct net_device *dev);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
}
#endif

extern void net_enable_timestamp(void);
extern void net_disable_timestamp(void);

#ifdef CONFIG_PROC_FS
extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
extern void dev_seq_stop(struct seq_file *seq, void *v);
#endif

extern int netdev_class_create_file(struct class_attribute *class_attr);
extern void netdev_class_remove_file(struct class_attribute *class_attr);

extern char *netdev_drivername(const struct net_device *dev, char *buffer, int len);

extern void linkwatch_run_queue(void);

unsigned long netdev_increment_features(unsigned long all, unsigned long one,
                                        unsigned long mask);
unsigned long netdev_fix_features(unsigned long features, const char *name);

static inline int net_gso_ok(int features, int gso_type)
{
        int feature = gso_type << NETIF_F_GSO_SHIFT;
        return (features & feature) == feature;
}

static inline int skb_gso_ok(struct sk_buff *skb, int features)
{
        return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
               (!skb_has_frags(skb) || (features & NETIF_F_FRAGLIST));
}

static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
{
        return skb_is_gso(skb) &&
               (!skb_gso_ok(skb, dev->features) ||
                unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
}

static inline void netif_set_gso_max_size(struct net_device *dev,
                                          unsigned int size)
{
        dev->gso_max_size = size;
}

static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
                                              struct net_device *master)
{
        if (skb->pkt_type == PACKET_HOST) {
                u16 *dest = (u16 *) eth_hdr(skb)->h_dest;

                memcpy(dest, master->dev_addr, ETH_ALEN);
        }
}

static inline int skb_bond_should_drop(struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;
        struct net_device *master = dev->master;

        if (master) {
                if (master->priv_flags & IFF_MASTER_ARPMON)
                        dev->last_rx = jiffies;

                if ((master->priv_flags & IFF_MASTER_ALB) && master->br_port) {
                        skb_bond_set_mac_by_master(skb, master);
                }

                if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
                        if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
                            skb->protocol == __cpu_to_be16(ETH_P_ARP))
                                return 0;

                        if (master->priv_flags & IFF_MASTER_ALB) {
                                if (skb->pkt_type != PACKET_BROADCAST &&
                                    skb->pkt_type != PACKET_MULTICAST)
                                        return 0;
                        }
                        if (master->priv_flags & IFF_MASTER_8023AD &&
                            skb->protocol == __cpu_to_be16(ETH_P_SLOW))
                                return 0;

                        return 1;
                }
        }
        return 0;
}

extern struct pernet_operations __net_initdata loopback_net_ops;

static inline int dev_ethtool_get_settings(struct net_device *dev,
                                           struct ethtool_cmd *cmd)
{
        if (!dev->ethtool_ops || !dev->ethtool_ops->get_settings)
                return -EOPNOTSUPP;
        return dev->ethtool_ops->get_settings(dev, cmd);
}

static inline u32 dev_ethtool_get_rx_csum(struct net_device *dev)
{
        if (!dev->ethtool_ops || !dev->ethtool_ops->get_rx_csum)
                return 0;
        return dev->ethtool_ops->get_rx_csum(dev);
}

static inline u32 dev_ethtool_get_flags(struct net_device *dev)
{
        if (!dev->ethtool_ops || !dev->ethtool_ops->get_flags)
                return 0;
        return dev->ethtool_ops->get_flags(dev);
}
#endif /* __KERNEL__ */

#endif /* _LINUX_NETDEVICE_H */