linux/include/linux/netdevice.h
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Definitions for the Interfaces handler.
 *
 * Version:     @(#)dev.h       1.0.10  08/12/93
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Corey Minyard <wf-rch!minyard@relay.EU.net>
 *              Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *              Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *              Bjorn Ekwall, <bj0rn@blox.se>
 *              Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 *              Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

#ifdef __KERNEL__
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <asm/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/device.h>
#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>

#include <linux/ethtool.h>
#include <net/net_namespace.h>
#include <net/dsa.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif

struct vlan_group;
struct netpoll_info;
/* 802.11 specific */
struct wireless_dev;
                                        /* source back-compat hooks */
#define SET_ETHTOOL_OPS(netdev,ops) \
        ( (netdev)->ethtool_ops = (ops) )

#define HAVE_ALLOC_NETDEV               /* feature macro: alloc_xxxdev
                                           functions are available. */
#define HAVE_FREE_NETDEV                /* free_netdev() */
#define HAVE_NETDEV_PRIV                /* netdev_priv() */

#define NET_XMIT_SUCCESS        0
#define NET_XMIT_DROP           1       /* skb dropped                  */
#define NET_XMIT_CN             2       /* congestion notification      */
#define NET_XMIT_POLICED        3       /* skb is shot by police        */
#define NET_XMIT_MASK           0xFFFF  /* qdisc flags in net/sch_generic.h */

/* Backlog congestion levels */
#define NET_RX_SUCCESS          0   /* keep 'em coming, baby */
#define NET_RX_DROP             1  /* packet dropped */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority, prompting us to send less aggressively. */
#define net_xmit_eval(e)        ((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)       ((e) != NET_XMIT_CN ? -ENOBUFS : 0)

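/*
 * Typical caller pattern (an illustrative sketch, not part of this header):
 * protocol output paths feed the dev_queue_xmit() result through
 * net_xmit_eval() so that local congestion is not reported upwards as a
 * packet loss:
 *
 *      err = dev_queue_xmit(skb);
 *      return net_xmit_eval(err);
 */
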
/* Driver transmit return codes */
enum netdev_tx {
        NETDEV_TX_OK = 0,       /* driver took care of packet */
        NETDEV_TX_BUSY,         /* driver tx path was busy */
        NETDEV_TX_LOCKED = -1,  /* driver tx lock was already taken */
};
typedef enum netdev_tx netdev_tx_t;

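/*
 * Illustrative sketch (hypothetical foo_* driver, not part of this header):
 * a driver's ndo_start_xmit maps onto these codes by returning
 * NETDEV_TX_BUSY *before* consuming the skb, so the core can requeue it,
 * and NETDEV_TX_OK once the skb has been handed to the hardware:
 *
 *      static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
 *                                        struct net_device *dev)
 *      {
 *              struct foo_priv *priv = netdev_priv(dev);
 *
 *              if (foo_tx_ring_full(priv))     (hypothetical helper)
 *                      return NETDEV_TX_BUSY;  (skb not consumed)
 *              foo_queue_to_hw(priv, skb);     (hypothetical helper)
 *              return NETDEV_TX_OK;            (skb consumed)
 *      }
 */
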
#endif

#define MAX_ADDR_LEN    32              /* Largest hardware address length */

#ifdef  __KERNEL__
/*
 *      Compute the worst case header length according to the protocols
 *      used.
 */

#if defined(CONFIG_WLAN_80211) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#elif defined(CONFIG_TR) || defined(CONFIG_TR_MODULE)
# define LL_MAX_HEADER 48
#else
# define LL_MAX_HEADER 32
#endif

#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \
    !defined(CONFIG_NET_IPGRE) &&  !defined(CONFIG_NET_IPGRE_MODULE) && \
    !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \
    !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

#endif  /*  __KERNEL__  */

/*
 *      Network device statistics. Akin to the 2.0 ether stats but
 *      with byte counters.
 */

struct net_device_stats
{
        unsigned long   rx_packets;             /* total packets received       */
        unsigned long   tx_packets;             /* total packets transmitted    */
        unsigned long   rx_bytes;               /* total bytes received         */
        unsigned long   tx_bytes;               /* total bytes transmitted      */
        unsigned long   rx_errors;              /* bad packets received         */
        unsigned long   tx_errors;              /* packet transmit problems     */
        unsigned long   rx_dropped;             /* no space in linux buffers    */
        unsigned long   tx_dropped;             /* no space available in linux  */
        unsigned long   multicast;              /* multicast packets received   */
        unsigned long   collisions;

        /* detailed rx_errors: */
        unsigned long   rx_length_errors;
        unsigned long   rx_over_errors;         /* receiver ring buff overflow  */
        unsigned long   rx_crc_errors;          /* recved pkt with crc error    */
        unsigned long   rx_frame_errors;        /* recv'd frame alignment error */
        unsigned long   rx_fifo_errors;         /* recv'r fifo overrun          */
        unsigned long   rx_missed_errors;       /* receiver missed packet       */

        /* detailed tx_errors */
        unsigned long   tx_aborted_errors;
        unsigned long   tx_carrier_errors;
        unsigned long   tx_fifo_errors;
        unsigned long   tx_heartbeat_errors;
        unsigned long   tx_window_errors;

        /* for cslip etc */
        unsigned long   rx_compressed;
        unsigned long   tx_compressed;
};


/* Media selection options. */
enum {
        IF_PORT_UNKNOWN = 0,
        IF_PORT_10BASE2,
        IF_PORT_10BASET,
        IF_PORT_AUI,
        IF_PORT_100BASET,
        IF_PORT_100BASETX,
        IF_PORT_100BASEFX
};

#ifdef __KERNEL__

#include <linux/cache.h>
#include <linux/skbuff.h>

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netif_rx_stats
{
        unsigned total;
        unsigned dropped;
        unsigned time_squeeze;
        unsigned cpu_collision;
};

DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);

struct dev_addr_list
{
        struct dev_addr_list    *next;
        u8                      da_addr[MAX_ADDR_LEN];
        u8                      da_addrlen;
        u8                      da_synced;
        int                     da_users;
        int                     da_gusers;
};

/*
 *      We tag multicasts with these structures.
 */

#define dev_mc_list     dev_addr_list
#define dmi_addr        da_addr
#define dmi_addrlen     da_addrlen
#define dmi_users       da_users
#define dmi_gusers      da_gusers

struct netdev_hw_addr {
        struct list_head        list;
        unsigned char           addr[MAX_ADDR_LEN];
        unsigned char           type;
#define NETDEV_HW_ADDR_T_LAN            1
#define NETDEV_HW_ADDR_T_SAN            2
#define NETDEV_HW_ADDR_T_SLAVE          3
#define NETDEV_HW_ADDR_T_UNICAST        4
        int                     refcount;
        bool                    synced;
        struct rcu_head         rcu_head;
};

struct netdev_hw_addr_list {
        struct list_head        list;
        int                     count;
};

struct hh_cache
{
        struct hh_cache *hh_next;       /* Next entry                        */
        atomic_t        hh_refcnt;      /* number of users                   */
/*
 * We want hh_output, hh_len, hh_lock and hh_data to be in a separate
 * cache line on SMP.
 * They are mostly read, but hh_refcnt may be changed quite frequently,
 * incurring cache line ping pongs.
 */
        __be16          hh_type ____cacheline_aligned_in_smp;
                                        /* protocol identifier, e.g. ETH_P_IP
                                         *  NOTE:  For VLANs, this will be the
                                         *  encapsulated type. --BLG
                                         */
        u16             hh_len;         /* length of header */
        int             (*hh_output)(struct sk_buff *skb);
        seqlock_t       hh_lock;

        /* cached hardware header; allow for machine alignment needs.        */
#define HH_DATA_MOD     16
#define HH_DATA_OFF(__len) \
        (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
        (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
        unsigned long   hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 *
 * LL_ALLOCATED_SPACE also takes into account the tailroom the device
 * may need.
 */
#define LL_RESERVED_SPACE(dev) \
        ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
        ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_ALLOCATED_SPACE(dev) \
        ((((dev)->hard_header_len+(dev)->needed_headroom+(dev)->needed_tailroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)

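/*
 * Typical caller pattern (an illustrative sketch, not part of this header):
 * code that builds a link-layer frame reserves LL_RESERVED_SPACE(dev) of
 * headroom first, so the hardware header can later be prepended without a
 * reallocation:
 *
 *      skb = alloc_skb(LL_ALLOCATED_SPACE(dev) + payload_len, GFP_ATOMIC);
 *      if (skb == NULL)
 *              return NULL;
 *      skb_reserve(skb, LL_RESERVED_SPACE(dev));
 *      skb_put(skb, payload_len);
 */
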
struct header_ops {
        int     (*create) (struct sk_buff *skb, struct net_device *dev,
                           unsigned short type, const void *daddr,
                           const void *saddr, unsigned len);
        int     (*parse)(const struct sk_buff *skb, unsigned char *haddr);
        int     (*rebuild)(struct sk_buff *skb);
#define HAVE_HEADER_CACHE
        int     (*cache)(const struct neighbour *neigh, struct hh_cache *hh);
        void    (*cache_update)(struct hh_cache *hh,
                                const struct net_device *dev,
                                const unsigned char *haddr);
};

/* These flag bits are private to the generic network queueing
 * layer, they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t
{
        __LINK_STATE_START,
        __LINK_STATE_PRESENT,
        __LINK_STATE_NOCARRIER,
        __LINK_STATE_LINKWATCH_PENDING,
        __LINK_STATE_DORMANT,
};


/*
 * This structure holds at boot time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
        char name[IFNAMSIZ];
        struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

extern int __init netdev_boot_setup(char *str);

/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
        /* The poll_list must only be managed by the entity which
         * changes the state of the NAPI_STATE_SCHED bit.  This means
         * whoever atomically sets that bit can add this napi_struct
         * to the per-cpu poll_list, and whoever clears that bit
         * can remove from the list right before clearing the bit.
         */
        struct list_head        poll_list;

        unsigned long           state;
        int                     weight;
        int                     (*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
        spinlock_t              poll_lock;
        int                     poll_owner;
#endif

        unsigned int            gro_count;

        struct net_device       *dev;
        struct list_head        dev_list;
        struct sk_buff          *gro_list;
        struct sk_buff          *skb;
};

enum
{
        NAPI_STATE_SCHED,       /* Poll is scheduled */
        NAPI_STATE_DISABLE,     /* Disable pending */
        NAPI_STATE_NPSVC,       /* Netpoll - don't dequeue from poll_list */
};

enum {
        GRO_MERGED,
        GRO_MERGED_FREE,
        GRO_HELD,
        GRO_NORMAL,
        GRO_DROP,
};

extern void __napi_schedule(struct napi_struct *n);

static inline int napi_disable_pending(struct napi_struct *n)
{
        return test_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *      napi_schedule_prep - check if napi can be scheduled
 *      @n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
static inline int napi_schedule_prep(struct napi_struct *n)
{
        return !napi_disable_pending(n) &&
                !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}

/**
 *      napi_schedule - schedule NAPI poll
 *      @n: napi context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
        if (napi_schedule_prep(n))
                __napi_schedule(n);
}

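/*
 * Illustrative sketch (hypothetical foo_* driver, not part of this header):
 * the usual pattern is to mask the device's receive interrupts in the
 * interrupt handler and defer the real work to the NAPI poll routine:
 *
 *      static irqreturn_t foo_interrupt(int irq, void *dev_id)
 *      {
 *              struct net_device *dev = dev_id;
 *              struct foo_priv *priv = netdev_priv(dev);
 *
 *              foo_disable_rx_irq(priv);       (hypothetical helper)
 *              napi_schedule(&priv->napi);
 *              return IRQ_HANDLED;
 *      }
 */
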
/* Try to reschedule poll. Called by dev->poll() after napi_complete().  */
static inline int napi_reschedule(struct napi_struct *napi)
{
        if (napi_schedule_prep(napi)) {
                __napi_schedule(napi);
                return 1;
        }
        return 0;
}

/**
 *      napi_complete - NAPI processing complete
 *      @n: napi context
 *
 * Mark NAPI processing as complete.
 */
extern void __napi_complete(struct napi_struct *n);
extern void napi_complete(struct napi_struct *n);

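/*
 * Illustrative sketch (hypothetical foo_* driver, not part of this header):
 * a poll routine processes at most @budget packets and only calls
 * napi_complete() and re-enables device interrupts once the ring is
 * drained below the budget:
 *
 *      static int foo_poll(struct napi_struct *napi, int budget)
 *      {
 *              struct foo_priv *priv =
 *                      container_of(napi, struct foo_priv, napi);
 *              int work_done;
 *
 *              work_done = foo_clean_rx_ring(priv, budget); (hypothetical)
 *              if (work_done < budget) {
 *                      napi_complete(napi);
 *                      foo_enable_rx_irq(priv);        (hypothetical helper)
 *              }
 *              return work_done;
 *      }
 */
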
/**
 *      napi_disable - prevent NAPI from scheduling
 *      @n: napi context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
static inline void napi_disable(struct napi_struct *n)
{
        set_bit(NAPI_STATE_DISABLE, &n->state);
        while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
                msleep(1);
        clear_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *      napi_enable - enable NAPI scheduling
 *      @n: napi context
 *
 * Allow NAPI to be scheduled on this context again.
 * Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
        BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
        smp_mb__before_clear_bit();
        clear_bit(NAPI_STATE_SCHED, &n->state);
}

#ifdef CONFIG_SMP
/**
 *      napi_synchronize - wait until NAPI is not running
 *      @n: napi context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
        while (test_bit(NAPI_STATE_SCHED, &n->state))
                msleep(1);
}
#else
# define napi_synchronize(n)    barrier()
#endif

enum netdev_queue_state_t
{
        __QUEUE_STATE_XOFF,
        __QUEUE_STATE_FROZEN,
};

struct netdev_queue {
/*
 * read mostly part
 */
        struct net_device       *dev;
        struct Qdisc            *qdisc;
        unsigned long           state;
        struct Qdisc            *qdisc_sleeping;
/*
 * write mostly part
 */
        spinlock_t              _xmit_lock ____cacheline_aligned_in_smp;
        int                     xmit_lock_owner;
        /*
         * please use this field instead of dev->trans_start
         */
        unsigned long           trans_start;
        unsigned long           tx_bytes;
        unsigned long           tx_packets;
        unsigned long           tx_dropped;
} ____cacheline_aligned_in_smp;


/*
 * This structure defines the management hooks for network devices.
 * The following hooks can be defined; unless noted otherwise, they are
 * optional and can be filled with a null pointer.
 *
 * int (*ndo_init)(struct net_device *dev);
 *     This function is called once when a network device is registered.
 *     The network device can use this for any late stage initialization
 *     or semantic validation. It can fail with an error code which will
 *     be propagated back to register_netdev.
 *
 * void (*ndo_uninit)(struct net_device *dev);
 *     This function is called when the device is unregistered or when
 *     registration fails. It is not called if init fails.
 *
 * int (*ndo_open)(struct net_device *dev);
 *     This function is called when a network device transitions to the up
 *     state.
 *
 * int (*ndo_stop)(struct net_device *dev);
 *     This function is called when a network device transitions to the down
 *     state.
 *
 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
 *                               struct net_device *dev);
 *      Called when a packet needs to be transmitted.
 *      Must return NETDEV_TX_OK or NETDEV_TX_BUSY.
 *        (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
 *      Required; cannot be NULL.
 *
 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb);
 *      Called to decide which queue to use when the device supports
 *      multiple transmit queues.
 *
 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
 *      This function is called to allow the device receiver to make
 *      changes to its configuration when multicast or promiscuous mode
 *      is enabled.
 *
 * void (*ndo_set_rx_mode)(struct net_device *dev);
 *      This function is called when the device changes its address list
 *      filtering.
 *
 * void (*ndo_set_multicast_list)(struct net_device *dev);
 *      This function is called when the multicast address list changes.
 *
 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
 *      This function is called when the Media Access Control address
 *      needs to be changed. If this interface is not defined, the
 *      MAC address cannot be changed.
 *
 * int (*ndo_validate_addr)(struct net_device *dev);
 *      Test if the Media Access Control address is valid for the device.
 *
 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *      Called when a user requests an ioctl which can't be handled by
 *      the generic interface code. If not defined, ioctls return a
 *      not-supported error code.
 *
 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
 *      Used to set network device bus interface parameters. This interface
 *      is retained for legacy reasons; new devices should use the bus
 *      interface (PCI) for low level management.
 *
 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
 *      Called when a user wants to change the Maximum Transfer Unit
 *      of a device. If not defined, any request to change the MTU will
 *      return an error.
 *
 * void (*ndo_tx_timeout)(struct net_device *dev);
 *      Callback used when the transmitter has not made any progress
 *      for dev->watchdog ticks.
 *
 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 *      Called when a user wants to get the network device usage
 *      statistics. If not defined, the counters in dev->stats will
 *      be used.
 *
 * void (*ndo_vlan_rx_register)(struct net_device *dev, struct vlan_group *grp);
 *      If the device supports VLAN receive acceleration
 *      (ie. dev->features & NETIF_F_HW_VLAN_RX), then this function is called
 *      when the vlan groups for the device change.  Note: grp is NULL
 *      if no vlan groups are being used.
 *
 * void (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid);
 *      If the device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
 *      this function is called when a VLAN id is registered.
 *
 * void (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
 *      If the device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
 *      this function is called when a VLAN id is unregistered.
 *
 * void (*ndo_poll_controller)(struct net_device *dev);
 */
#define HAVE_NET_DEVICE_OPS
struct net_device_ops {
        int                     (*ndo_init)(struct net_device *dev);
        void                    (*ndo_uninit)(struct net_device *dev);
        int                     (*ndo_open)(struct net_device *dev);
        int                     (*ndo_stop)(struct net_device *dev);
        netdev_tx_t             (*ndo_start_xmit) (struct sk_buff *skb,
                                                   struct net_device *dev);
        u16                     (*ndo_select_queue)(struct net_device *dev,
                                                    struct sk_buff *skb);
#define HAVE_CHANGE_RX_FLAGS
        void                    (*ndo_change_rx_flags)(struct net_device *dev,
                                                       int flags);
#define HAVE_SET_RX_MODE
        void                    (*ndo_set_rx_mode)(struct net_device *dev);
#define HAVE_MULTICAST
        void                    (*ndo_set_multicast_list)(struct net_device *dev);
#define HAVE_SET_MAC_ADDR
        int                     (*ndo_set_mac_address)(struct net_device *dev,
                                                       void *addr);
#define HAVE_VALIDATE_ADDR
        int                     (*ndo_validate_addr)(struct net_device *dev);
#define HAVE_PRIVATE_IOCTL
        int                     (*ndo_do_ioctl)(struct net_device *dev,
                                                struct ifreq *ifr, int cmd);
#define HAVE_SET_CONFIG
        int                     (*ndo_set_config)(struct net_device *dev,
                                                  struct ifmap *map);
#define HAVE_CHANGE_MTU
        int                     (*ndo_change_mtu)(struct net_device *dev,
                                                  int new_mtu);
        int                     (*ndo_neigh_setup)(struct net_device *dev,
                                                   struct neigh_parms *);
#define HAVE_TX_TIMEOUT
        void                    (*ndo_tx_timeout) (struct net_device *dev);

        struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);

        void                    (*ndo_vlan_rx_register)(struct net_device *dev,
                                                        struct vlan_group *grp);
        void                    (*ndo_vlan_rx_add_vid)(struct net_device *dev,
                                                       unsigned short vid);
        void                    (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
                                                        unsigned short vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
#define HAVE_NETDEV_POLL
        void                    (*ndo_poll_controller)(struct net_device *dev);
#endif
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
        int                     (*ndo_fcoe_enable)(struct net_device *dev);
        int                     (*ndo_fcoe_disable)(struct net_device *dev);
        int                     (*ndo_fcoe_ddp_setup)(struct net_device *dev,
                                                      u16 xid,
                                                      struct scatterlist *sgl,
                                                      unsigned int sgc);
        int                     (*ndo_fcoe_ddp_done)(struct net_device *dev,
                                                     u16 xid);
#endif
};

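/*
 * Illustrative sketch (hypothetical foo_* driver, not part of this header):
 * drivers define one static const ops table and point the device at it
 * before registration; eth_mac_addr, eth_change_mtu and eth_validate_addr
 * are the stock Ethernet helpers:
 *
 *      static const struct net_device_ops foo_netdev_ops = {
 *              .ndo_open               = foo_open,
 *              .ndo_stop               = foo_stop,
 *              .ndo_start_xmit         = foo_start_xmit,
 *              .ndo_set_multicast_list = foo_set_rx_mode,
 *              .ndo_set_mac_address    = eth_mac_addr,
 *              .ndo_change_mtu         = eth_change_mtu,
 *              .ndo_validate_addr      = eth_validate_addr,
 *      };
 *
 *      dev->netdev_ops = &foo_netdev_ops;
 */
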
/*
 *      The DEVICE structure.
 *      Actually, this whole structure is a big mistake.  It mixes I/O
 *      data with strictly "high-level" data, and it has to know about
 *      almost every data structure used in the INET module.
 *
 *      FIXME: cleanup struct net_device such that network protocol info
 *      moves out.
 */

struct net_device
{

        /*
         * This is the first field of the "visible" part of this structure
         * (i.e. as seen by users in the "Space.c" file).  It is the name
         * of the interface.
         */
        char                    name[IFNAMSIZ];
        /* device name hash chain */
        struct hlist_node       name_hlist;
        /* snmp alias */
        char                    *ifalias;

        /*
         *      I/O specific fields
         *      FIXME: Merge these and struct ifmap into one
         */
        unsigned long           mem_end;        /* shared mem end       */
        unsigned long           mem_start;      /* shared mem start     */
        unsigned long           base_addr;      /* device I/O address   */
        unsigned int            irq;            /* device IRQ number    */

        /*
         *      Some hardware also needs these fields, but they are not
         *      part of the usual set specified in Space.c.
         */

        unsigned char           if_port;        /* Selectable AUI, TP,..*/
        unsigned char           dma;            /* DMA channel          */

        unsigned long           state;

        struct list_head        dev_list;
        struct list_head        napi_list;

        /* Net device features */
        unsigned long           features;
#define NETIF_F_SG              1       /* Scatter/gather IO. */
#define NETIF_F_IP_CSUM         2       /* Can checksum TCP/UDP over IPv4. */
#define NETIF_F_NO_CSUM         4       /* Does not require checksum. E.g. loopback. */
#define NETIF_F_HW_CSUM         8       /* Can checksum all the packets. */
#define NETIF_F_IPV6_CSUM       16      /* Can checksum TCP/UDP over IPV6 */
#define NETIF_F_HIGHDMA         32      /* Can DMA to high memory. */
#define NETIF_F_FRAGLIST        64      /* Scatter/gather IO. */
#define NETIF_F_HW_VLAN_TX      128     /* Transmit VLAN hw acceleration */
#define NETIF_F_HW_VLAN_RX      256     /* Receive VLAN hw acceleration */
#define NETIF_F_HW_VLAN_FILTER  512     /* Receive filtering on VLAN */
#define NETIF_F_VLAN_CHALLENGED 1024    /* Device cannot handle VLAN packets */
#define NETIF_F_GSO             2048    /* Enable software GSO. */
#define NETIF_F_LLTX            4096    /* LockLess TX - deprecated. Please */
                                        /* do not use LLTX in new drivers */
#define NETIF_F_NETNS_LOCAL     8192    /* Does not change network namespaces */
#define NETIF_F_GRO             16384   /* Generic receive offload */
#define NETIF_F_LRO             32768   /* large receive offload */

/* the GSO_MASK reserves bits 16 through 23 */
#define NETIF_F_FCOE_CRC        (1 << 24) /* FCoE CRC32 */
#define NETIF_F_SCTP_CSUM       (1 << 25) /* SCTP checksum offload */
#define NETIF_F_FCOE_MTU        (1 << 26) /* Supports max FCoE MTU, 2158 bytes */

        /* Segmentation offload features */
#define NETIF_F_GSO_SHIFT       16
#define NETIF_F_GSO_MASK        0x00ff0000
#define NETIF_F_TSO             (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
#define NETIF_F_UFO             (SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
#define NETIF_F_GSO_ROBUST      (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO_ECN         (SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO6            (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
#define NETIF_F_FSO             (SKB_GSO_FCOE << NETIF_F_GSO_SHIFT)

        /* List of features with software fallbacks. */
#define NETIF_F_GSO_SOFTWARE    (NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)


#define NETIF_F_GEN_CSUM        (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
#define NETIF_F_V4_CSUM         (NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
#define NETIF_F_V6_CSUM         (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
#define NETIF_F_ALL_CSUM        (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)

        /*
         * If one device supports one of these features, then enable them
         * for all in netdev_increment_features.
         */
#define NETIF_F_ONE_FOR_ALL     (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
                                 NETIF_F_SG | NETIF_F_HIGHDMA |         \
                                 NETIF_F_FRAGLIST)

        /* Interface index. Unique device identifier    */
        int                     ifindex;
        int                     iflink;

        struct net_device_stats stats;

#ifdef CONFIG_WIRELESS_EXT
        /* List of functions to handle Wireless Extensions (instead of ioctl).
         * See <net/iw_handler.h> for details. Jean II */
        const struct iw_handler_def *   wireless_handlers;
        /* Instance data managed by the core of Wireless Extensions. */
        struct iw_public_data * wireless_data;
#endif
        /* Management operations */
        const struct net_device_ops *netdev_ops;
        const struct ethtool_ops *ethtool_ops;

        /* Hardware header description */
        const struct header_ops *header_ops;

        unsigned int            flags;  /* interface flags (a la BSD)   */
        unsigned short          gflags;
        unsigned short          priv_flags; /* Like 'flags' but invisible to userspace. */
        unsigned short          padded; /* How much padding added by alloc_netdev() */

        unsigned char           operstate; /* RFC2863 operstate */
        unsigned char           link_mode; /* mapping policy to operstate */

        unsigned                mtu;    /* interface MTU value          */
        unsigned short          type;   /* interface hardware type      */
        unsigned short          hard_header_len;        /* hardware hdr length  */

        /* extra head- and tailroom the hardware may need, but not in all cases
         * can this be guaranteed, especially tailroom. Some cases also use
         * LL_MAX_HEADER instead to allocate the skb.
         */
        unsigned short          needed_headroom;
        unsigned short          needed_tailroom;

        struct net_device       *master; /* Pointer to master device of a group,
                                          * which this device is member of.
                                          */

        /* Interface address info. */
        unsigned char           perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
        unsigned char           addr_len;       /* hardware address length      */
        unsigned short          dev_id;         /* for shared network cards */

        struct netdev_hw_addr_list      uc;     /* Secondary unicast
                                                   mac addresses */
        int                     uc_promisc;
        spinlock_t              addr_list_lock;
        struct dev_addr_list    *mc_list;       /* Multicast mac addresses      */
        int                     mc_count;       /* Number of installed mcasts   */
        unsigned int            promiscuity;
        unsigned int            allmulti;


        /* Protocol specific pointers */

#ifdef CONFIG_NET_DSA
        void                    *dsa_ptr;       /* dsa specific data */
#endif
        void                    *atalk_ptr;     /* AppleTalk link       */
        void                    *ip_ptr;        /* IPv4 specific data   */
        void                    *dn_ptr;        /* DECnet specific data */
        void                    *ip6_ptr;       /* IPv6 specific data */
        void                    *ec_ptr;        /* Econet specific data */
        void                    *ax25_ptr;      /* AX.25 specific data */
        struct wireless_dev     *ieee80211_ptr; /* IEEE 802.11 specific data,
                                                   assign before registering */

/*
 * Cache line mostly used on receive path (including eth_type_trans())
 */
        unsigned long           last_rx;        /* Time of last Rx      */
        /* Interface address info used in eth_type_trans() */
        unsigned char           *dev_addr;      /* hw address, (before bcast
                                                   because most packets are
                                                   unicast) */

        struct netdev_hw_addr_list      dev_addrs; /* list of device
                                                      hw addresses */

        unsigned char           broadcast[MAX_ADDR_LEN];        /* hw bcast add */

        struct netdev_queue     rx_queue;

        struct netdev_queue     *_tx ____cacheline_aligned_in_smp;

        /* Number of TX queues allocated at alloc_netdev_mq() time  */
        unsigned int            num_tx_queues;

        /* Number of TX queues currently active in device  */
        unsigned int            real_num_tx_queues;

        /* root qdisc from userspace point of view */
        struct Qdisc            *qdisc;

        unsigned long           tx_queue_len;   /* Max frames per queue allowed */
        spinlock_t              tx_global_lock;
/*
 * One part is mostly used on xmit path (device)
 */
        /* These may be needed for future network-power-down code. */

        /*
         * trans_start here is expensive for high speed devices on SMP,
         * please use netdev_queue->trans_start instead.
         */
        unsigned long           trans_start;    /* Time (in jiffies) of last Tx */

        int                     watchdog_timeo; /* used by dev_watchdog() */
        struct timer_list       watchdog_timer;

        /* Number of references to this device */
        atomic_t                refcnt ____cacheline_aligned_in_smp;

        /* delayed register/unregister */
        struct list_head        todo_list;
        /* device index hash chain */
        struct hlist_node       index_hlist;

        struct net_device       *link_watch_next;

        /* register/unregister state machine */
        enum { NETREG_UNINITIALIZED=0,
               NETREG_REGISTERED,       /* completed register_netdevice */
               NETREG_UNREGISTERING,    /* called unregister_netdevice */
               NETREG_UNREGISTERED,     /* completed unregister todo */
               NETREG_RELEASED,         /* called free_netdev */
               NETREG_DUMMY,            /* dummy device for NAPI poll */
        } reg_state;

        /* Called from unregister, can be used to call free_netdev */
        void (*destructor)(struct net_device *dev);

#ifdef CONFIG_NETPOLL
        struct netpoll_info     *npinfo;
#endif

#ifdef CONFIG_NET_NS
        /* Network namespace this network device is inside */
        struct net              *nd_net;
#endif

        /* mid-layer private */
        void                    *ml_priv;

        /* bridge stuff */
        struct net_bridge_port  *br_port;
        /* macvlan */
        struct macvlan_port     *macvlan_port;
        /* GARP */
        struct garp_port        *garp_port;

        /* class/net/name entry */
        struct device           dev;
        /* space for optional statistics and wireless sysfs groups */
        const struct attribute_group *sysfs_groups[3];

        /* rtnetlink link ops */
        const struct rtnl_link_ops *rtnl_link_ops;

        /* VLAN feature mask */
        unsigned long vlan_features;

        /* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE            65536
        unsigned int            gso_max_size;

#ifdef CONFIG_DCB
        /* Data Center Bridging netlink ops */
        struct dcbnl_rtnl_ops *dcbnl_ops;
#endif

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
        /* max exchange id for FCoE LRO by ddp */
        unsigned int            fcoe_ddp_xid;
#endif
};
#define to_net_dev(d) container_of(d, struct net_device, dev)

#define NETDEV_ALIGN            32

static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
                                         unsigned int index)
{
        return &dev->_tx[index];
}

static inline void netdev_for_each_tx_queue(struct net_device *dev,
                                            void (*f)(struct net_device *,
                                                      struct netdev_queue *,
                                                      void *),
                                            void *arg)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++)
                f(dev, &dev->_tx[i], arg);
}

/*
 * Net namespace inlines
 */
static inline
struct net *dev_net(const struct net_device *dev)
{
#ifdef CONFIG_NET_NS
        return dev->nd_net;
#else
        return &init_net;
#endif
}

static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
#ifdef CONFIG_NET_NS
        release_net(dev->nd_net);
        dev->nd_net = hold_net(net);
#endif
}

static inline bool netdev_uses_dsa_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_DSA
        if (dev->dsa_ptr != NULL)
                return dsa_uses_dsa_tags(dev->dsa_ptr);
#endif

        return 0;
}

static inline bool netdev_uses_trailer_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_TRAILER
        if (dev->dsa_ptr != NULL)
                return dsa_uses_trailer_tags(dev->dsa_ptr);
#endif

        return 0;
}

/**
 *      netdev_priv - access network device private data
 *      @dev: network device
 *
 * Get network device private data
 */
static inline void *netdev_priv(const struct net_device *dev)
{
        return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
}

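/*
 * Typical caller pattern (an illustrative sketch; struct foo_priv is a
 * hypothetical driver private area, not part of this header). The private
 * area is allocated together with the net_device, so drivers size it at
 * allocation time and never free it separately:
 *
 *      dev = alloc_etherdev(sizeof(struct foo_priv));
 *      if (!dev)
 *              return -ENOMEM;
 *      priv = netdev_priv(dev);
 */
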
/* Set the sysfs physical device reference for the network logical device;
 * if set prior to registration, a symlink will be created during
 * initialization.
 */
#define SET_NETDEV_DEV(net, pdev)       ((net)->dev.parent = (pdev))

/* Set the sysfs device type for the network logical device to allow
 * fine-grained identification of different network device types. For
 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
 */
#define SET_NETDEV_DEVTYPE(net, devtype)        ((net)->dev.type = (devtype))

/**
 *      netif_napi_add - initialize a napi context
 *      @dev:  network device
 *      @napi: napi context
 *      @poll: polling function
 *      @weight: default weight
 *
 * netif_napi_add() must be used to initialize a napi context prior to calling
 * *any* of the other napi related functions.
 */
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
                    int (*poll)(struct napi_struct *, int), int weight);

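/*
 * Typical caller pattern (an illustrative sketch; foo_poll is a
 * hypothetical poll routine, not part of this header): wire the poll
 * routine in during probe, before the device is registered. 64 is a
 * commonly used default weight:
 *
 *      netif_napi_add(dev, &priv->napi, foo_poll, 64);
 */
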
/**
 *  netif_napi_del - remove a napi context
 *  @napi: napi context
 *
 *  netif_napi_del() removes a napi context from the network device napi list
 */
void netif_napi_del(struct napi_struct *napi);

struct napi_gro_cb {
        /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
        void *frag0;

        /* Length of frag0. */
        unsigned int frag0_len;

        /* This indicates where we are processing relative to skb->data. */
        int data_offset;

        /* This is non-zero if the packet may be of the same flow. */
        int same_flow;

        /* This is non-zero if the packet cannot be merged with the new skb. */
        int flush;

        /* Number of segments aggregated. */
        int count;

        /* Free the skb? */
        int free;
};

#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)

struct packet_type {
        __be16                  type;   /* This is really htons(ether_type). */
        struct net_device       *dev;   /* NULL is wildcarded here           */
        int                     (*func) (struct sk_buff *,
                                         struct net_device *,
                                         struct packet_type *,
                                         struct net_device *);
        struct sk_buff          *(*gso_segment)(struct sk_buff *skb,
                                                int features);
        int                     (*gso_send_check)(struct sk_buff *skb);
        struct sk_buff          **(*gro_receive)(struct sk_buff **head,
                                               struct sk_buff *skb);
        int                     (*gro_complete)(struct sk_buff *skb);
        void                    *af_packet_priv;
        struct list_head        list;
};

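/*
 * Typical caller pattern (an illustrative sketch; foo_rcv is a
 * hypothetical handler, not part of this header): a protocol registers a
 * receive handler for its ethertype with dev_add_pack(); using ETH_P_ALL
 * instead taps every packet:
 *
 *      static struct packet_type foo_packet_type = {
 *              .type = cpu_to_be16(ETH_P_IP),
 *              .func = foo_rcv,
 *      };
 *
 *      dev_add_pack(&foo_packet_type);
 */
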
#include <linux/interrupt.h>
#include <linux/notifier.h>

extern rwlock_t                         dev_base_lock;          /* Device list lock */


#define for_each_netdev(net, d)         \
                list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n) \
                list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d)                \
                list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define net_device_entry(lh)    list_entry(lh, struct net_device, dev_list)

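/*
 * Typical caller pattern (an illustrative sketch, not part of this
 * header): a walk over the device list must hold dev_base_lock (or the
 * RTNL) for its duration:
 *
 *      struct net_device *d;
 *
 *      read_lock(&dev_base_lock);
 *      for_each_netdev(net, d)
 *              printk(KERN_DEBUG "%s\n", d->name);
 *      read_unlock(&dev_base_lock);
 */
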
static inline struct net_device *next_net_device(struct net_device *dev)
{
        struct list_head *lh;
        struct net *net;

        net = dev_net(dev);
        lh = dev->dev_list.next;
        return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *first_net_device(struct net *net)
{
        return list_empty(&net->dev_base_head) ? NULL :
                net_device_entry(net->dev_base_head.next);
}

extern int                      netdev_boot_setup_check(struct net_device *dev);
extern unsigned long            netdev_boot_base(const char *prefix, int unit);
extern struct net_device    *dev_getbyhwaddr(struct net *net, unsigned short type, char *hwaddr);
extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern void             dev_add_pack(struct packet_type *pt);
extern void             dev_remove_pack(struct packet_type *pt);
extern void             __dev_remove_pack(struct packet_type *pt);

extern struct net_device        *dev_get_by_flags(struct net *net, unsigned short flags,
                                                  unsigned short mask);
extern struct net_device        *dev_get_by_name(struct net *net, const char *name);
extern struct net_device        *__dev_get_by_name(struct net *net, const char *name);
extern int              dev_alloc_name(struct net_device *dev, const char *name);
extern int              dev_open(struct net_device *dev);
extern int              dev_close(struct net_device *dev);
extern void             dev_disable_lro(struct net_device *dev);
extern int              dev_queue_xmit(struct sk_buff *skb);
extern int              register_netdevice(struct net_device *dev);
extern void             unregister_netdevice(struct net_device *dev);
extern void             free_netdev(struct net_device *dev);
extern void             synchronize_net(void);
extern int              register_netdevice_notifier(struct notifier_block *nb);
extern int              unregister_netdevice_notifier(struct notifier_block *nb);
extern int              init_dummy_netdev(struct net_device *dev);
extern void             netdev_resync_ops(struct net_device *dev);

extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
extern struct net_device        *dev_get_by_index(struct net *net, int ifindex);
extern struct net_device        *__dev_get_by_index(struct net *net, int ifindex);
extern int              dev_restart(struct net_device *dev);
#ifdef CONFIG_NETPOLL_TRAP
extern int              netpoll_trap(void);
#endif
extern int             skb_gro_receive(struct sk_buff **head,
                                       struct sk_buff *skb);
extern void            skb_gro_reset_offset(struct sk_buff *skb);

static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
        return NAPI_GRO_CB(skb)->data_offset;
}

static inline unsigned int skb_gro_len(const struct sk_buff *skb)
{
        return skb->len - NAPI_GRO_CB(skb)->data_offset;
}

static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
{
        NAPI_GRO_CB(skb)->data_offset += len;
}

static inline void *skb_gro_header_fast(struct sk_buff *skb,
                                        unsigned int offset)
{
        return NAPI_GRO_CB(skb)->frag0 + offset;
}

static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
{
        return NAPI_GRO_CB(skb)->frag0_len < hlen;
}

static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
                                        unsigned int offset)
{
        NAPI_GRO_CB(skb)->frag0 = NULL;
        NAPI_GRO_CB(skb)->frag0_len = 0;
        return pskb_may_pull(skb, hlen) ? skb->data + offset : NULL;
}

static inline void *skb_gro_mac_header(struct sk_buff *skb)
{
        return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb);
}

static inline void *skb_gro_network_header(struct sk_buff *skb)
{
        return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
               skb_network_offset(skb);
}

static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
                                  unsigned short type,
                                  const void *daddr, const void *saddr,
                                  unsigned len)
{
        if (!dev->header_ops || !dev->header_ops->create)
                return 0;

        return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
}

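/*
 * Typical caller pattern (an illustrative sketch, not part of this
 * header): protocols call dev_hard_header() to prepend the link-layer
 * header after reserving headroom for it; passing a NULL source address
 * uses the device's own address:
 *
 *      skb_reserve(skb, LL_RESERVED_SPACE(dev));
 *      ...
 *      if (dev_hard_header(skb, dev, ETH_P_IP, daddr, NULL, skb->len) < 0)
 *              goto out_free;
 */
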
static inline int dev_parse_header(const struct sk_buff *skb,
                                   unsigned char *haddr)
{
        const struct net_device *dev = skb->dev;

        if (!dev->header_ops || !dev->header_ops->parse)
                return 0;
        return dev->header_ops->parse(skb, haddr);
}

typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
extern int              register_gifconf(unsigned int family, gifconf_func_t * gifconf);
static inline int unregister_gifconf(unsigned int family)
{
        return register_gifconf(family, NULL);
}

/*
 * Incoming packets are placed on per-cpu queues so that
 * no locking is needed.
 */
struct softnet_data
{
        struct Qdisc            *output_queue;
        struct sk_buff_head     input_pkt_queue;
        struct list_head        poll_list;
        struct sk_buff          *completion_queue;

        struct napi_struct      backlog;
};

DECLARE_PER_CPU(struct softnet_data,softnet_data);

#define HAVE_NETIF_QUEUE

extern void __netif_schedule(struct Qdisc *q);

static inline void netif_schedule_queue(struct netdev_queue *txq)
{
        if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
                __netif_schedule(txq->qdisc);
}

static inline void netif_tx_schedule_all(struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++)
                netif_schedule_queue(netdev_get_tx_queue(dev, i));
}

static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
        clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}

/**
 *      netif_start_queue - allow transmit
 *      @dev: network device
 *
 *      Allow upper layers to call the device hard_start_xmit routine.
 */
static inline void netif_start_queue(struct net_device *dev)
{
        netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_start_all_queues(struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
                netif_tx_start_queue(txq);
        }
}

static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_NETPOLL_TRAP
        if (netpoll_trap()) {
                netif_tx_start_queue(dev_queue);
                return;
        }
#endif
        if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
                __netif_schedule(dev_queue->qdisc);
}

/**
 *      netif_wake_queue - restart transmit
 *      @dev: network device
 *
 *      Allow upper layers to call the device hard_start_xmit routine.
 *      Used for flow control when transmit resources are available.
 */
static inline void netif_wake_queue(struct net_device *dev)
{
        netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_wake_all_queues(struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
                netif_tx_wake_queue(txq);
        }
}

static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
        set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}

/**
 *      netif_stop_queue - stop the transmit queue
 *      @dev: network device
 *
 *      Stop upper layers calling the device hard_start_xmit routine.
 *      Used for flow control when transmit resources are unavailable.
 */
static inline void netif_stop_queue(struct net_device *dev)
{
        netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}

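/*
 * Illustrative sketch (hypothetical foo_* helpers, not part of this
 * header): the classic TX flow-control pattern stops the queue from the
 * transmit path when the ring fills up, and wakes it from the
 * TX-completion handler once descriptors have been reclaimed:
 *
 *      in foo_start_xmit():
 *              if (foo_tx_ring_almost_full(priv))
 *                      netif_stop_queue(dev);
 *
 *      in the TX completion interrupt:
 *              if (netif_queue_stopped(dev) && foo_tx_ring_has_room(priv))
 *                      netif_wake_queue(dev);
 */
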
static inline void netif_tx_stop_all_queues(struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
                netif_tx_stop_queue(txq);
        }
}

static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
        return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}

/**
 *      netif_queue_stopped - test if transmit queue is flowblocked
 *      @dev: network device
 *
 *      Test if transmit queue on device is currently unable to send.
 */
static inline int netif_queue_stopped(const struct net_device *dev)
{
        return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}

static inline int netif_tx_queue_frozen(const struct netdev_queue *dev_queue)
{
        return test_bit(__QUEUE_STATE_FROZEN, &dev_queue->state);
}

/**
 *      netif_running - test if up
 *      @dev: network device
 *
 *      Test if the device has been brought up.
 */
static inline int netif_running(const struct net_device *dev)
{
        return test_bit(__LINK_STATE_START, &dev->state);
}

/*
 * Routines to manage the subqueues on a device.  We only need start,
 * stop, and a check if it's stopped.  All other device management is
 * done at the overall netdevice level.
 * There is also a test for whether the device is multiqueue.
 */

1371/**
1372 *      netif_start_subqueue - allow sending packets on subqueue
1373 *      @dev: network device
1374 *      @queue_index: sub queue index
1375 *
1376 * Start individual transmit queue of a device with multiple transmit queues.
1377 */
1378static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
1379{
1380        struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
1381
1382        netif_tx_start_queue(txq);
1383}
1384
1385/**
1386 *      netif_stop_subqueue - stop sending packets on subqueue
1387 *      @dev: network device
1388 *      @queue_index: sub queue index
1389 *
1390 * Stop individual transmit queue of a device with multiple transmit queues.
1391 */
1392static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
1393{
1394        struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
1395#ifdef CONFIG_NETPOLL_TRAP
1396        if (netpoll_trap())
1397                return;
1398#endif
1399        netif_tx_stop_queue(txq);
1400}
1401
/**
 *      __netif_subqueue_stopped - test status of subqueue
 *      @dev: network device
 *      @queue_index: sub queue index
 *
 * Check individual transmit queue of a device with multiple transmit queues.
 */
static inline int __netif_subqueue_stopped(const struct net_device *dev,
                                           u16 queue_index)
{
        struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

        return netif_tx_queue_stopped(txq);
}
1416
1417static inline int netif_subqueue_stopped(const struct net_device *dev,
1418                                         struct sk_buff *skb)
1419{
1420        return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
1421}
1422
1423/**
1424 *      netif_wake_subqueue - allow sending packets on subqueue
1425 *      @dev: network device
1426 *      @queue_index: sub queue index
1427 *
1428 * Resume individual transmit queue of a device with multiple transmit queues.
1429 */
1430static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
1431{
1432        struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
1433#ifdef CONFIG_NETPOLL_TRAP
1434        if (netpoll_trap())
1435                return;
1436#endif
1437        if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state))
1438                __netif_schedule(txq->qdisc);
1439}
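
/*
 * Example (editorial sketch): per-queue flow control in a multiqueue
 * driver.  skb_get_queue_mapping() selects the ring; the completion
 * handler wakes only the subqueue it serviced.  foo_priv (with its
 * ring[] array), foo_enqueue() and foo_ring_full() are hypothetical.
 */
#if 0   /* illustrative only, not compiled */
static netdev_tx_t foo_mq_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct foo_priv *priv = netdev_priv(dev);
        u16 q = skb_get_queue_mapping(skb);

        foo_enqueue(&priv->ring[q], skb);
        if (foo_ring_full(&priv->ring[q]))
                netif_stop_subqueue(dev, q);
        return NETDEV_TX_OK;
}

static void foo_tx_complete(struct net_device *dev, u16 q)
{
        /* Descriptors were just freed; restart this subqueue if stopped. */
        if (__netif_subqueue_stopped(dev, q))
                netif_wake_subqueue(dev, q);
}
#endif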
1440
1441/**
1442 *      netif_is_multiqueue - test if device has multiple transmit queues
1443 *      @dev: network device
1444 *
1445 * Check if device has multiple transmit queues
1446 */
1447static inline int netif_is_multiqueue(const struct net_device *dev)
1448{
1449        return (dev->num_tx_queues > 1);
1450}
1451
/* Use this variant when it is known for sure that the caller is
 * executing from hardware interrupt context or with hardware interrupts
 * disabled.
 */
extern void dev_kfree_skb_irq(struct sk_buff *skb);

/* Use this variant in places where the caller might be in either
 * hardware interrupt or other context, with hardware interrupts
 * either disabled or enabled.
 */
extern void dev_kfree_skb_any(struct sk_buff *skb);
1463
1464#define HAVE_NETIF_RX 1
1465extern int              netif_rx(struct sk_buff *skb);
1466extern int              netif_rx_ni(struct sk_buff *skb);
1467#define HAVE_NETIF_RECEIVE_SKB 1
1468extern int              netif_receive_skb(struct sk_buff *skb);
1469extern void             napi_gro_flush(struct napi_struct *napi);
1470extern int              dev_gro_receive(struct napi_struct *napi,
1471                                        struct sk_buff *skb);
1472extern int              napi_skb_finish(int ret, struct sk_buff *skb);
1473extern int              napi_gro_receive(struct napi_struct *napi,
1474                                         struct sk_buff *skb);
1475extern void             napi_reuse_skb(struct napi_struct *napi,
1476                                       struct sk_buff *skb);
1477extern struct sk_buff * napi_get_frags(struct napi_struct *napi);
1478extern int              napi_frags_finish(struct napi_struct *napi,
1479                                          struct sk_buff *skb, int ret);
1480extern struct sk_buff * napi_frags_skb(struct napi_struct *napi);
1481extern int              napi_gro_frags(struct napi_struct *napi);
1482
1483static inline void napi_free_frags(struct napi_struct *napi)
1484{
1485        kfree_skb(napi->skb);
1486        napi->skb = NULL;
1487}
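
/*
 * Example (editorial sketch): a NAPI poll routine feeding received
 * packets through GRO.  napi_complete() is declared earlier in this
 * file; foo_priv and foo_rx_pop() are hypothetical.
 */
#if 0   /* illustrative only, not compiled */
static int foo_poll(struct napi_struct *napi, int budget)
{
        struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
        struct sk_buff *skb;
        int work = 0;

        while (work < budget && (skb = foo_rx_pop(priv)) != NULL) {
                napi_gro_receive(napi, skb);    /* may merge with held skbs */
                work++;
        }

        if (work < budget)
                napi_complete(napi);    /* done: re-enable RX interrupts */
        return work;
}
#endif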
1488
1489extern void             netif_nit_deliver(struct sk_buff *skb);
1490extern int              dev_valid_name(const char *name);
1491extern int              dev_ioctl(struct net *net, unsigned int cmd, void __user *);
1492extern int              dev_ethtool(struct net *net, struct ifreq *);
1493extern unsigned         dev_get_flags(const struct net_device *);
1494extern int              dev_change_flags(struct net_device *, unsigned);
1495extern int              dev_change_name(struct net_device *, const char *);
1496extern int              dev_set_alias(struct net_device *, const char *, size_t);
1497extern int              dev_change_net_namespace(struct net_device *,
1498                                                 struct net *, const char *);
1499extern int              dev_set_mtu(struct net_device *, int);
1500extern int              dev_set_mac_address(struct net_device *,
1501                                            struct sockaddr *);
1502extern int              dev_hard_start_xmit(struct sk_buff *skb,
1503                                            struct net_device *dev,
1504                                            struct netdev_queue *txq);
1505
1506extern int              netdev_budget;
1507
1508/* Called by rtnetlink.c:rtnl_unlock() */
1509extern void netdev_run_todo(void);
1510
1511/**
1512 *      dev_put - release reference to device
1513 *      @dev: network device
1514 *
1515 * Release reference to device to allow it to be freed.
1516 */
1517static inline void dev_put(struct net_device *dev)
1518{
1519        atomic_dec(&dev->refcnt);
1520}
1521
1522/**
1523 *      dev_hold - get reference to device
1524 *      @dev: network device
1525 *
1526 * Hold reference to device to keep it from being freed.
1527 */
1528static inline void dev_hold(struct net_device *dev)
1529{
1530        atomic_inc(&dev->refcnt);
1531}
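
/*
 * Example (editorial sketch): dev_get_by_name() (declared earlier in
 * this file) returns with a reference already held, so every successful
 * lookup must be balanced by dev_put() once the device is no longer
 * needed.
 */
#if 0   /* illustrative only, not compiled */
static void foo_inspect(struct net *net)
{
        struct net_device *dev;

        dev = dev_get_by_name(net, "eth0");     /* refcnt held on success */
        if (!dev)
                return;

        /* ... dev cannot be freed while we hold the reference ... */

        dev_put(dev);
}
#endif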
1532
/* Carrier loss detection, dial on demand. The functions netif_carrier_on
 * and _off may be called from IRQ context, but it is the caller
 * who is responsible for serializing these calls.
 *
 * The name 'carrier' is inappropriate; these functions should really be
 * called netif_lowerlayer_*() because they represent the state of any
 * kind of lower layer, not just hardware media.
 */
1541
1542extern void linkwatch_fire_event(struct net_device *dev);
1543
1544/**
1545 *      netif_carrier_ok - test if carrier present
1546 *      @dev: network device
1547 *
1548 * Check if carrier is present on device
1549 */
1550static inline int netif_carrier_ok(const struct net_device *dev)
1551{
1552        return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
1553}
1554
1555extern unsigned long dev_trans_start(struct net_device *dev);
1556
1557extern void __netdev_watchdog_up(struct net_device *dev);
1558
1559extern void netif_carrier_on(struct net_device *dev);
1560
1561extern void netif_carrier_off(struct net_device *dev);
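
/*
 * Example (editorial sketch): typical link-state handling from a
 * driver's link-change interrupt.  foo_phy_link_up() is hypothetical.
 */
#if 0   /* illustrative only, not compiled */
static void foo_link_change(struct net_device *dev)
{
        if (foo_phy_link_up(dev))
                netif_carrier_on(dev);  /* restarts the TX watchdog */
        else
                netif_carrier_off(dev); /* stack stops sending via dev */
}
#endif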
1562
1563/**
1564 *      netif_dormant_on - mark device as dormant.
1565 *      @dev: network device
1566 *
1567 * Mark device as dormant (as per RFC2863).
1568 *
1569 * The dormant state indicates that the relevant interface is not
1570 * actually in a condition to pass packets (i.e., it is not 'up') but is
1571 * in a "pending" state, waiting for some external event.  For "on-
1572 * demand" interfaces, this new state identifies the situation where the
1573 * interface is waiting for events to place it in the up state.
1574 *
1575 */
1576static inline void netif_dormant_on(struct net_device *dev)
1577{
1578        if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
1579                linkwatch_fire_event(dev);
1580}
1581
/**
 *      netif_dormant_off - mark device as not dormant.
 *      @dev: network device
 *
 * Clear the dormant state (as per RFC2863) on the device.
 */
1588static inline void netif_dormant_off(struct net_device *dev)
1589{
1590        if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
1591                linkwatch_fire_event(dev);
1592}
1593
/**
 *      netif_dormant - test if device is dormant
 *      @dev: network device
 *
 * Check if the device is dormant.
 */
1600static inline int netif_dormant(const struct net_device *dev)
1601{
1602        return test_bit(__LINK_STATE_DORMANT, &dev->state);
1603}
1604
/**
 *      netif_oper_up - test if device is operational
 *      @dev: network device
 *
 * Check if the device's RFC2863 operational state is up.
 */
static inline int netif_oper_up(const struct net_device *dev)
{
        return (dev->operstate == IF_OPER_UP ||
                dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}
1616
1617/**
1618 *      netif_device_present - is device available or removed
1619 *      @dev: network device
1620 *
1621 * Check if device has not been removed from system.
1622 */
1623static inline int netif_device_present(struct net_device *dev)
1624{
1625        return test_bit(__LINK_STATE_PRESENT, &dev->state);
1626}
1627
1628extern void netif_device_detach(struct net_device *dev);
1629
1630extern void netif_device_attach(struct net_device *dev);
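
/*
 * Example (editorial sketch): the usual suspend/resume pairing for a
 * PCI network driver.  foo_hw_down() and foo_hw_up() are hypothetical.
 */
#if 0   /* illustrative only, not compiled */
static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);

        netif_device_detach(dev);       /* mark !present, stop TX queues */
        foo_hw_down(dev);
        return 0;
}

static int foo_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);

        foo_hw_up(dev);
        netif_device_attach(dev);       /* mark present, restart if running */
        return 0;
}
#endif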
1631
1632/*
1633 * Network interface message level settings
1634 */
1635#define HAVE_NETIF_MSG 1
1636
1637enum {
1638        NETIF_MSG_DRV           = 0x0001,
1639        NETIF_MSG_PROBE         = 0x0002,
1640        NETIF_MSG_LINK          = 0x0004,
1641        NETIF_MSG_TIMER         = 0x0008,
1642        NETIF_MSG_IFDOWN        = 0x0010,
1643        NETIF_MSG_IFUP          = 0x0020,
1644        NETIF_MSG_RX_ERR        = 0x0040,
1645        NETIF_MSG_TX_ERR        = 0x0080,
1646        NETIF_MSG_TX_QUEUED     = 0x0100,
1647        NETIF_MSG_INTR          = 0x0200,
1648        NETIF_MSG_TX_DONE       = 0x0400,
1649        NETIF_MSG_RX_STATUS     = 0x0800,
1650        NETIF_MSG_PKTDATA       = 0x1000,
1651        NETIF_MSG_HW            = 0x2000,
1652        NETIF_MSG_WOL           = 0x4000,
1653};
1654
1655#define netif_msg_drv(p)        ((p)->msg_enable & NETIF_MSG_DRV)
1656#define netif_msg_probe(p)      ((p)->msg_enable & NETIF_MSG_PROBE)
1657#define netif_msg_link(p)       ((p)->msg_enable & NETIF_MSG_LINK)
1658#define netif_msg_timer(p)      ((p)->msg_enable & NETIF_MSG_TIMER)
1659#define netif_msg_ifdown(p)     ((p)->msg_enable & NETIF_MSG_IFDOWN)
1660#define netif_msg_ifup(p)       ((p)->msg_enable & NETIF_MSG_IFUP)
1661#define netif_msg_rx_err(p)     ((p)->msg_enable & NETIF_MSG_RX_ERR)
1662#define netif_msg_tx_err(p)     ((p)->msg_enable & NETIF_MSG_TX_ERR)
1663#define netif_msg_tx_queued(p)  ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
1664#define netif_msg_intr(p)       ((p)->msg_enable & NETIF_MSG_INTR)
1665#define netif_msg_tx_done(p)    ((p)->msg_enable & NETIF_MSG_TX_DONE)
1666#define netif_msg_rx_status(p)  ((p)->msg_enable & NETIF_MSG_RX_STATUS)
1667#define netif_msg_pktdata(p)    ((p)->msg_enable & NETIF_MSG_PKTDATA)
1668#define netif_msg_hw(p)         ((p)->msg_enable & NETIF_MSG_HW)
1669#define netif_msg_wol(p)        ((p)->msg_enable & NETIF_MSG_WOL)
1670
1671static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
1672{
1673        /* use default */
1674        if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
1675                return default_msg_enable_bits;
1676        if (debug_value == 0)   /* no output */
1677                return 0;
1678        /* set low N bits */
1679        return (1 << debug_value) - 1;
1680}
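
/*
 * Example (editorial sketch): wiring a module "debug" parameter into
 * msg_enable at probe time, then testing a class bit before logging.
 * foo_priv is hypothetical; the default bits are arbitrary.
 */
#if 0   /* illustrative only, not compiled */
static int debug = -1;          /* -1 means: use the driver defaults */
module_param(debug, int, 0);

static void foo_init_msg(struct net_device *dev)
{
        struct foo_priv *priv = netdev_priv(dev);

        priv->msg_enable = netif_msg_init(debug,
                                          NETIF_MSG_DRV | NETIF_MSG_LINK);

        if (netif_msg_drv(priv))
                printk(KERN_INFO "%s: message level 0x%x\n",
                       dev->name, priv->msg_enable);
}
#endif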
1681
1682static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
1683{
1684        spin_lock(&txq->_xmit_lock);
1685        txq->xmit_lock_owner = cpu;
1686}
1687
1688static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
1689{
1690        spin_lock_bh(&txq->_xmit_lock);
1691        txq->xmit_lock_owner = smp_processor_id();
1692}
1693
1694static inline int __netif_tx_trylock(struct netdev_queue *txq)
1695{
1696        int ok = spin_trylock(&txq->_xmit_lock);
1697        if (likely(ok))
1698                txq->xmit_lock_owner = smp_processor_id();
1699        return ok;
1700}
1701
1702static inline void __netif_tx_unlock(struct netdev_queue *txq)
1703{
1704        txq->xmit_lock_owner = -1;
1705        spin_unlock(&txq->_xmit_lock);
1706}
1707
1708static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
1709{
1710        txq->xmit_lock_owner = -1;
1711        spin_unlock_bh(&txq->_xmit_lock);
1712}
1713
1714static inline void txq_trans_update(struct netdev_queue *txq)
1715{
1716        if (txq->xmit_lock_owner != -1)
1717                txq->trans_start = jiffies;
1718}
1719
/**
 *      netif_tx_lock - grab network device transmit lock
 *      @dev: network device
 *
 * Get the network device transmit lock and mark every transmit queue
 * frozen, synchronizing with any in-flight hard_start_xmit calls.
 */
1726static inline void netif_tx_lock(struct net_device *dev)
1727{
1728        unsigned int i;
1729        int cpu;
1730
1731        spin_lock(&dev->tx_global_lock);
1732        cpu = smp_processor_id();
1733        for (i = 0; i < dev->num_tx_queues; i++) {
1734                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
1735
1736                /* We are the only thread of execution doing a
1737                 * freeze, but we have to grab the _xmit_lock in
1738                 * order to synchronize with threads which are in
1739                 * the ->hard_start_xmit() handler and already
1740                 * checked the frozen bit.
1741                 */
1742                __netif_tx_lock(txq, cpu);
1743                set_bit(__QUEUE_STATE_FROZEN, &txq->state);
1744                __netif_tx_unlock(txq);
1745        }
1746}
1747
1748static inline void netif_tx_lock_bh(struct net_device *dev)
1749{
1750        local_bh_disable();
1751        netif_tx_lock(dev);
1752}
1753
1754static inline void netif_tx_unlock(struct net_device *dev)
1755{
1756        unsigned int i;
1757
1758        for (i = 0; i < dev->num_tx_queues; i++) {
1759                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
1760
1761                /* No need to grab the _xmit_lock here.  If the
1762                 * queue is not stopped for another reason, we
1763                 * force a schedule.
1764                 */
1765                clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
1766                netif_schedule_queue(txq);
1767        }
1768        spin_unlock(&dev->tx_global_lock);
1769}
1770
1771static inline void netif_tx_unlock_bh(struct net_device *dev)
1772{
1773        netif_tx_unlock(dev);
1774        local_bh_enable();
1775}
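
/*
 * Example (editorial sketch): freezing all TX queues around a ring
 * reconfiguration so no ->hard_start_xmit() runs concurrently.
 * foo_reset_rings() is hypothetical.
 */
#if 0   /* illustrative only, not compiled */
static void foo_reinit(struct net_device *dev)
{
        netif_tx_lock_bh(dev);          /* freeze every TX queue */
        foo_reset_rings(dev);
        netif_tx_unlock_bh(dev);        /* unfreeze and reschedule queues */
}
#endif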
1776
#define HARD_TX_LOCK(dev, txq, cpu) do {                \
        if ((dev->features & NETIF_F_LLTX) == 0) {      \
                __netif_tx_lock(txq, cpu);              \
        }                                               \
} while (0)

#define HARD_TX_UNLOCK(dev, txq) do {                   \
        if ((dev->features & NETIF_F_LLTX) == 0) {      \
                __netif_tx_unlock(txq);                 \
        }                                               \
} while (0)
1788
1789static inline void netif_tx_disable(struct net_device *dev)
1790{
1791        unsigned int i;
1792        int cpu;
1793
1794        local_bh_disable();
1795        cpu = smp_processor_id();
1796        for (i = 0; i < dev->num_tx_queues; i++) {
1797                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
1798
1799                __netif_tx_lock(txq, cpu);
1800                netif_tx_stop_queue(txq);
1801                __netif_tx_unlock(txq);
1802        }
1803        local_bh_enable();
1804}
1805
1806static inline void netif_addr_lock(struct net_device *dev)
1807{
1808        spin_lock(&dev->addr_list_lock);
1809}
1810
1811static inline void netif_addr_lock_bh(struct net_device *dev)
1812{
1813        spin_lock_bh(&dev->addr_list_lock);
1814}
1815
1816static inline void netif_addr_unlock(struct net_device *dev)
1817{
1818        spin_unlock(&dev->addr_list_lock);
1819}
1820
1821static inline void netif_addr_unlock_bh(struct net_device *dev)
1822{
1823        spin_unlock_bh(&dev->addr_list_lock);
1824}
1825
/*
 * dev_addrs walker.  Should be used only for read access.  Must be
 * called with rcu_read_lock() held.
 */
1830#define for_each_dev_addr(dev, ha) \
1831                list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
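
/*
 * Example (editorial sketch): a read-side walk of the address list.
 * The printout is illustrative; %pM prints a MAC address.
 */
#if 0   /* illustrative only, not compiled */
static void foo_dump_addrs(struct net_device *dev)
{
        struct netdev_hw_addr *ha;

        rcu_read_lock();
        for_each_dev_addr(dev, ha)
                printk(KERN_DEBUG "%s: addr %pM\n", dev->name, ha->addr);
        rcu_read_unlock();
}
#endif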
1832
/* These functions live elsewhere (drivers/net/net_init.c), but are related. */
1834
1835extern void             ether_setup(struct net_device *dev);
1836
1837/* Support for loadable net-drivers */
1838extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
1839                                       void (*setup)(struct net_device *),
1840                                       unsigned int queue_count);
1841#define alloc_netdev(sizeof_priv, name, setup) \
1842        alloc_netdev_mq(sizeof_priv, name, setup, 1)
1843extern int              register_netdev(struct net_device *dev);
1844extern void             unregister_netdev(struct net_device *dev);
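
/*
 * Example (editorial sketch): the canonical bring-up sequence for an
 * Ethernet device, with free_netdev() (declared earlier in this file)
 * on the error path.  struct foo_priv is hypothetical.
 */
#if 0   /* illustrative only, not compiled */
static int foo_create(void)
{
        struct net_device *dev;
        int err;

        dev = alloc_netdev(sizeof(struct foo_priv), "foo%d", ether_setup);
        if (!dev)
                return -ENOMEM;

        err = register_netdev(dev);
        if (err) {
                free_netdev(dev);
                return err;
        }
        return 0;
}
#endif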
1845
1846/* Functions used for device addresses handling */
1847extern int dev_addr_add(struct net_device *dev, unsigned char *addr,
1848                        unsigned char addr_type);
1849extern int dev_addr_del(struct net_device *dev, unsigned char *addr,
1850                        unsigned char addr_type);
1851extern int dev_addr_add_multiple(struct net_device *to_dev,
1852                                 struct net_device *from_dev,
1853                                 unsigned char addr_type);
1854extern int dev_addr_del_multiple(struct net_device *to_dev,
1855                                 struct net_device *from_dev,
1856                                 unsigned char addr_type);
1857
1858/* Functions used for secondary unicast and multicast support */
1859extern void             dev_set_rx_mode(struct net_device *dev);
1860extern void             __dev_set_rx_mode(struct net_device *dev);
1861extern int              dev_unicast_delete(struct net_device *dev, void *addr);
1862extern int              dev_unicast_add(struct net_device *dev, void *addr);
1863extern int              dev_unicast_sync(struct net_device *to, struct net_device *from);
1864extern void             dev_unicast_unsync(struct net_device *to, struct net_device *from);
1865extern int              dev_mc_delete(struct net_device *dev, void *addr, int alen, int all);
1866extern int              dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly);
1867extern int              dev_mc_sync(struct net_device *to, struct net_device *from);
1868extern void             dev_mc_unsync(struct net_device *to, struct net_device *from);
1869extern int              __dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all);
1870extern int              __dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly);
1871extern int              __dev_addr_sync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
1872extern void             __dev_addr_unsync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
1873extern int              dev_set_promiscuity(struct net_device *dev, int inc);
1874extern int              dev_set_allmulti(struct net_device *dev, int inc);
1875extern void             netdev_state_change(struct net_device *dev);
1876extern void             netdev_bonding_change(struct net_device *dev,
1877                                              unsigned long event);
1878extern void             netdev_features_change(struct net_device *dev);
/* Load a device's driver via kmod */
1880extern void             dev_load(struct net *net, const char *name);
1881extern void             dev_mcast_init(void);
1882extern const struct net_device_stats *dev_get_stats(struct net_device *dev);
1883
1884extern int              netdev_max_backlog;
1885extern int              weight_p;
1886extern int              netdev_set_master(struct net_device *dev, struct net_device *master);
1887extern int skb_checksum_help(struct sk_buff *skb);
1888extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features);
1889#ifdef CONFIG_BUG
1890extern void netdev_rx_csum_fault(struct net_device *dev);
1891#else
1892static inline void netdev_rx_csum_fault(struct net_device *dev)
1893{
1894}
1895#endif
1896/* rx skb timestamps */
1897extern void             net_enable_timestamp(void);
1898extern void             net_disable_timestamp(void);
1899
1900#ifdef CONFIG_PROC_FS
1901extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
1902extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
1903extern void dev_seq_stop(struct seq_file *seq, void *v);
1904#endif
1905
1906extern int netdev_class_create_file(struct class_attribute *class_attr);
1907extern void netdev_class_remove_file(struct class_attribute *class_attr);
1908
1909extern char *netdev_drivername(const struct net_device *dev, char *buffer, int len);
1910
1911extern void linkwatch_run_queue(void);
1912
1913unsigned long netdev_increment_features(unsigned long all, unsigned long one,
1914                                        unsigned long mask);
1915unsigned long netdev_fix_features(unsigned long features, const char *name);
1916
1917static inline int net_gso_ok(int features, int gso_type)
1918{
1919        int feature = gso_type << NETIF_F_GSO_SHIFT;
1920        return (features & feature) == feature;
1921}
1922
1923static inline int skb_gso_ok(struct sk_buff *skb, int features)
1924{
1925        return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
1926               (!skb_has_frags(skb) || (features & NETIF_F_FRAGLIST));
1927}
1928
1929static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
1930{
1931        return skb_is_gso(skb) &&
1932               (!skb_gso_ok(skb, dev->features) ||
1933                unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
1934}
1935
1936static inline void netif_set_gso_max_size(struct net_device *dev,
1937                                          unsigned int size)
1938{
1939        dev->gso_max_size = size;
1940}
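
/*
 * Example (editorial sketch): a device whose DMA engine cannot take the
 * default 64KB GSO payload might clamp it at probe time.  The 16KB
 * figure is purely illustrative.
 */
#if 0   /* illustrative only, not compiled */
static void foo_init_gso(struct net_device *dev)
{
        netif_set_gso_max_size(dev, 16 * 1024);
}
#endif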
1941
static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
                                              struct net_device *master)
{
        if (skb->pkt_type == PACKET_HOST)
                memcpy(eth_hdr(skb)->h_dest, master->dev_addr, ETH_ALEN);
}
1951
1952/* On bonding slaves other than the currently active slave, suppress
1953 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
1954 * ARP on active-backup slaves with arp_validate enabled.
1955 */
1956static inline int skb_bond_should_drop(struct sk_buff *skb)
1957{
1958        struct net_device *dev = skb->dev;
1959        struct net_device *master = dev->master;
1960
1961        if (master) {
1962                if (master->priv_flags & IFF_MASTER_ARPMON)
1963                        dev->last_rx = jiffies;
1964
1965                if ((master->priv_flags & IFF_MASTER_ALB) && master->br_port) {
                        /* Unmangle the address: the local destination
                         * address will always be the one the master has.
                         * This gives the right behaviour in a bridge.
                         */
1970                        skb_bond_set_mac_by_master(skb, master);
1971                }
1972
1973                if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
1974                        if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
1975                            skb->protocol == __cpu_to_be16(ETH_P_ARP))
1976                                return 0;
1977
1978                        if (master->priv_flags & IFF_MASTER_ALB) {
1979                                if (skb->pkt_type != PACKET_BROADCAST &&
1980                                    skb->pkt_type != PACKET_MULTICAST)
1981                                        return 0;
1982                        }
1983                        if (master->priv_flags & IFF_MASTER_8023AD &&
1984                            skb->protocol == __cpu_to_be16(ETH_P_SLOW))
1985                                return 0;
1986
1987                        return 1;
1988                }
1989        }
1990        return 0;
1991}
1992
1993extern struct pernet_operations __net_initdata loopback_net_ops;
1994
1995static inline int dev_ethtool_get_settings(struct net_device *dev,
1996                                           struct ethtool_cmd *cmd)
1997{
1998        if (!dev->ethtool_ops || !dev->ethtool_ops->get_settings)
1999                return -EOPNOTSUPP;
2000        return dev->ethtool_ops->get_settings(dev, cmd);
2001}
2002
2003static inline u32 dev_ethtool_get_rx_csum(struct net_device *dev)
2004{
2005        if (!dev->ethtool_ops || !dev->ethtool_ops->get_rx_csum)
2006                return 0;
2007        return dev->ethtool_ops->get_rx_csum(dev);
2008}
2009
2010static inline u32 dev_ethtool_get_flags(struct net_device *dev)
2011{
2012        if (!dev->ethtool_ops || !dev->ethtool_ops->get_flags)
2013                return 0;
2014        return dev->ethtool_ops->get_flags(dev);
2015}
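
/*
 * Example (editorial sketch): these wrappers let in-kernel callers
 * query ethtool state without checking for NULL ops themselves.
 */
#if 0   /* illustrative only, not compiled */
static int foo_link_is_gigabit(struct net_device *dev)
{
        struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };

        if (dev_ethtool_get_settings(dev, &cmd))
                return 0;       /* -EOPNOTSUPP: treat as unknown */
        return cmd.speed == SPEED_1000;
}
#endif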
2016#endif /* __KERNEL__ */
2017
2018#endif  /* _LINUX_NETDEVICE_H */
2019