linux/drivers/net/vmxnet3/vmxnet3_int.h
/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
 *
 */

#ifndef _VMXNET3_INT_H
#define _VMXNET3_INT_H

#include <linux/ethtool.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/ioport.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/uaccess.h>
#include <asm/dma.h>
#include <asm/page.h>

#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
#include <asm/checksum.h>
#include <linux/if_vlan.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>

#include "vmxnet3_defs.h"

#ifdef DEBUG
# define VMXNET3_DRIVER_VERSION_REPORT VMXNET3_DRIVER_VERSION_STRING"-NAPI(debug)"
#else
# define VMXNET3_DRIVER_VERSION_REPORT VMXNET3_DRIVER_VERSION_STRING"-NAPI"
#endif


/*
 * Version numbers
 */
#define VMXNET3_DRIVER_VERSION_STRING   "1.0.25.0-k"
/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
#define VMXNET3_DRIVER_VERSION_NUM      0x01001900
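
/*
 * Worked example of the encoding above: 0x01001900 reads, byte by byte from
 * most to least significant, as 0x01, 0x00, 0x19 (= 25), 0x00, i.e. driver
 * version 1.0.25.0, matching VMXNET3_DRIVER_VERSION_STRING.
 */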

#if defined(CONFIG_PCI_MSI)
        /* RSS only makes sense if MSI-X is supported. */
        #define VMXNET3_RSS
#endif

/*
 * Capabilities
 */

enum {
        VMNET_CAP_SG            = 0x0001, /* Can do scatter-gather transmits. */
        VMNET_CAP_IP4_CSUM      = 0x0002, /* Can checksum only TCP/UDP over
                                           * IPv4 */
        VMNET_CAP_HW_CSUM       = 0x0004, /* Can checksum all packets. */
        VMNET_CAP_HIGH_DMA      = 0x0008, /* Can DMA to high memory. */
        VMNET_CAP_TOE           = 0x0010, /* Supports TCP/IP offload. */
        VMNET_CAP_TSO           = 0x0020, /* Supports TCP Segmentation
                                           * offload */
        VMNET_CAP_SW_TSO        = 0x0040, /* Supports SW TCP Segmentation */
        VMNET_CAP_VMXNET_APROM  = 0x0080, /* Vmxnet APROM support */
        VMNET_CAP_HW_TX_VLAN    = 0x0100, /* Can we do VLAN tagging in HW */
        VMNET_CAP_HW_RX_VLAN    = 0x0200, /* Can we do VLAN untagging in HW */
        VMNET_CAP_SW_VLAN       = 0x0400, /* VLAN tagging/untagging in SW */
        VMNET_CAP_WAKE_PCKT_RCV = 0x0800, /* Can wake on network packet recv? */
        VMNET_CAP_ENABLE_INT_INLINE = 0x1000,  /* Enable Interrupt Inline */
        VMNET_CAP_ENABLE_HEADER_COPY = 0x2000,  /* copy header for vmkernel */
        VMNET_CAP_TX_CHAIN      = 0x4000, /* Guest can use multiple tx entries
                                          * for a pkt */
        VMNET_CAP_RX_CHAIN      = 0x8000, /* pkt can span multiple rx entries */
        VMNET_CAP_LPD           = 0x10000, /* large pkt delivery */
        VMNET_CAP_BPF           = 0x20000, /* BPF Support in VMXNET Virtual HW*/
        VMNET_CAP_SG_SPAN_PAGES = 0x40000, /* Scatter-gather transmits can
                                            * span multiple pages */
        VMNET_CAP_IP6_CSUM      = 0x80000, /* Can do IPv6 csum offload. */
        VMNET_CAP_TSO6         = 0x100000, /* TSO seg. offload for IPv6 pkts. */
        VMNET_CAP_TSO256k      = 0x200000, /* Can do TSO seg offload for */
                                           /* pkts up to 256kB. */
        VMNET_CAP_UPT          = 0x400000  /* Support UPT */
};
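
/*
 * Each VMNET_CAP_* value above is a single bit, so capabilities are combined
 * and tested as a bitmask; e.g. VMNET_CAP_SG | VMNET_CAP_TSO == 0x0001 |
 * 0x0020 == 0x0021.  Minimal illustrative sketch (caps and enable_tso() are
 * hypothetical, not part of this driver):
 *
 *      u32 caps = VMNET_CAP_SG | VMNET_CAP_TSO;
 *      if (caps & VMNET_CAP_TSO)
 *              enable_tso();
 */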

/*
 * PCI vendor and device IDs.
 */
#define PCI_VENDOR_ID_VMWARE            0x15AD
#define PCI_DEVICE_ID_VMWARE_VMXNET3    0x07B0
#define MAX_ETHERNET_CARDS              10
#define MAX_PCI_PASSTHRU_DEVICE         6

struct vmxnet3_cmd_ring {
        union Vmxnet3_GenericDesc *base;
        u32             size;
        u32             next2fill;
        u32             next2comp;
        u8              gen;
        dma_addr_t      basePA;
};

static inline void
vmxnet3_cmd_ring_adv_next2fill(struct vmxnet3_cmd_ring *ring)
{
        ring->next2fill++;
        if (unlikely(ring->next2fill == ring->size)) {
                ring->next2fill = 0;
                VMXNET3_FLIP_RING_GEN(ring->gen);
        }
}

static inline void
vmxnet3_cmd_ring_adv_next2comp(struct vmxnet3_cmd_ring *ring)
{
        VMXNET3_INC_RING_IDX_ONLY(ring->next2comp, ring->size);
}

static inline int
vmxnet3_cmd_ring_desc_avail(struct vmxnet3_cmd_ring *ring)
{
        return (ring->next2comp > ring->next2fill ? 0 : ring->size) +
                ring->next2comp - ring->next2fill - 1;
}
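
/*
 * Worked example of the accounting above: with size == 512, next2comp == 5
 * and next2fill == 10 there are 5 descriptors outstanding (indices 5..9), so
 *
 *      (5 > 10 ? 0 : 512) + 5 - 10 - 1 == 506
 *
 * descriptors remain available.  One slot is always kept unused so that a
 * full ring (0 available) is never confused with an empty one
 * (next2fill == next2comp, size - 1 available).  The gen bit is flipped by
 * vmxnet3_cmd_ring_adv_next2fill() on every wrap so newly written
 * descriptors can be told apart from stale ones.
 */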

struct vmxnet3_comp_ring {
        union Vmxnet3_GenericDesc *base;
        u32               size;
        u32               next2proc;
        u8                gen;
        u8                intr_idx;
        dma_addr_t           basePA;
};

static inline void
vmxnet3_comp_ring_adv_next2proc(struct vmxnet3_comp_ring *ring)
{
        ring->next2proc++;
        if (unlikely(ring->next2proc == ring->size)) {
                ring->next2proc = 0;
                VMXNET3_FLIP_RING_GEN(ring->gen);
        }
}
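
/*
 * Completion descriptors are consumed by comparing their generation bit with
 * ring->gen; the bit flips on every wrap, so entries left over from the
 * previous pass around the ring are ignored.  Minimal sketch of a consumer
 * loop, assuming rcd points into the completion ring and process_rcd() is a
 * hypothetical placeholder for the real completion handling:
 *
 *      rcd = &ring->base[ring->next2proc].rcd;
 *      while (rcd->gen == ring->gen) {
 *              process_rcd(rcd);
 *              vmxnet3_comp_ring_adv_next2proc(ring);
 *              rcd = &ring->base[ring->next2proc].rcd;
 *      }
 */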

struct vmxnet3_tx_data_ring {
        struct Vmxnet3_TxDataDesc *base;
        u32              size;
        dma_addr_t          basePA;
};

enum vmxnet3_buf_map_type {
        VMXNET3_MAP_INVALID = 0,
        VMXNET3_MAP_NONE,
        VMXNET3_MAP_SINGLE,
        VMXNET3_MAP_PAGE,
};

struct vmxnet3_tx_buf_info {
        u32      map_type;
        u16      len;
        u16      sop_idx;
        dma_addr_t  dma_addr;
        struct sk_buff *skb;
};

struct vmxnet3_tq_driver_stats {
        u64 drop_total;     /* # of pkts dropped by the driver; the
                                * counters below track drops due to
                                * different reasons
                                */
        u64 drop_too_many_frags;
        u64 drop_oversized_hdr;
        u64 drop_hdr_inspect_err;
        u64 drop_tso;

        u64 tx_ring_full;
        u64 linearized;         /* # of pkts linearized */
        u64 copy_skb_header;    /* # of times we have to copy skb header */
        u64 oversized_hdr;
};

struct vmxnet3_tx_ctx {
        bool   ipv4;
        u16 mss;
        u32 eth_ip_hdr_size; /* only valid for pkts requesting tso or csum
                                 * offloading
                                 */
        u32 l4_hdr_size;     /* only valid if mss != 0 */
        u32 copy_size;       /* # of bytes copied into the data ring */
        union Vmxnet3_GenericDesc *sop_txd;
        union Vmxnet3_GenericDesc *eop_txd;
};

struct vmxnet3_tx_queue {
        char                    name[IFNAMSIZ+8]; /* To identify interrupt */
        struct vmxnet3_adapter          *adapter;
        spinlock_t                      tx_lock;
        struct vmxnet3_cmd_ring         tx_ring;
        struct vmxnet3_tx_buf_info      *buf_info;
        struct vmxnet3_tx_data_ring     data_ring;
        struct vmxnet3_comp_ring        comp_ring;
        struct Vmxnet3_TxQueueCtrl      *shared;
        struct vmxnet3_tq_driver_stats  stats;
        bool                            stopped;
        int                             num_stop;  /* # of times the queue is
                                                    * stopped */
        int                             qid;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));

enum vmxnet3_rx_buf_type {
        VMXNET3_RX_BUF_NONE = 0,
        VMXNET3_RX_BUF_SKB = 1,
        VMXNET3_RX_BUF_PAGE = 2
};

struct vmxnet3_rx_buf_info {
        enum vmxnet3_rx_buf_type buf_type;
        u16     len;
        union {
                struct sk_buff *skb;
                struct page    *page;
        };
        dma_addr_t dma_addr;
};

struct vmxnet3_rx_ctx {
        struct sk_buff *skb;
        u32 sop_idx;
};

struct vmxnet3_rq_driver_stats {
        u64 drop_total;
        u64 drop_err;
        u64 drop_fcs;
        u64 rx_buf_alloc_failure;
};

struct vmxnet3_rx_queue {
        char                    name[IFNAMSIZ + 8]; /* To identify interrupt */
        struct vmxnet3_adapter    *adapter;
        struct napi_struct        napi;
        struct vmxnet3_cmd_ring   rx_ring[2];
        struct vmxnet3_comp_ring  comp_ring;
        struct vmxnet3_rx_ctx     rx_ctx;
        u32 qid;            /* rqID in RCD for buffer from 1st ring */
        u32 qid2;           /* rqID in RCD for buffer from 2nd ring */
        u32 uncommitted[2]; /* # of buffers allocated since last RXPROD
                                * update */
        struct vmxnet3_rx_buf_info     *buf_info[2];
        struct Vmxnet3_RxQueueCtrl            *shared;
        struct vmxnet3_rq_driver_stats  stats;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));

#define VMXNET3_DEVICE_MAX_TX_QUEUES 8
#define VMXNET3_DEVICE_MAX_RX_QUEUES 8   /* Keep this value as a power of 2 */

/* Should be less than UPT1_RSS_MAX_IND_TABLE_SIZE */
#define VMXNET3_RSS_IND_TABLE_SIZE (VMXNET3_DEVICE_MAX_RX_QUEUES * 4)

#define VMXNET3_LINUX_MAX_MSIX_VECT     (VMXNET3_DEVICE_MAX_TX_QUEUES + \
                                         VMXNET3_DEVICE_MAX_RX_QUEUES + 1)
#define VMXNET3_LINUX_MIN_MSIX_VECT     2 /* 1 for tx-rx pair and 1 for event */


struct vmxnet3_intr {
        enum vmxnet3_intr_mask_mode  mask_mode;
        enum vmxnet3_intr_type       type;      /* MSI-X, MSI, or INTx? */
        u8  num_intrs;                  /* # of intr vectors */
        u8  event_intr_idx;             /* idx of the intr vector for event */
        u8  mod_levels[VMXNET3_LINUX_MAX_MSIX_VECT]; /* moderation level */
        char    event_msi_vector_name[IFNAMSIZ+11];
#ifdef CONFIG_PCI_MSI
        struct msix_entry msix_entries[VMXNET3_LINUX_MAX_MSIX_VECT];
#endif
};

/* Interrupt sharing schemes, share_intr */
#define VMXNET3_INTR_BUDDYSHARE 0    /* Corresponding tx,rx queues share irq */
#define VMXNET3_INTR_TXSHARE 1       /* All tx queues share one irq */
#define VMXNET3_INTR_DONTSHARE 2     /* each queue has its own irq */


#define VMXNET3_STATE_BIT_RESETTING   0
#define VMXNET3_STATE_BIT_QUIESCED    1
struct vmxnet3_adapter {
        struct vmxnet3_tx_queue         tx_queue[VMXNET3_DEVICE_MAX_TX_QUEUES];
        struct vmxnet3_rx_queue         rx_queue[VMXNET3_DEVICE_MAX_RX_QUEUES];
        struct vlan_group               *vlan_grp;
        struct vmxnet3_intr             intr;
        spinlock_t                      cmd_lock;
        struct Vmxnet3_DriverShared     *shared;
        struct Vmxnet3_PMConf           *pm_conf;
        struct Vmxnet3_TxQueueDesc      *tqd_start;     /* all tx queue desc */
        struct Vmxnet3_RxQueueDesc      *rqd_start;     /* all rx queue desc */
        struct net_device               *netdev;
        struct net_device_stats         net_stats;
        struct pci_dev                  *pdev;

        u8                      __iomem *hw_addr0; /* for BAR 0 */
        u8                      __iomem *hw_addr1; /* for BAR 1 */

        /* feature control */
        bool                            rxcsum;
        bool                            lro;
        bool                            jumbo_frame;
#ifdef VMXNET3_RSS
        struct UPT1_RSSConf             *rss_conf;
        bool                            rss;
#endif
        u32                             num_rx_queues;
        u32                             num_tx_queues;

        /* rx buffer related */
        unsigned                        skb_buf_size;
        int             rx_buf_per_pkt;  /* only applies to the 1st ring */
        dma_addr_t                      shared_pa;
        dma_addr_t queue_desc_pa;

        /* Wake-on-LAN */
        u32     wol;

        /* Link speed */
        u32     link_speed; /* in mbps */

        u64     tx_timeout_count;
        struct work_struct work;

        unsigned long  state;    /* VMXNET3_STATE_BIT_xxx */

        int dev_number;
        int share_intr;
};

#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val)  \
        writel((val), (adapter)->hw_addr0 + (reg))
#define VMXNET3_READ_BAR0_REG(adapter, reg)        \
        readl((adapter)->hw_addr0 + (reg))

#define VMXNET3_WRITE_BAR1_REG(adapter, reg, val)  \
        writel((val), (adapter)->hw_addr1 + (reg))
#define VMXNET3_READ_BAR1_REG(adapter, reg)        \
        readl((adapter)->hw_addr1 + (reg))
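
/*
 * BAR 0 holds the registers touched in the data path (e.g. the TX/RX
 * producer registers), BAR 1 the configuration registers.  Minimal usage
 * sketch, assuming VMXNET3_REG_VRRS from vmxnet3_defs.h is the revision
 * report/select register:
 *
 *      u32 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
 *      if (ver & 1)
 *              VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1);
 */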

#define VMXNET3_WAKE_QUEUE_THRESHOLD(tq)  (5)
#define VMXNET3_RX_ALLOC_THRESHOLD(rq, ring_idx, adapter) \
        ((rq)->rx_ring[ring_idx].size >> 3)

#define VMXNET3_GET_ADDR_LO(dma)   ((u32)(dma))
#define VMXNET3_GET_ADDR_HI(dma)   ((u32)(((u64)(dma)) >> 32))
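
/*
 * Example: for dma == 0x0000000123456789ULL, VMXNET3_GET_ADDR_LO(dma) is
 * 0x23456789 and VMXNET3_GET_ADDR_HI(dma) is 0x00000001, so a 64-bit DMA
 * address can be programmed through two 32-bit register writes (e.g. the
 * driver shared-area address registers in BAR 1).
 */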

/* must be a multiple of VMXNET3_RING_SIZE_ALIGN */
#define VMXNET3_DEF_TX_RING_SIZE    512
#define VMXNET3_DEF_RX_RING_SIZE    256

#define VMXNET3_MAX_ETH_HDR_SIZE    22
#define VMXNET3_MAX_SKB_BUF_SIZE    (3*1024)

int
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter);

int
vmxnet3_activate_dev(struct vmxnet3_adapter *adapter);

void
vmxnet3_force_close(struct vmxnet3_adapter *adapter);

void
vmxnet3_reset_dev(struct vmxnet3_adapter *adapter);

void
vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter);

void
vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter);

int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
                      u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size);

extern void vmxnet3_set_ethtool_ops(struct net_device *netdev);
extern struct net_device_stats *vmxnet3_get_stats(struct net_device *netdev);

extern char vmxnet3_driver_name[];
#endif