linux/drivers/net/benet/be.h
/*
 * Copyright (C) 2005 - 2009 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#ifndef BE_H
#define BE_H

#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/version.h>
#include <linux/delay.h>
#include <net/tcp.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/firmware.h>

#include "be_hw.h"

#define DRV_VER                 "2.101.205"
#define DRV_NAME                "be2net"
#define BE_NAME                 "ServerEngines BladeEngine2 10Gbps NIC"
#define OC_NAME                 "Emulex OneConnect 10Gbps NIC"
#define DRV_DESC                BE_NAME " Driver"

#define BE_VENDOR_ID            0x19a2
#define BE_DEVICE_ID1           0x211
#define OC_DEVICE_ID1           0x700
#define OC_DEVICE_ID2           0x701

static inline char *nic_name(struct pci_dev *pdev)
{
        if (pdev->device == OC_DEVICE_ID1 || pdev->device == OC_DEVICE_ID2)
                return OC_NAME;
        else
                return BE_NAME;
}

/* Number of bytes of an RX frame that are copied to skb->data */
#define BE_HDR_LEN              64
#define BE_MAX_JUMBO_FRAME_SIZE 9018
#define BE_MIN_MTU              256

#define BE_NUM_VLANS_SUPPORTED  64
#define BE_MAX_EQD              96
#define BE_MAX_TX_FRAG_COUNT    30

#define EVNT_Q_LEN              1024
#define TX_Q_LEN                2048
#define TX_CQ_LEN               1024
#define RX_Q_LEN                1024    /* Does not support any other value */
#define RX_CQ_LEN               1024
#define MCC_Q_LEN               128     /* total size not to exceed 8 pages */
#define MCC_CQ_LEN              256

#define BE_NAPI_WEIGHT          64
#define MAX_RX_POST             BE_NAPI_WEIGHT /* Frags posted at a time */
#define RX_FRAGS_REFILL_WM      (RX_Q_LEN - MAX_RX_POST)

#define FW_VER_LEN              32

struct be_dma_mem {
        void *va;
        dma_addr_t dma;
        u32 size;
};

struct be_queue_info {
        struct be_dma_mem dma_mem;
        u16 len;
        u16 entry_size; /* Size of an element in the queue */
        u16 id;
        u16 tail, head;
        bool created;
        atomic_t used;  /* Number of valid elements in the queue */
};

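/*
 * Ring indices wrap by bitwise AND instead of '%'; this is valid only
 * because every queue length above is a power of two, e.g.
 * MODULO(1025, 1024) == 1. The BUG_ON below enforces that invariant.
 */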
static inline u32 MODULO(u16 val, u16 limit)
{
        BUG_ON(limit & (limit - 1));
        return val & (limit - 1);
}

static inline void index_adv(u16 *index, u16 val, u16 limit)
{
        *index = MODULO((*index + val), limit);
}

static inline void index_inc(u16 *index, u16 limit)
{
        *index = MODULO((*index + 1), limit);
}

static inline void *queue_head_node(struct be_queue_info *q)
{
        return q->dma_mem.va + q->head * q->entry_size;
}

static inline void *queue_tail_node(struct be_queue_info *q)
{
        return q->dma_mem.va + q->tail * q->entry_size;
}

static inline void queue_head_inc(struct be_queue_info *q)
{
        index_inc(&q->head, q->len);
}

static inline void queue_tail_inc(struct be_queue_info *q)
{
        index_inc(&q->tail, q->len);
}
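/*
 * Illustrative producer-side use of the helpers above (a sketch, not
 * code from this driver): entries are written at head and reaped at
 * tail, with 'used' tracking occupancy.
 *
 *	static void example_post(struct be_queue_info *q, const void *entry)
 *	{
 *		memcpy(queue_head_node(q), entry, q->entry_size);
 *		queue_head_inc(q);
 *		atomic_inc(&q->used);
 *	}
 */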

struct be_eq_obj {
        struct be_queue_info q;
        char desc[32];

        /* Adaptive interrupt coalescing (AIC) info */
        bool enable_aic;
        u16 min_eqd;            /* in usecs */
        u16 max_eqd;            /* in usecs */
        u16 cur_eqd;            /* in usecs */

        struct napi_struct napi;
};

struct be_mcc_obj {
        struct be_queue_info q;
        struct be_queue_info cq;
};

struct be_drvr_stats {
        u32 be_tx_reqs;         /* number of TX requests initiated */
        u32 be_tx_stops;        /* number of times TX Q was stopped */
        u32 be_fwd_reqs;        /* number of send reqs through forwarding i/f */
        u32 be_tx_wrbs;         /* number of tx WRBs used */
        u32 be_tx_events;       /* number of tx completion events */
        u32 be_tx_compl;        /* number of tx completion entries processed */
        ulong be_tx_jiffies;
        u64 be_tx_bytes;
        u64 be_tx_bytes_prev;
        u32 be_tx_rate;

        u32 cache_barrier[16];

        u32 be_ethrx_post_fail; /* number of ethrx buffer alloc failures */
        u32 be_polls;           /* number of times NAPI called poll function */
        u32 be_rx_events;       /* number of ucast rx completion events */
        u32 be_rx_compl;        /* number of rx completion entries processed */
        ulong be_rx_jiffies;
        u64 be_rx_bytes;
        u64 be_rx_bytes_prev;
        u32 be_rx_rate;
        /* number of non ether-type-II frames dropped because the frame
         * length is greater than the length field of the MAC header */
        u32 be_802_3_dropped_frames;
        /* number of non ether-type-II frames counted as malformed because
         * the frame length is less than the length field of the MAC header */
        u32 be_802_3_malformed_frames;
        u32 be_rxcp_err;        /* Num rx completion entries w/ err set. */
        ulong rx_fps_jiffies;   /* jiffies at last FPS calc */
        u32 be_rx_frags;
        u32 be_prev_rx_frags;
        u32 be_rx_fps;          /* Rx frags per second */
};

struct be_stats_obj {
        struct be_drvr_stats drvr_stats;
        struct net_device_stats net_stats;
        struct be_dma_mem cmd;
};

struct be_tx_obj {
        struct be_queue_info q;
        struct be_queue_info cq;
        /* Remember the skbs that were transmitted */
        struct sk_buff *sent_skb_list[TX_Q_LEN];
};

/* Struct to remember the pages posted for rx frags */
struct be_rx_page_info {
        struct page *page;
        dma_addr_t bus;
        u16 page_offset;
        bool last_page_user;
};

struct be_rx_obj {
        struct be_queue_info q;
        struct be_queue_info cq;
        struct be_rx_page_info page_info_tbl[RX_Q_LEN];
};

#define BE_NUM_MSIX_VECTORS             2       /* 1 each for Tx and Rx */
struct be_adapter {
        struct pci_dev *pdev;
        struct net_device *netdev;

        u8 __iomem *csr;
        u8 __iomem *db;         /* Door Bell */
        u8 __iomem *pcicfg;     /* PCI config space */

        spinlock_t mbox_lock;   /* For serializing mbox cmds to BE card */
        struct be_dma_mem mbox_mem;
        /* Mbox mem is adjusted to align to 16 bytes. The address actually
         * allocated is stored here so that it can be freed later. */
        struct be_dma_mem mbox_mem_alloced;

        struct be_mcc_obj mcc_obj;
        spinlock_t mcc_lock;    /* For serializing mcc cmds to BE card */
        spinlock_t mcc_cq_lock;

        struct msix_entry msix_entries[BE_NUM_MSIX_VECTORS];
        bool msix_enabled;
        bool isr_registered;

        /* TX Rings */
        struct be_eq_obj tx_eq;
        struct be_tx_obj tx_obj;

        u32 cache_line_break[8];

        /* Rx rings */
        struct be_eq_obj rx_eq;
        struct be_rx_obj rx_obj;
        u32 big_page_size;      /* Compounded page size shared by rx wrbs */
        bool rx_post_starved;   /* Zero rx frags have been posted to BE */

        struct vlan_group *vlan_grp;
        u16 num_vlans;
        u8 vlan_tag[VLAN_GROUP_ARRAY_LEN];

        struct be_stats_obj stats;
        /* Work queue used to perform periodic tasks like getting statistics */
        struct delayed_work work;

        /* Ethtool knobs and info */
        bool rx_csum;           /* BE card must perform rx-checksumming */
        char fw_ver[FW_VER_LEN];
        u32 if_handle;          /* Used to configure filtering */
        u32 pmac_id;            /* MAC addr handle used by BE card */

        bool link_up;
        u32 port_num;
        bool promiscuous;
        u32 cap;
        u32 rx_fc;              /* Rx flow control */
        u32 tx_fc;              /* Tx flow control */
};

extern const struct ethtool_ops be_ethtool_ops;

#define drvr_stats(adapter)             (&((adapter)->stats.drvr_stats))

static inline unsigned int be_pci_func(struct be_adapter *adapter)
{
        return PCI_FUNC(adapter->pdev->devfn);
}

#define BE_SET_NETDEV_OPS(netdev, ops)  ((netdev)->netdev_ops = (ops))

#define PAGE_SHIFT_4K           12
#define PAGE_SIZE_4K            (1 << PAGE_SHIFT_4K)

/* Returns number of pages spanned by the data starting at the given addr */
#define PAGES_4K_SPANNED(_address, size)                                \
                ((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) +     \
                        (size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))

/* Byte offset into the page corresponding to given address */
#define OFFSET_IN_PAGE(addr)                                            \
                ((size_t)(addr) & (PAGE_SIZE_4K - 1))
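/*
 * Worked example (illustrative): a 100-byte buffer that starts at byte
 * 0xFF0 of a 4K page crosses into the next page, so
 * PAGES_4K_SPANNED(addr, 100) == 2 and OFFSET_IN_PAGE(addr) == 0xFF0.
 */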

/* Returns bit offset within a DWORD of a bitfield */
#define AMAP_BIT_OFFSET(_struct, field)                                 \
                (((size_t)&(((_struct *)0)->field))%32)

/* Returns the bit mask of the field that is NOT shifted into location. */
static inline u32 amap_mask(u32 bitsize)
{
        return (bitsize == 32 ? 0xFFFFFFFF : (1 << bitsize) - 1);
}

static inline void
amap_set(void *ptr, u32 dw_offset, u32 mask, u32 offset, u32 value)
{
        u32 *dw = (u32 *) ptr + dw_offset;
        *dw &= ~(mask << offset);
        *dw |= (mask & value) << offset;
}

#define AMAP_SET_BITS(_struct, field, ptr, val)                         \
                amap_set(ptr,                                           \
                        offsetof(_struct, field)/32,                    \
                        amap_mask(sizeof(((_struct *)0)->field)),       \
                        AMAP_BIT_OFFSET(_struct, field),                \
                        val)

static inline u32 amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset)
{
        u32 *dw = (u32 *) ptr;
        return mask & (*(dw + dw_offset) >> offset);
}

#define AMAP_GET_BITS(_struct, field, ptr)                              \
                amap_get(ptr,                                           \
                        offsetof(_struct, field)/32,                    \
                        amap_mask(sizeof(((_struct *)0)->field)),       \
                        AMAP_BIT_OFFSET(_struct, field))
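/*
 * Illustrative use of the AMAP accessors (a sketch; the real layouts
 * live in be_hw.h): the amap structs declare one u8 per bit, so
 * sizeof() gives a field's width in bits and offsetof() its bit
 * position. Assuming a hypothetical 'struct amap_eq_entry' with a
 * 16-bit 'count' field, a value could be read from a DMA'd entry with:
 *
 *	u32 count = AMAP_GET_BITS(struct amap_eq_entry, count, eqe);
 */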

#define be_dws_cpu_to_le(wrb, len)      swap_dws(wrb, len)
#define be_dws_le_to_cpu(wrb, len)      swap_dws(wrb, len)
static inline void swap_dws(void *wrb, int len)
{
#ifdef __BIG_ENDIAN
        u32 *dw = wrb;
        BUG_ON(len % 4);
        do {
                *dw = cpu_to_le32(*dw);
                dw++;
                len -= 4;
        } while (len);
#endif                          /* __BIG_ENDIAN */
}
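/* swap_dws() above is a no-op on little-endian hosts: the hardware
 * structures are little-endian, so only big-endian CPUs must swap. */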

static inline u8 is_tcp_pkt(struct sk_buff *skb)
{
        u8 val = 0;

        if (ip_hdr(skb)->version == 4)
                val = (ip_hdr(skb)->protocol == IPPROTO_TCP);
        else if (ip_hdr(skb)->version == 6)
                val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP);

        return val;
}

static inline u8 is_udp_pkt(struct sk_buff *skb)
{
        u8 val = 0;

        if (ip_hdr(skb)->version == 4)
                val = (ip_hdr(skb)->protocol == IPPROTO_UDP);
        else if (ip_hdr(skb)->version == 6)
                val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP);

        return val;
}
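/*
 * Both helpers above assume skb's network header already points at the
 * IP header. Reading the version via ip_hdr() is safe for IPv6 as well
 * because the first four bits of an IPv6 header are also the version
 * field.
 */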

extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
                u16 num_popped);
extern void be_link_status_update(struct be_adapter *adapter, bool link_up);
extern void netdev_stats_update(struct be_adapter *adapter);
extern int be_load_fw(struct be_adapter *adapter, u8 *func);
#endif                          /* BE_H */