linux/drivers/net/benet/be.h
/*
 * Copyright (C) 2005 - 2010 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#ifndef BE_H
#define BE_H

#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/version.h>
#include <linux/delay.h>
#include <net/tcp.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/firmware.h>
#include <linux/slab.h>

#include "be_hw.h"

#define DRV_VER                 "2.103.175u"
#define DRV_NAME                "be2net"
#define BE_NAME                 "ServerEngines BladeEngine2 10Gbps NIC"
#define BE3_NAME                "ServerEngines BladeEngine3 10Gbps NIC"
#define OC_NAME                 "Emulex OneConnect 10Gbps NIC"
#define OC_NAME_BE              OC_NAME "(be3)"
#define OC_NAME_LANCER          OC_NAME "(Lancer)"
#define DRV_DESC                "ServerEngines BladeEngine 10Gbps NIC Driver"

#define BE_VENDOR_ID            0x19a2
#define EMULEX_VENDOR_ID        0x10df
#define BE_DEVICE_ID1           0x211
#define BE_DEVICE_ID2           0x221
#define OC_DEVICE_ID1           0x700   /* Device Id for BE2 cards */
#define OC_DEVICE_ID2           0x710   /* Device Id for BE3 cards */
#define OC_DEVICE_ID3           0xe220  /* Device id for Lancer cards */

static inline char *nic_name(struct pci_dev *pdev)
{
        switch (pdev->device) {
        case OC_DEVICE_ID1:
                return OC_NAME;
        case OC_DEVICE_ID2:
                return OC_NAME_BE;
        case OC_DEVICE_ID3:
                return OC_NAME_LANCER;
        case BE_DEVICE_ID2:
                return BE3_NAME;
        default:
                return BE_NAME;
        }
}

/* Number of bytes of an RX frame that are copied to skb->data */
#define BE_HDR_LEN              64
#define BE_MAX_JUMBO_FRAME_SIZE 9018
#define BE_MIN_MTU              256

#define BE_NUM_VLANS_SUPPORTED  64
#define BE_MAX_EQD              96
#define BE_MAX_TX_FRAG_COUNT    30

#define EVNT_Q_LEN              1024
#define TX_Q_LEN                2048
#define TX_CQ_LEN               1024
#define RX_Q_LEN                1024    /* Does not support any other value */
#define RX_CQ_LEN               1024
#define MCC_Q_LEN               128     /* total size not to exceed 8 pages */
#define MCC_CQ_LEN              256

#define MAX_RSS_QS              4       /* BE limit is 4 queues/port */
#define BE_MAX_MSIX_VECTORS     (MAX_RSS_QS + 1 + 1)/* RSS qs + 1 def Rx + Tx */
#define BE_NAPI_WEIGHT          64
#define MAX_RX_POST             BE_NAPI_WEIGHT /* Frags posted at a time */
#define RX_FRAGS_REFILL_WM      (RX_Q_LEN - MAX_RX_POST)
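
/*
 * Editorial note (not part of the original header): with RX_Q_LEN = 1024 and
 * MAX_RX_POST = 64, RX_FRAGS_REFILL_WM works out to 960.  The driver
 * presumably refills the RX ring once the count of posted frags falls to
 * this watermark, so that a full batch of MAX_RX_POST fragments always fits.
 */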

#define FW_VER_LEN              32

#define BE_MAX_VF               32

struct be_dma_mem {
        void *va;
        dma_addr_t dma;
        u32 size;
};

struct be_queue_info {
        struct be_dma_mem dma_mem;
        u16 len;
        u16 entry_size; /* Size of an element in the queue */
        u16 id;
        u16 tail, head;
        bool created;
        atomic_t used;  /* Number of valid elements in the queue */
};

static inline u32 MODULO(u16 val, u16 limit)
{
        BUG_ON(limit & (limit - 1));
        return val & (limit - 1);
}

static inline void index_adv(u16 *index, u16 val, u16 limit)
{
        *index = MODULO((*index + val), limit);
}

static inline void index_inc(u16 *index, u16 limit)
{
        *index = MODULO((*index + 1), limit);
}

static inline void *queue_head_node(struct be_queue_info *q)
{
        return q->dma_mem.va + q->head * q->entry_size;
}

static inline void *queue_tail_node(struct be_queue_info *q)
{
        return q->dma_mem.va + q->tail * q->entry_size;
}

static inline void queue_head_inc(struct be_queue_info *q)
{
        index_inc(&q->head, q->len);
}

static inline void queue_tail_inc(struct be_queue_info *q)
{
        index_inc(&q->tail, q->len);
}

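/*
 * Usage sketch (editorial, not from the original source): a producer fills
 * the slot at the head and advances it, while a consumer drains from the
 * tail; "used" tracks how many entries are outstanding.  MODULO() requires
 * the ring length to be a power of two (the BUG_ON above enforces this),
 * which is why every *_Q_LEN in this file is a power of two.
 *
 *      entry = queue_head_node(q);     // "entry" is a hypothetical element
 *      ... fill in *entry ...
 *      queue_head_inc(q);
 *      atomic_inc(&q->used);
 */
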
struct be_eq_obj {
        struct be_queue_info q;
        char desc[32];

        /* Adaptive interrupt coalescing (AIC) info */
        bool enable_aic;
        u16 min_eqd;            /* in usecs */
        u16 max_eqd;            /* in usecs */
        u16 cur_eqd;            /* in usecs */
        u8  msix_vec_idx;

        struct napi_struct napi;
};

struct be_mcc_obj {
        struct be_queue_info q;
        struct be_queue_info cq;
        bool rearm_cq;
};

struct be_tx_stats {
        u32 be_tx_reqs;         /* number of TX requests initiated */
        u32 be_tx_stops;        /* number of times TX Q was stopped */
        u32 be_tx_wrbs;         /* number of tx WRBs used */
        u32 be_tx_events;       /* number of tx completion events  */
        u32 be_tx_compl;        /* number of tx completion entries processed */
        ulong be_tx_jiffies;
        u64 be_tx_bytes;
        u64 be_tx_bytes_prev;
        u64 be_tx_pkts;
        u32 be_tx_rate;
};

struct be_tx_obj {
        struct be_queue_info q;
        struct be_queue_info cq;
        /* Remember the skbs that were transmitted */
        struct sk_buff *sent_skb_list[TX_Q_LEN];
};

/* Struct to remember the pages posted for rx frags */
struct be_rx_page_info {
        struct page *page;
        DEFINE_DMA_UNMAP_ADDR(bus);
        u16 page_offset;
        bool last_page_user;
};

struct be_rx_stats {
        u32 rx_post_fail;/* number of ethrx buffer alloc failures */
        u32 rx_polls;   /* number of times NAPI called poll function */
        u32 rx_events;  /* number of ucast rx completion events  */
        u32 rx_compl;   /* number of rx completion entries processed */
        ulong rx_jiffies;
        u64 rx_bytes;
        u64 rx_bytes_prev;
        u64 rx_pkts;
        u32 rx_rate;
        u32 rx_mcast_pkts;
        u32 rxcp_err;   /* Num rx completion entries w/ err set. */
        ulong rx_fps_jiffies;   /* jiffies at last FPS calc */
        u32 rx_frags;
        u32 prev_rx_frags;
        u32 rx_fps;             /* Rx frags per second */
};

struct be_rx_obj {
        struct be_adapter *adapter;
        struct be_queue_info q;
        struct be_queue_info cq;
        struct be_rx_page_info page_info_tbl[RX_Q_LEN];
        struct be_eq_obj rx_eq;
        struct be_rx_stats stats;
        u8 rss_id;
        bool rx_post_starved;   /* Zero rx frags have been posted to BE */
        u16 last_frag_index;
        u16 rsvd;
        u32 cache_line_barrier[15];
};

struct be_vf_cfg {
        unsigned char vf_mac_addr[ETH_ALEN];
        u32 vf_if_handle;
        u32 vf_pmac_id;
        u16 vf_vlan_tag;
        u32 vf_tx_rate;
};

#define BE_INVALID_PMAC_ID              0xffffffff
struct be_adapter {
        struct pci_dev *pdev;
        struct net_device *netdev;

        u8 __iomem *csr;
        u8 __iomem *db;         /* Door Bell */
        u8 __iomem *pcicfg;     /* PCI config space */

        struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
        struct be_dma_mem mbox_mem;
        /* Mbox mem is adjusted to align to 16 bytes. The allocated addr
         * is stored so it can be freed later */
        struct be_dma_mem mbox_mem_alloced;

        struct be_mcc_obj mcc_obj;
        spinlock_t mcc_lock;    /* For serializing mcc cmds to BE card */
        spinlock_t mcc_cq_lock;

        struct msix_entry msix_entries[BE_MAX_MSIX_VECTORS];
        bool msix_enabled;
        bool isr_registered;

        /* TX Rings */
        struct be_eq_obj tx_eq;
        struct be_tx_obj tx_obj;
        struct be_tx_stats tx_stats;

        u32 cache_line_break[8];

        /* Rx rings */
        struct be_rx_obj rx_obj[MAX_RSS_QS + 1]; /* one default non-rss Q */
        u32 num_rx_qs;
        u32 big_page_size;      /* Compounded page size shared by rx wrbs */

        u8 msix_vec_next_idx;

        struct vlan_group *vlan_grp;
        u16 vlans_added;
        u16 max_vlans;  /* Number of vlans supported */
        u8 vlan_tag[VLAN_N_VID];
        u8 vlan_prio_bmap;      /* Available Priority BitMap */
        u16 recommended_prio;   /* Recommended Priority */
        struct be_dma_mem mc_cmd_mem;

        struct be_dma_mem stats_cmd;
        /* Work queue used to perform periodic tasks like getting statistics */
        struct delayed_work work;

        /* Ethtool knobs and info */
        bool rx_csum;           /* BE card must perform rx-checksumming */
        char fw_ver[FW_VER_LEN];
        u32 if_handle;          /* Used to configure filtering */
        u32 pmac_id;            /* MAC addr handle used by BE card */

        bool eeh_err;
        bool link_up;
        u32 port_num;
        bool promiscuous;
        bool wol;
        u32 function_mode;
        u32 function_caps;
        u32 rx_fc;              /* Rx flow control */
        u32 tx_fc;              /* Tx flow control */
        bool ue_detected;
        bool stats_ioctl_sent;
        int link_speed;
        u8 port_type;
        u8 transceiver;
        u8 autoneg;
        u8 generation;          /* BladeEngine ASIC generation */
        u32 flash_status;
        struct completion flash_compl;

        bool sriov_enabled;
        struct be_vf_cfg vf_cfg[BE_MAX_VF];
        u8 is_virtfn;
        u32 sli_family;
};

#define be_physfn(adapter) (!adapter->is_virtfn)

/* BladeEngine Generation numbers */
#define BE_GEN2 2
#define BE_GEN3 3

#define lancer_chip(adapter)            (adapter->pdev->device == OC_DEVICE_ID3)

extern const struct ethtool_ops be_ethtool_ops;

#define tx_stats(adapter)               (&adapter->tx_stats)
#define rx_stats(rxo)                   (&rxo->stats)

#define BE_SET_NETDEV_OPS(netdev, ops)  (netdev->netdev_ops = ops)

#define for_all_rx_queues(adapter, rxo, i)                              \
        for (i = 0, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs;  \
                i++, rxo++)

/* Just skip the first default non-rss queue */
#define for_all_rss_queues(adapter, rxo, i)                             \
        for (i = 0, rxo = &adapter->rx_obj[i+1]; i < (adapter->num_rx_qs - 1);\
                i++, rxo++)
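
/*
 * Usage sketch (editorial): rx_obj[0] is the default non-RSS queue and
 * rx_obj[1..num_rx_qs-1] are the RSS queues, so the two iterators differ
 * only in whether the default queue is included.
 *
 *      struct be_rx_obj *rxo;
 *      int i;
 *
 *      for_all_rx_queues(adapter, rxo, i)
 *              refill_rxq(rxo);        // hypothetical per-queue operation
 */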

#define PAGE_SHIFT_4K           12
#define PAGE_SIZE_4K            (1 << PAGE_SHIFT_4K)

/* Returns number of pages spanned by the data starting at the given addr */
#define PAGES_4K_SPANNED(_address, size)                                \
                ((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) +     \
                        (size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))

/* Byte offset into the page corresponding to given address */
#define OFFSET_IN_PAGE(addr)                                            \
                 ((size_t)(addr) & (PAGE_SIZE_4K-1))
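
/*
 * Worked example (editorial): for a buffer at an address whose low bits are
 * 0x1f00 and whose size is 0x300, OFFSET_IN_PAGE() yields 0xf00 and
 * PAGES_4K_SPANNED() yields (0xf00 + 0x300 + 0xfff) >> 12 = 2, i.e. the
 * data straddles two 4K pages.
 */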

/* Returns bit offset within a DWORD of a bitfield */
#define AMAP_BIT_OFFSET(_struct, field)                                 \
                (((size_t)&(((_struct *)0)->field))%32)

/* Returns the bit mask of the field that is NOT shifted into location. */
static inline u32 amap_mask(u32 bitsize)
{
        return (bitsize == 32 ? 0xFFFFFFFF : (1 << bitsize) - 1);
}

static inline void
amap_set(void *ptr, u32 dw_offset, u32 mask, u32 offset, u32 value)
{
        u32 *dw = (u32 *) ptr + dw_offset;
        *dw &= ~(mask << offset);
        *dw |= (mask & value) << offset;
}

#define AMAP_SET_BITS(_struct, field, ptr, val)                         \
                amap_set(ptr,                                           \
                        offsetof(_struct, field)/32,                    \
                        amap_mask(sizeof(((_struct *)0)->field)),       \
                        AMAP_BIT_OFFSET(_struct, field),                \
                        val)

static inline u32 amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset)
{
        u32 *dw = (u32 *) ptr;
        return mask & (*(dw + dw_offset) >> offset);
}

#define AMAP_GET_BITS(_struct, field, ptr)                              \
                amap_get(ptr,                                           \
                        offsetof(_struct, field)/32,                    \
                        amap_mask(sizeof(((_struct *)0)->field)),       \
                        AMAP_BIT_OFFSET(_struct, field))
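
/*
 * Usage sketch (editorial): the AMAP_* helpers access bit-fields of the
 * hardware descriptor layouts in be_hw.h, where each field of an "amap"
 * struct is declared as a u8 array sized in bits, so offsetof()/sizeof()
 * give the field's bit offset and bit width.  Assuming a descriptor type
 * struct amap_eth_hdr_wrb with a num_wrb field:
 *
 *      AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
 *      cnt = AMAP_GET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr);
 */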

#define be_dws_cpu_to_le(wrb, len)      swap_dws(wrb, len)
#define be_dws_le_to_cpu(wrb, len)      swap_dws(wrb, len)
static inline void swap_dws(void *wrb, int len)
{
#ifdef __BIG_ENDIAN
        u32 *dw = wrb;
        BUG_ON(len % 4);
        do {
                *dw = cpu_to_le32(*dw);
                dw++;
                len -= 4;
        } while (len);
#endif                          /* __BIG_ENDIAN */
}
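
/*
 * Editorial note: be_dws_cpu_to_le() and be_dws_le_to_cpu() both resolve to
 * swap_dws(), which byte-swaps each 32-bit word of a WRB in place on
 * big-endian hosts and is a no-op on little-endian ones; the hardware
 * consumes its descriptors in little-endian layout.
 */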

static inline u8 is_tcp_pkt(struct sk_buff *skb)
{
        u8 val = 0;

        if (ip_hdr(skb)->version == 4)
                val = (ip_hdr(skb)->protocol == IPPROTO_TCP);
        else if (ip_hdr(skb)->version == 6)
                val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP);

        return val;
}

static inline u8 is_udp_pkt(struct sk_buff *skb)
{
        u8 val = 0;

        if (ip_hdr(skb)->version == 4)
                val = (ip_hdr(skb)->protocol == IPPROTO_UDP);
        else if (ip_hdr(skb)->version == 6)
                val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP);

        return val;
}

static inline void be_check_sriov_fn_type(struct be_adapter *adapter)
{
        u8 data;
        u32 sli_intf;

        if (lancer_chip(adapter)) {
                pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET,
                                                                &sli_intf);
                adapter->is_virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
        } else {
                pci_write_config_byte(adapter->pdev, 0xFE, 0xAA);
                pci_read_config_byte(adapter->pdev, 0xFE, &data);
                adapter->is_virtfn = (data != 0xAA);
        }
}
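
/*
 * Editorial note: on Lancer the SLI interface register reports the function
 * type directly; on BE2/BE3 the test-write of 0xAA to config offset 0xFE
 * apparently sticks only on a physical function, so a mismatched read-back
 * marks the function as a VF.
 */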

static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
{
        u32 addr;

        addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);

        mac[5] = (u8)(addr & 0xFF);
        mac[4] = (u8)((addr >> 8) & 0xFF);
        mac[3] = (u8)((addr >> 16) & 0xFF);
        mac[2] = 0xC9;
        mac[1] = 0x00;
        mac[0] = 0x00;
}
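
/*
 * Editorial note: the generated VF MAC has the form 00:00:c9:xx:yy:zz, with
 * the low three bytes taken from a jhash of the PF's own MAC address;
 * 00-00-c9 is an Emulex OUI, so the address is deterministic per adapter
 * while staying in the vendor's range.
 */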

extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
                u16 num_popped);
extern void be_link_status_update(struct be_adapter *adapter, bool link_up);
extern void netdev_stats_update(struct be_adapter *adapter);
extern int be_load_fw(struct be_adapter *adapter, u8 *func);
#endif                          /* BE_H */