linux/drivers/net/ethernet/brocade/bna/bnad.c
   1/*
   2 * Linux network driver for Brocade Converged Network Adapter.
   3 *
   4 * This program is free software; you can redistribute it and/or modify it
   5 * under the terms of the GNU General Public License (GPL) Version 2 as
   6 * published by the Free Software Foundation
   7 *
   8 * This program is distributed in the hope that it will be useful, but
   9 * WITHOUT ANY WARRANTY; without even the implied warranty of
  10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  11 * General Public License for more details.
  12 */
  13/*
  14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
  15 * All rights reserved
  16 * www.brocade.com
  17 */
  18#include <linux/bitops.h>
  19#include <linux/netdevice.h>
  20#include <linux/skbuff.h>
  21#include <linux/etherdevice.h>
  22#include <linux/in.h>
  23#include <linux/ethtool.h>
  24#include <linux/if_vlan.h>
  25#include <linux/if_ether.h>
  26#include <linux/ip.h>
  27#include <linux/prefetch.h>
  28#include <linux/module.h>
  29
  30#include "bnad.h"
  31#include "bna.h"
  32#include "cna.h"
  33
  34static DEFINE_MUTEX(bnad_fwimg_mutex);
  35
  36/*
  37 * Module params
  38 */
  39static uint bnad_msix_disable;
  40module_param(bnad_msix_disable, uint, 0444);
  41MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
  42
  43static uint bnad_ioc_auto_recover = 1;
  44module_param(bnad_ioc_auto_recover, uint, 0444);
  45MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
  46
  47static uint bna_debugfs_enable = 1;
  48module_param(bna_debugfs_enable, uint, S_IRUGO | S_IWUSR);
  49MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
  50                 " Range[false:0|true:1]");
  51
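/*
 * Example usage (the module name "bna" is assumed here, as built from
 * this driver; shown for illustration only):
 *
 *	modprobe bna bnad_msix_disable=1 bnad_ioc_auto_recover=0
 *
 * The two parameters above are 0444 and therefore load-time only;
 * bna_debugfs_enable is S_IRUGO | S_IWUSR and can also be flipped at
 * run time through /sys/module/bna/parameters/bna_debugfs_enable.
 */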
  52/*
  53 * Global variables
  54 */
  55u32 bnad_rxqs_per_cq = 2;
  56static u32 bna_id;
  57static struct mutex bnad_list_mutex;
  58static LIST_HEAD(bnad_list);
  59static const u8 bnad_bcast_addr[] =  {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
  60
  61/*
  62 * Local MACROS
  63 */
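/*
 * BNAD_GET_MBOX_IRQ() returns the interrupt line used for mailbox
 * traffic: the dedicated MSI-X vector at BNAD_MAILBOX_MSIX_INDEX when
 * BNAD_CF_MSIX is set, otherwise the function's legacy INTx line.
 *
 * BNAD_FILL_UNMAPQ_MEM_REQ() fills one bna_res_info entry so that the
 * BNA layer allocates '_num' kernel-virtual (KVA) blocks of '_size'
 * bytes each for the Tx/Rx unmap queues, e.g. (illustrative only; the
 * slot index, count and size shown here are hypothetical):
 *
 *	BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[UNMAPQ_SLOT],
 *				 bnad->num_txq_per_tx, unmapq_size);
 */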
  64#define BNAD_GET_MBOX_IRQ(_bnad)                                \
  65        (((_bnad)->cfg_flags & BNAD_CF_MSIX) ?                  \
  66         ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
  67         ((_bnad)->pcidev->irq))
  68
  69#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _size)        \
  70do {                                                            \
  71        (_res_info)->res_type = BNA_RES_T_MEM;                  \
  72        (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;   \
  73        (_res_info)->res_u.mem_info.num = (_num);               \
  74        (_res_info)->res_u.mem_info.len = (_size);              \
  75} while (0)
  76
  77static void
  78bnad_add_to_list(struct bnad *bnad)
  79{
  80        mutex_lock(&bnad_list_mutex);
  81        list_add_tail(&bnad->list_entry, &bnad_list);
  82        bnad->id = bna_id++;
  83        mutex_unlock(&bnad_list_mutex);
  84}
  85
  86static void
  87bnad_remove_from_list(struct bnad *bnad)
  88{
  89        mutex_lock(&bnad_list_mutex);
  90        list_del(&bnad->list_entry);
  91        mutex_unlock(&bnad_list_mutex);
  92}
  93
  94/*
  95 * Reinitialize completions in CQ, once Rx is taken down
  96 */
  97static void
  98bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
  99{
 100        struct bna_cq_entry *cmpl;
 101        int i;
 102
 103        for (i = 0; i < ccb->q_depth; i++) {
 104                cmpl = &((struct bna_cq_entry *)ccb->sw_q)[i];
 105                cmpl->valid = 0;
 106        }
 107}
 108
 109/* Tx Datapath functions */
 110
 111
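/*
 * Each transmitted skb occupies one or more bnad_tx_unmap entries in
 * unmap_q[]: vectors[0] of the first entry maps the linear (header)
 * part and is released with dma_unmap_single(), the remaining nvecs-1
 * vectors map page fragments and are released with dma_unmap_page().
 * When the vector count of an entry reaches BFI_TX_MAX_VECTORS_PER_WI
 * the walk wraps to the next unmap_q entry.  bnad_tx_buff_unmap()
 * returns the queue index just past the buffer it tore down; freeing
 * the skb itself is left to the caller.
 */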
 112/* Caller should ensure that the entry at unmap_q[index] is valid */
 113static u32
 114bnad_tx_buff_unmap(struct bnad *bnad,
 115                              struct bnad_tx_unmap *unmap_q,
 116                              u32 q_depth, u32 index)
 117{
 118        struct bnad_tx_unmap *unmap;
 119        struct sk_buff *skb;
 120        int vector, nvecs;
 121
 122        unmap = &unmap_q[index];
 123        nvecs = unmap->nvecs;
 124
 125        skb = unmap->skb;
 126        unmap->skb = NULL;
 127        unmap->nvecs = 0;
 128        dma_unmap_single(&bnad->pcidev->dev,
 129                dma_unmap_addr(&unmap->vectors[0], dma_addr),
 130                skb_headlen(skb), DMA_TO_DEVICE);
 131        dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0);
 132        nvecs--;
 133
 134        vector = 0;
 135        while (nvecs) {
 136                vector++;
 137                if (vector == BFI_TX_MAX_VECTORS_PER_WI) {
 138                        vector = 0;
 139                        BNA_QE_INDX_INC(index, q_depth);
 140                        unmap = &unmap_q[index];
 141                }
 142
 143                dma_unmap_page(&bnad->pcidev->dev,
 144                        dma_unmap_addr(&unmap->vectors[vector], dma_addr),
 145                        skb_shinfo(skb)->frags[nvecs].size, DMA_TO_DEVICE);
 146                dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);
 147                nvecs--;
 148        }
 149
 150        BNA_QE_INDX_INC(index, q_depth);
 151
 152        return index;
 153}
 154
 155/*
 156 * Frees all pending Tx Bufs
 157 * At this point no activity is expected on the Q,
 158 * so DMA unmap & freeing is fine.
 159 */
 160static void
 161bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
 162{
 163        struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
 164        struct sk_buff *skb;
 165        int i;
 166
 167        for (i = 0; i < tcb->q_depth; i++) {
 168                skb = unmap_q[i].skb;
 169                if (!skb)
 170                        continue;
 171                bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);
 172
 173                dev_kfree_skb_any(skb);
 174        }
 175}
 176
 177/*
 178 * bnad_txcmpl_process : Frees the Tx bufs on Tx completion
 179 * Can be called in a) Interrupt context
 180 *                  b) Sending context
 181 */
 182static u32
 183bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
 184{
 185        u32 sent_packets = 0, sent_bytes = 0;
 186        u32 wis, unmap_wis, hw_cons, cons, q_depth;
 187        struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
 188        struct bnad_tx_unmap *unmap;
 189        struct sk_buff *skb;
 190
 191        /* Just return if TX is stopped */
 192        if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
 193                return 0;
 194
 195        hw_cons = *(tcb->hw_consumer_index);
 196        cons = tcb->consumer_index;
 197        q_depth = tcb->q_depth;
 198
 199        wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth);
 200        BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
 201
 202        while (wis) {
 203                unmap = &unmap_q[cons];
 204
 205                skb = unmap->skb;
 206
 207                sent_packets++;
 208                sent_bytes += skb->len;
 209
 210                unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs);
 211                wis -= unmap_wis;
 212
 213                cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons);
 214                dev_kfree_skb_any(skb);
 215        }
 216
 217        /* Update consumer pointers. */
 218        tcb->consumer_index = hw_cons;
 219
 220        tcb->txq->tx_packets += sent_packets;
 221        tcb->txq->tx_bytes += sent_bytes;
 222
 223        return sent_packets;
 224}
 225
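/*
 * Common Tx completion path for the MSI-X handler and the INTx ISR.
 * The BNAD_TXQ_FREE_SENT bit serializes this against the Tx cleanup
 * work; completed buffers are reclaimed via bnad_txcmpl_process(), the
 * netdev queue is woken once at least BNAD_NETIF_WAKE_THRESHOLD entries
 * are free again and, while the TxQ is still started, the interrupt
 * block is acked with the number of packets reclaimed.
 */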
 226static u32
 227bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
 228{
 229        struct net_device *netdev = bnad->netdev;
 230        u32 sent = 0;
 231
 232        if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
 233                return 0;
 234
 235        sent = bnad_txcmpl_process(bnad, tcb);
 236        if (sent) {
 237                if (netif_queue_stopped(netdev) &&
 238                    netif_carrier_ok(netdev) &&
 239                    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
 240                                    BNAD_NETIF_WAKE_THRESHOLD) {
 241                        if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
 242                                netif_wake_queue(netdev);
 243                                BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
 244                        }
 245                }
 246        }
 247
 248        if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
 249                bna_ib_ack(tcb->i_dbell, sent);
 250
 251        smp_mb__before_clear_bit();
 252        clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
 253
 254        return sent;
 255}
 256
 257/* MSIX Tx Completion Handler */
 258static irqreturn_t
 259bnad_msix_tx(int irq, void *data)
 260{
 261        struct bna_tcb *tcb = (struct bna_tcb *)data;
 262        struct bnad *bnad = tcb->bnad;
 263
 264        bnad_tx_complete(bnad, tcb);
 265
 266        return IRQ_HANDLED;
 267}
 268
 269static inline void
 270bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb)
 271{
 272        struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
 273
 274        unmap_q->reuse_pi = -1;
 275        unmap_q->alloc_order = -1;
 276        unmap_q->map_size = 0;
 277        unmap_q->type = BNAD_RXBUF_NONE;
 278}
 279
 280/* Default is page-based allocation. Multi-buffer support - TBD */
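/*
 * The small RxQ of an Rx-pair uses order-0 pages sized to the RxQ
 * buffer size.  The large RxQ carves its buffers out of a compound
 * page of order get_order(mtu): map_size is the per-buffer slice
 * (2048 bytes for small buffers, otherwise the whole allocation), and
 * the BUG_ON below guarantees that the slice size divides
 * PAGE_SIZE << alloc_order evenly, so a compound page splits into an
 * integral number of buffers.
 */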
 281static int
 282bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
 283{
 284        struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
 285        int mtu, order;
 286
 287        bnad_rxq_alloc_uninit(bnad, rcb);
 288
 289        mtu = bna_enet_mtu_get(&bnad->bna.enet);
 290        order = get_order(mtu);
 291
 292        if (bna_is_small_rxq(rcb->id)) {
 293                unmap_q->alloc_order = 0;
 294                unmap_q->map_size = rcb->rxq->buffer_size;
 295        } else {
 296                unmap_q->alloc_order = order;
 297                unmap_q->map_size =
 298                        (rcb->rxq->buffer_size > 2048) ?
 299                        PAGE_SIZE << order : 2048;
 300        }
 301
 302        BUG_ON(((PAGE_SIZE << order) % unmap_q->map_size));
 303
 304        unmap_q->type = BNAD_RXBUF_PAGE;
 305
 306        return 0;
 307}
 308
 309static inline void
 310bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap)
 311{
 312        if (!unmap->page)
 313                return;
 314
 315        dma_unmap_page(&bnad->pcidev->dev,
 316                        dma_unmap_addr(&unmap->vector, dma_addr),
 317                        unmap->vector.len, DMA_FROM_DEVICE);
 318        put_page(unmap->page);
 319        unmap->page = NULL;
 320        dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
 321        unmap->vector.len = 0;
 322}
 323
 324static inline void
 325bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap)
 326{
 327        if (!unmap->skb)
 328                return;
 329
 330        dma_unmap_single(&bnad->pcidev->dev,
 331                        dma_unmap_addr(&unmap->vector, dma_addr),
 332                        unmap->vector.len, DMA_FROM_DEVICE);
 333        dev_kfree_skb_any(unmap->skb);
 334        unmap->skb = NULL;
 335        dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
 336        unmap->vector.len = 0;
 337}
 338
 339static void
 340bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
 341{
 342        struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
 343        int i;
 344
 345        for (i = 0; i < rcb->q_depth; i++) {
 346                struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];
 347
 348                if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
 349                        bnad_rxq_cleanup_page(bnad, unmap);
 350                else
 351                        bnad_rxq_cleanup_skb(bnad, unmap);
 352        }
 353        bnad_rxq_alloc_uninit(bnad, rcb);
 354}
 355
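/*
 * Page-reuse scheme: a freshly allocated compound page is handed out
 * in map_size slices.  unmap_q->reuse_pi remembers the producer index
 * whose page still has room, so the next buffer takes the following
 * page_offset from the same page (get_page() takes an extra reference
 * per slice); reuse_pi is reset to -1 once the page is exhausted.  The
 * doorbell is rung only after rcb->producer_index has been published
 * (smp_mb()) and only while BNAD_RXQ_POST_OK is set.
 */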
 356static u32
 357bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
 358{
 359        u32 alloced, prod, q_depth;
 360        struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
 361        struct bnad_rx_unmap *unmap, *prev;
 362        struct bna_rxq_entry *rxent;
 363        struct page *page;
 364        u32 page_offset, alloc_size;
 365        dma_addr_t dma_addr;
 366
 367        prod = rcb->producer_index;
 368        q_depth = rcb->q_depth;
 369
 370        alloc_size = PAGE_SIZE << unmap_q->alloc_order;
 371        alloced = 0;
 372
 373        while (nalloc--) {
 374                unmap = &unmap_q->unmap[prod];
 375
 376                if (unmap_q->reuse_pi < 0) {
 377                        page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
 378                                        unmap_q->alloc_order);
 379                        page_offset = 0;
 380                } else {
 381                        prev = &unmap_q->unmap[unmap_q->reuse_pi];
 382                        page = prev->page;
 383                        page_offset = prev->page_offset + unmap_q->map_size;
 384                        get_page(page);
 385                }
 386
 387                if (unlikely(!page)) {
 388                        BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
 389                        rcb->rxq->rxbuf_alloc_failed++;
 390                        goto finishing;
 391                }
 392
 393                dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
 394                                unmap_q->map_size, DMA_FROM_DEVICE);
 395
 396                unmap->page = page;
 397                unmap->page_offset = page_offset;
 398                dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
 399                unmap->vector.len = unmap_q->map_size;
 400                page_offset += unmap_q->map_size;
 401
 402                if (page_offset < alloc_size)
 403                        unmap_q->reuse_pi = prod;
 404                else
 405                        unmap_q->reuse_pi = -1;
 406
 407                rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
 408                BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
 409                BNA_QE_INDX_INC(prod, q_depth);
 410                alloced++;
 411        }
 412
 413finishing:
 414        if (likely(alloced)) {
 415                rcb->producer_index = prod;
 416                smp_mb();
 417                if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
 418                        bna_rxq_prod_indx_doorbell(rcb);
 419        }
 420
 421        return alloced;
 422}
 423
 424static u32
 425bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
 426{
 427        u32 alloced, prod, q_depth, buff_sz;
 428        struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
 429        struct bnad_rx_unmap *unmap;
 430        struct bna_rxq_entry *rxent;
 431        struct sk_buff *skb;
 432        dma_addr_t dma_addr;
 433
 434        buff_sz = rcb->rxq->buffer_size;
 435        prod = rcb->producer_index;
 436        q_depth = rcb->q_depth;
 437
 438        alloced = 0;
 439        while (nalloc--) {
 440                unmap = &unmap_q->unmap[prod];
 441
 442                skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz);
 443
 444                if (unlikely(!skb)) {
 445                        BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
 446                        rcb->rxq->rxbuf_alloc_failed++;
 447                        goto finishing;
 448                }
 449                dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
 450                                          buff_sz, DMA_FROM_DEVICE);
 451
 452                unmap->skb = skb;
 453                dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
 454                unmap->vector.len = buff_sz;
 455
 456                rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
 457                BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
 458                BNA_QE_INDX_INC(prod, q_depth);
 459                alloced++;
 460        }
 461
 462finishing:
 463        if (likely(alloced)) {
 464                rcb->producer_index = prod;
 465                smp_mb();
 466                if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
 467                        bna_rxq_prod_indx_doorbell(rcb);
 468        }
 469
 470        return alloced;
 471}
 472
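/*
 * Replenish an RxQ from its producer side.  Refilling is skipped until
 * at least (1 << BNAD_RXQ_REFILL_THRESHOLD_SHIFT) entries are free, so
 * the doorbell is not hammered for every received frame.
 */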
 473static inline void
 474bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
 475{
 476        struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
 477        u32 to_alloc;
 478
 479        to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth);
 480        if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
 481                return;
 482
 483        if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
 484                bnad_rxq_refill_page(bnad, rcb, to_alloc);
 485        else
 486                bnad_rxq_refill_skb(bnad, rcb, to_alloc);
 487}
 488
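/*
 * flags_cksum_prot_mask isolates the protocol and checksum-status bits
 * of a CQ completion; the four flags_{tcp,udp}{4,6} patterns below are
 * the only combinations for which bnad_cq_process() sets
 * CHECKSUM_UNNECESSARY.  Note that the IPv6 patterns carry no L3
 * checksum bit, since IPv6 has no header checksum.
 */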
 489#define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
 490                                        BNA_CQ_EF_IPV6 | \
 491                                        BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \
 492                                        BNA_CQ_EF_L4_CKSUM_OK)
 493
 494#define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
 495                                BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
 496#define flags_tcp6 (BNA_CQ_EF_IPV6 | \
 497                                BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
 498#define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
 499                                BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
 500#define flags_udp6 (BNA_CQ_EF_IPV6 | \
 501                                BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
 502
 503static inline struct sk_buff *
 504bnad_cq_prepare_skb(struct bnad_rx_ctrl *rx_ctrl,
 505                struct bnad_rx_unmap_q *unmap_q,
 506                struct bnad_rx_unmap *unmap,
 507                u32 length, u32 flags)
 508{
 509        struct bnad *bnad = rx_ctrl->bnad;
 510        struct sk_buff *skb;
 511
 512        if (BNAD_RXBUF_IS_PAGE(unmap_q->type)) {
 513                skb = napi_get_frags(&rx_ctrl->napi);
 514                if (unlikely(!skb))
 515                        return NULL;
 516
 517                dma_unmap_page(&bnad->pcidev->dev,
 518                                dma_unmap_addr(&unmap->vector, dma_addr),
 519                                unmap->vector.len, DMA_FROM_DEVICE);
 520                skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
 521                                unmap->page, unmap->page_offset, length);
 522                skb->len += length;
 523                skb->data_len += length;
 524                skb->truesize += length;
 525
 526                unmap->page = NULL;
 527                unmap->vector.len = 0;
 528
 529                return skb;
 530        }
 531
 532        skb = unmap->skb;
 533        BUG_ON(!skb);
 534
 535        dma_unmap_single(&bnad->pcidev->dev,
 536                        dma_unmap_addr(&unmap->vector, dma_addr),
 537                        unmap->vector.len, DMA_FROM_DEVICE);
 538
 539        skb_put(skb, length);
 540
 541        skb->protocol = eth_type_trans(skb, bnad->netdev);
 542
 543        unmap->skb = NULL;
 544        unmap->vector.len = 0;
 545        return skb;
 546}
 547
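/*
 * NAPI poll core: walk valid CQ entries up to 'budget', picking rcb[1]
 * for completions from the small RxQ and rcb[0] otherwise.  Frames
 * flagged with MAC/FCS/too-long errors are dropped and their buffer
 * released.  Good frames are built either as page frags via
 * napi_get_frags()/napi_gro_frags() or as a plain skb handed to
 * netif_receive_skb(), with CHECKSUM_UNNECESSARY set only for the
 * recognized TCP/UDP over IPv4/IPv6 flag patterns.  Finally the IB is
 * acked with the number of completions processed and both RxQs are
 * replenished.
 */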
 548static u32
 549bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
 550{
 551        struct bna_cq_entry *cq, *cmpl;
 552        struct bna_rcb *rcb = NULL;
 553        struct bnad_rx_unmap_q *unmap_q;
 554        struct bnad_rx_unmap *unmap;
 555        struct sk_buff *skb;
 556        struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
 557        struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
 558        u32 packets = 0, length = 0, flags, masked_flags;
 559
 560        prefetch(bnad->netdev);
 561
 562        cq = ccb->sw_q;
 563        cmpl = &cq[ccb->producer_index];
 564
 565        while (cmpl->valid && (packets < budget)) {
 566                packets++;
 567                flags = ntohl(cmpl->flags);
 568                length = ntohs(cmpl->length);
 569                BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
 570
 571                if (bna_is_small_rxq(cmpl->rxq_id))
 572                        rcb = ccb->rcb[1];
 573                else
 574                        rcb = ccb->rcb[0];
 575
 576                unmap_q = rcb->unmap_q;
 577                unmap = &unmap_q->unmap[rcb->consumer_index];
 578
 579                if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
 580                                        BNA_CQ_EF_FCS_ERROR |
 581                                        BNA_CQ_EF_TOO_LONG))) {
 582                        if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
 583                                bnad_rxq_cleanup_page(bnad, unmap);
 584                        else
 585                                bnad_rxq_cleanup_skb(bnad, unmap);
 586
 587                        rcb->rxq->rx_packets_with_error++;
 588                        goto next;
 589                }
 590
 591                skb = bnad_cq_prepare_skb(ccb->ctrl, unmap_q, unmap,
 592                                length, flags);
 593
 594                if (unlikely(!skb))
 595                        break;
 596
 597                masked_flags = flags & flags_cksum_prot_mask;
 598
 599                if (likely
 600                    ((bnad->netdev->features & NETIF_F_RXCSUM) &&
 601                     ((masked_flags == flags_tcp4) ||
 602                      (masked_flags == flags_udp4) ||
 603                      (masked_flags == flags_tcp6) ||
 604                      (masked_flags == flags_udp6))))
 605                        skb->ip_summed = CHECKSUM_UNNECESSARY;
 606                else
 607                        skb_checksum_none_assert(skb);
 608
 609                rcb->rxq->rx_packets++;
 610                rcb->rxq->rx_bytes += length;
 611
 612                if (flags & BNA_CQ_EF_VLAN)
 613                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));
 614
 615                if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
 616                        napi_gro_frags(&rx_ctrl->napi);
 617                else
 618                        netif_receive_skb(skb);
 619
 620next:
 621                cmpl->valid = 0;
 622                BNA_QE_INDX_INC(rcb->consumer_index, rcb->q_depth);
 623                BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
 624                cmpl = &cq[ccb->producer_index];
 625        }
 626
 627        napi_gro_flush(&rx_ctrl->napi, false);
 628        if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
 629                bna_ib_ack_disable_irq(ccb->i_dbell, packets);
 630
 631        bnad_rxq_post(bnad, ccb->rcb[0]);
 632        if (ccb->rcb[1])
 633                bnad_rxq_post(bnad, ccb->rcb[1]);
 634
 635        return packets;
 636}
 637
 638static void
 639bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
 640{
 641        struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
 642        struct napi_struct *napi = &rx_ctrl->napi;
 643
 644        if (likely(napi_schedule_prep(napi))) {
 645                __napi_schedule(napi);
 646                rx_ctrl->rx_schedule++;
 647        }
 648}
 649
 650/* MSIX Rx Path Handler */
 651static irqreturn_t
 652bnad_msix_rx(int irq, void *data)
 653{
 654        struct bna_ccb *ccb = (struct bna_ccb *)data;
 655
 656        if (ccb) {
 657                ((struct bnad_rx_ctrl *)(ccb->ctrl))->rx_intr_ctr++;
 658                bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
 659        }
 660
 661        return IRQ_HANDLED;
 662}
 663
 664/* Interrupt handlers */
 665
 666/* Mbox Interrupt Handlers */
 667static irqreturn_t
 668bnad_msix_mbox_handler(int irq, void *data)
 669{
 670        u32 intr_status;
 671        unsigned long flags;
 672        struct bnad *bnad = (struct bnad *)data;
 673
 674        spin_lock_irqsave(&bnad->bna_lock, flags);
 675        if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
 676                spin_unlock_irqrestore(&bnad->bna_lock, flags);
 677                return IRQ_HANDLED;
 678        }
 679
 680        bna_intr_status_get(&bnad->bna, intr_status);
 681
 682        if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
 683                bna_mbox_handler(&bnad->bna, intr_status);
 684
 685        spin_unlock_irqrestore(&bnad->bna_lock, flags);
 686
 687        return IRQ_HANDLED;
 688}
 689
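/*
 * Legacy/shared INTx handler: mailbox events are dispatched first
 * under bna_lock; if the status word also carries data interrupts,
 * Tx completions are reclaimed for every started TxQ and NAPI is
 * scheduled for every Rx ccb.
 */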
 690static irqreturn_t
 691bnad_isr(int irq, void *data)
 692{
 693        int i, j;
 694        u32 intr_status;
 695        unsigned long flags;
 696        struct bnad *bnad = (struct bnad *)data;
 697        struct bnad_rx_info *rx_info;
 698        struct bnad_rx_ctrl *rx_ctrl;
 699        struct bna_tcb *tcb = NULL;
 700
 701        spin_lock_irqsave(&bnad->bna_lock, flags);
 702        if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
 703                spin_unlock_irqrestore(&bnad->bna_lock, flags);
 704                return IRQ_NONE;
 705        }
 706
 707        bna_intr_status_get(&bnad->bna, intr_status);
 708
 709        if (unlikely(!intr_status)) {
 710                spin_unlock_irqrestore(&bnad->bna_lock, flags);
 711                return IRQ_NONE;
 712        }
 713
 714        if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
 715                bna_mbox_handler(&bnad->bna, intr_status);
 716
 717        spin_unlock_irqrestore(&bnad->bna_lock, flags);
 718
 719        if (!BNA_IS_INTX_DATA_INTR(intr_status))
 720                return IRQ_HANDLED;
 721
 722        /* Process data interrupts */
 723        /* Tx processing */
 724        for (i = 0; i < bnad->num_tx; i++) {
 725                for (j = 0; j < bnad->num_txq_per_tx; j++) {
 726                        tcb = bnad->tx_info[i].tcb[j];
 727                        if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
 728                                bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);
 729                }
 730        }
 731        /* Rx processing */
 732        for (i = 0; i < bnad->num_rx; i++) {
 733                rx_info = &bnad->rx_info[i];
 734                if (!rx_info->rx)
 735                        continue;
 736                for (j = 0; j < bnad->num_rxp_per_rx; j++) {
 737                        rx_ctrl = &rx_info->rx_ctrl[j];
 738                        if (rx_ctrl->ccb)
 739                                bnad_netif_rx_schedule_poll(bnad,
 740                                                            rx_ctrl->ccb);
 741                }
 742        }
 743        return IRQ_HANDLED;
 744}
 745
 746/*
 747 * Called in interrupt / callback context
 748 * with bna_lock held, so cfg_flags access is OK
 749 */
 750static void
 751bnad_enable_mbox_irq(struct bnad *bnad)
 752{
 753        clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
 754
 755        BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
 756}
 757
 758/*
  759 * Called with bnad->bna_lock held because of
 760 * bnad->cfg_flags access.
 761 */
 762static void
 763bnad_disable_mbox_irq(struct bnad *bnad)
 764{
 765        set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
 766
 767        BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
 768}
 769
 770static void
 771bnad_set_netdev_perm_addr(struct bnad *bnad)
 772{
 773        struct net_device *netdev = bnad->netdev;
 774
 775        memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
 776        if (is_zero_ether_addr(netdev->dev_addr))
 777                memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
 778}
 779
 780/* Control Path Handlers */
 781
 782/* Callbacks */
 783void
 784bnad_cb_mbox_intr_enable(struct bnad *bnad)
 785{
 786        bnad_enable_mbox_irq(bnad);
 787}
 788
 789void
 790bnad_cb_mbox_intr_disable(struct bnad *bnad)
 791{
 792        bnad_disable_mbox_irq(bnad);
 793}
 794
 795void
 796bnad_cb_ioceth_ready(struct bnad *bnad)
 797{
 798        bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
 799        complete(&bnad->bnad_completions.ioc_comp);
 800}
 801
 802void
 803bnad_cb_ioceth_failed(struct bnad *bnad)
 804{
 805        bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
 806        complete(&bnad->bnad_completions.ioc_comp);
 807}
 808
 809void
 810bnad_cb_ioceth_disabled(struct bnad *bnad)
 811{
 812        bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
 813        complete(&bnad->bnad_completions.ioc_comp);
 814}
 815
 816static void
 817bnad_cb_enet_disabled(void *arg)
 818{
 819        struct bnad *bnad = (struct bnad *)arg;
 820
 821        netif_carrier_off(bnad->netdev);
 822        complete(&bnad->bnad_completions.enet_comp);
 823}
 824
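/*
 * Link state callback from the BNA layer.  CEE (DCB) state is tracked
 * through BNAD_RF_CEE_RUNNING and toggles are counted.  On link-up the
 * carrier is turned on and every TxQ that is already started is woken
 * (others are stopped); on link-down the carrier is simply turned off.
 */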
 825void
 826bnad_cb_ethport_link_status(struct bnad *bnad,
 827                        enum bna_link_status link_status)
 828{
 829        bool link_up = false;
 830
 831        link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
 832
 833        if (link_status == BNA_CEE_UP) {
 834                if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
 835                        BNAD_UPDATE_CTR(bnad, cee_toggle);
 836                set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
 837        } else {
 838                if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
 839                        BNAD_UPDATE_CTR(bnad, cee_toggle);
 840                clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
 841        }
 842
 843        if (link_up) {
 844                if (!netif_carrier_ok(bnad->netdev)) {
 845                        uint tx_id, tcb_id;
 846                        printk(KERN_WARNING "bna: %s link up\n",
 847                                bnad->netdev->name);
 848                        netif_carrier_on(bnad->netdev);
 849                        BNAD_UPDATE_CTR(bnad, link_toggle);
 850                        for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
 851                                for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
 852                                      tcb_id++) {
 853                                        struct bna_tcb *tcb =
 854                                        bnad->tx_info[tx_id].tcb[tcb_id];
 855                                        u32 txq_id;
 856                                        if (!tcb)
 857                                                continue;
 858
 859                                        txq_id = tcb->id;
 860
 861                                        if (test_bit(BNAD_TXQ_TX_STARTED,
 862                                                     &tcb->flags)) {
 863                                                /*
 864                                                 * Force an immediate
 865                                                 * Transmit Schedule */
 866                                                printk(KERN_INFO "bna: %s %d "
 867                                                      "TXQ_STARTED\n",
 868                                                       bnad->netdev->name,
 869                                                       txq_id);
 870                                                netif_wake_subqueue(
 871                                                                bnad->netdev,
 872                                                                txq_id);
 873                                                BNAD_UPDATE_CTR(bnad,
 874                                                        netif_queue_wakeup);
 875                                        } else {
 876                                                netif_stop_subqueue(
 877                                                                bnad->netdev,
 878                                                                txq_id);
 879                                                BNAD_UPDATE_CTR(bnad,
 880                                                        netif_queue_stop);
 881                                        }
 882                                }
 883                        }
 884                }
 885        } else {
 886                if (netif_carrier_ok(bnad->netdev)) {
 887                        printk(KERN_WARNING "bna: %s link down\n",
 888                                bnad->netdev->name);
 889                        netif_carrier_off(bnad->netdev);
 890                        BNAD_UPDATE_CTR(bnad, link_toggle);
 891                }
 892        }
 893}
 894
 895static void
 896bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
 897{
 898        struct bnad *bnad = (struct bnad *)arg;
 899
 900        complete(&bnad->bnad_completions.tx_comp);
 901}
 902
 903static void
 904bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
 905{
 906        struct bnad_tx_info *tx_info =
 907                        (struct bnad_tx_info *)tcb->txq->tx->priv;
 908
 909        tcb->priv = tcb;
 910        tx_info->tcb[tcb->id] = tcb;
 911}
 912
 913static void
 914bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
 915{
 916        struct bnad_tx_info *tx_info =
 917                        (struct bnad_tx_info *)tcb->txq->tx->priv;
 918
 919        tx_info->tcb[tcb->id] = NULL;
 920        tcb->priv = NULL;
 921}
 922
 923static void
 924bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
 925{
 926        struct bnad_rx_info *rx_info =
 927                        (struct bnad_rx_info *)ccb->cq->rx->priv;
 928
 929        rx_info->rx_ctrl[ccb->id].ccb = ccb;
 930        ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
 931}
 932
 933static void
 934bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
 935{
 936        struct bnad_rx_info *rx_info =
 937                        (struct bnad_rx_info *)ccb->cq->rx->priv;
 938
 939        rx_info->rx_ctrl[ccb->id].ccb = NULL;
 940}
 941
 942static void
 943bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
 944{
 945        struct bnad_tx_info *tx_info =
 946                        (struct bnad_tx_info *)tx->priv;
 947        struct bna_tcb *tcb;
 948        u32 txq_id;
 949        int i;
 950
 951        for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
 952                tcb = tx_info->tcb[i];
 953                if (!tcb)
 954                        continue;
 955                txq_id = tcb->id;
 956                clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
 957                netif_stop_subqueue(bnad->netdev, txq_id);
 958                printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n",
 959                        bnad->netdev->name, txq_id);
 960        }
 961}
 962
 963static void
 964bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
 965{
 966        struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
 967        struct bna_tcb *tcb;
 968        u32 txq_id;
 969        int i;
 970
 971        for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
 972                tcb = tx_info->tcb[i];
 973                if (!tcb)
 974                        continue;
 975                txq_id = tcb->id;
 976
 977                BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
 978                set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
 979                BUG_ON(*(tcb->hw_consumer_index) != 0);
 980
 981                if (netif_carrier_ok(bnad->netdev)) {
 982                        printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
 983                                bnad->netdev->name, txq_id);
 984                        netif_wake_subqueue(bnad->netdev, txq_id);
 985                        BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
 986                }
 987        }
 988
 989        /*
  990         * Workaround: if the first ioceth enable fails, we can
  991         * end up with a zero MAC address. Try to fetch the MAC
  992         * address again here.
 993         */
 994        if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
 995                bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr);
 996                bnad_set_netdev_perm_addr(bnad);
 997        }
 998}
 999
1000/*
 1001 * Free all TxQ buffers and then notify TX_E_CLEANUP_DONE to the Tx fsm.
1002 */
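/*
 * Runs from bnad->work_q as delayed work.  If completion processing
 * currently owns a TxQ (BNAD_TXQ_FREE_SENT already set), the work
 * re-queues itself after 1 ms; once every TxQ has been drained the Tx
 * state machine is notified through bna_tx_cleanup_complete().
 */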
1003static void
1004bnad_tx_cleanup(struct delayed_work *work)
1005{
1006        struct bnad_tx_info *tx_info =
1007                container_of(work, struct bnad_tx_info, tx_cleanup_work);
1008        struct bnad *bnad = NULL;
1009        struct bna_tcb *tcb;
1010        unsigned long flags;
1011        u32 i, pending = 0;
1012
1013        for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1014                tcb = tx_info->tcb[i];
1015                if (!tcb)
1016                        continue;
1017
1018                bnad = tcb->bnad;
1019
1020                if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
1021                        pending++;
1022                        continue;
1023                }
1024
1025                bnad_txq_cleanup(bnad, tcb);
1026
1027                smp_mb__before_clear_bit();
1028                clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
1029        }
1030
1031        if (pending) {
1032                queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
1033                        msecs_to_jiffies(1));
1034                return;
1035        }
1036
1037        spin_lock_irqsave(&bnad->bna_lock, flags);
1038        bna_tx_cleanup_complete(tx_info->tx);
1039        spin_unlock_irqrestore(&bnad->bna_lock, flags);
1040}
1041
1042static void
1043bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
1044{
1045        struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
1046        struct bna_tcb *tcb;
1047        int i;
1048
1049        for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1050                tcb = tx_info->tcb[i];
1051                if (!tcb)
1052                        continue;
1053        }
1054
1055        queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
1056}
1057
1058static void
1059bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
1060{
1061        struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1062        struct bna_ccb *ccb;
1063        struct bnad_rx_ctrl *rx_ctrl;
1064        int i;
1065
1066        for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1067                rx_ctrl = &rx_info->rx_ctrl[i];
1068                ccb = rx_ctrl->ccb;
1069                if (!ccb)
1070                        continue;
1071
1072                clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);
1073
1074                if (ccb->rcb[1])
1075                        clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
1076        }
1077}
1078
1079/*
 1080 * Free all RxQ buffers and then notify RX_E_CLEANUP_DONE to the Rx fsm.
1081 */
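/*
 * Runs from bnad->work_q.  napi_disable() guarantees the poll handler
 * has exited before the CQ and both RxQs are cleaned, after which the
 * Rx state machine is notified through bna_rx_cleanup_complete().
 */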
1082static void
1083bnad_rx_cleanup(void *work)
1084{
1085        struct bnad_rx_info *rx_info =
1086                container_of(work, struct bnad_rx_info, rx_cleanup_work);
1087        struct bnad_rx_ctrl *rx_ctrl;
1088        struct bnad *bnad = NULL;
1089        unsigned long flags;
1090        u32 i;
1091
1092        for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1093                rx_ctrl = &rx_info->rx_ctrl[i];
1094
1095                if (!rx_ctrl->ccb)
1096                        continue;
1097
1098                bnad = rx_ctrl->ccb->bnad;
1099
1100                /*
1101                 * Wait till the poll handler has exited
1102                 * and nothing can be scheduled anymore
1103                 */
1104                napi_disable(&rx_ctrl->napi);
1105
1106                bnad_cq_cleanup(bnad, rx_ctrl->ccb);
1107                bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
1108                if (rx_ctrl->ccb->rcb[1])
1109                        bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);
1110        }
1111
1112        spin_lock_irqsave(&bnad->bna_lock, flags);
1113        bna_rx_cleanup_complete(rx_info->rx);
1114        spin_unlock_irqrestore(&bnad->bna_lock, flags);
1115}
1116
1117static void
1118bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
1119{
1120        struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1121        struct bna_ccb *ccb;
1122        struct bnad_rx_ctrl *rx_ctrl;
1123        int i;
1124
1125        for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1126                rx_ctrl = &rx_info->rx_ctrl[i];
1127                ccb = rx_ctrl->ccb;
1128                if (!ccb)
1129                        continue;
1130
1131                clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
1132
1133                if (ccb->rcb[1])
1134                        clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
1135        }
1136
1137        queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
1138}
1139
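/*
 * Called when the Rx path is (re)started: NAPI is enabled for each
 * rx_ctrl, every RCB gets a freshly initialized unmap queue, the
 * BNAD_RXQ_STARTED and BNAD_RXQ_POST_OK bits are set, and an initial
 * set of buffers is posted via bnad_rxq_post().
 */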
1140static void
1141bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
1142{
1143        struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1144        struct bna_ccb *ccb;
1145        struct bna_rcb *rcb;
1146        struct bnad_rx_ctrl *rx_ctrl;
1147        int i, j;
1148
1149        for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1150                rx_ctrl = &rx_info->rx_ctrl[i];
1151                ccb = rx_ctrl->ccb;
1152                if (!ccb)
1153                        continue;
1154
1155                napi_enable(&rx_ctrl->napi);
1156
1157                for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
1158                        rcb = ccb->rcb[j];
1159                        if (!rcb)
1160                                continue;
1161
1162                        bnad_rxq_alloc_init(bnad, rcb);
1163                        set_bit(BNAD_RXQ_STARTED, &rcb->flags);
1164                        set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
1165                        bnad_rxq_post(bnad, rcb);
1166                }
1167        }
1168}
1169
1170static void
1171bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
1172{
1173        struct bnad *bnad = (struct bnad *)arg;
1174
1175        complete(&bnad->bnad_completions.rx_comp);
1176}
1177
1178static void
1179bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
1180{
1181        bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
1182        complete(&bnad->bnad_completions.mcast_comp);
1183}
1184
1185void
1186bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
1187                       struct bna_stats *stats)
1188{
1189        if (status == BNA_CB_SUCCESS)
1190                BNAD_UPDATE_CTR(bnad, hw_stats_updates);
1191
1192        if (!netif_running(bnad->netdev) ||
1193                !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1194                return;
1195
1196        mod_timer(&bnad->stats_timer,
1197                  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1198}
1199
1200static void
1201bnad_cb_enet_mtu_set(struct bnad *bnad)
1202{
1203        bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
1204        complete(&bnad->bnad_completions.mtu_comp);
1205}
1206
1207void
1208bnad_cb_completion(void *arg, enum bfa_status status)
1209{
1210        struct bnad_iocmd_comp *iocmd_comp =
1211                        (struct bnad_iocmd_comp *)arg;
1212
1213        iocmd_comp->comp_status = (u32) status;
1214        complete(&iocmd_comp->comp);
1215}
1216
1217/* Resource allocation, free functions */
1218
1219static void
1220bnad_mem_free(struct bnad *bnad,
1221              struct bna_mem_info *mem_info)
1222{
1223        int i;
1224        dma_addr_t dma_pa;
1225
1226        if (mem_info->mdl == NULL)
1227                return;
1228
1229        for (i = 0; i < mem_info->num; i++) {
1230                if (mem_info->mdl[i].kva != NULL) {
1231                        if (mem_info->mem_type == BNA_MEM_T_DMA) {
1232                                BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
1233                                                dma_pa);
1234                                dma_free_coherent(&bnad->pcidev->dev,
1235                                                  mem_info->mdl[i].len,
1236                                                  mem_info->mdl[i].kva, dma_pa);
1237                        } else
1238                                kfree(mem_info->mdl[i].kva);
1239                }
1240        }
1241        kfree(mem_info->mdl);
1242        mem_info->mdl = NULL;
1243}
1244
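/*
 * Allocates mem_info->num blocks of mem_info->len bytes each: either
 * DMA-coherent memory (BNA_MEM_T_DMA, with the bus address recorded in
 * each descriptor) or plain kzalloc()ed kernel memory.  On failure
 * everything allocated so far is released via bnad_mem_free() and
 * -ENOMEM is returned.
 */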
1245static int
1246bnad_mem_alloc(struct bnad *bnad,
1247               struct bna_mem_info *mem_info)
1248{
1249        int i;
1250        dma_addr_t dma_pa;
1251
1252        if ((mem_info->num == 0) || (mem_info->len == 0)) {
1253                mem_info->mdl = NULL;
1254                return 0;
1255        }
1256
1257        mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
1258                                GFP_KERNEL);
1259        if (mem_info->mdl == NULL)
1260                return -ENOMEM;
1261
1262        if (mem_info->mem_type == BNA_MEM_T_DMA) {
1263                for (i = 0; i < mem_info->num; i++) {
1264                        mem_info->mdl[i].len = mem_info->len;
1265                        mem_info->mdl[i].kva =
1266                                dma_alloc_coherent(&bnad->pcidev->dev,
1267                                                   mem_info->len, &dma_pa,
1268                                                   GFP_KERNEL);
1269                        if (mem_info->mdl[i].kva == NULL)
1270                                goto err_return;
1271
1272                        BNA_SET_DMA_ADDR(dma_pa,
1273                                         &(mem_info->mdl[i].dma));
1274                }
1275        } else {
1276                for (i = 0; i < mem_info->num; i++) {
1277                        mem_info->mdl[i].len = mem_info->len;
1278                        mem_info->mdl[i].kva = kzalloc(mem_info->len,
1279                                                        GFP_KERNEL);
1280                        if (mem_info->mdl[i].kva == NULL)
1281                                goto err_return;
1282                }
1283        }
1284
1285        return 0;
1286
1287err_return:
1288        bnad_mem_free(bnad, mem_info);
1289        return -ENOMEM;
1290}
1291
1292/* Free IRQ for Mailbox */
1293static void
1294bnad_mbox_irq_free(struct bnad *bnad)
1295{
1296        int irq;
1297        unsigned long flags;
1298
1299        spin_lock_irqsave(&bnad->bna_lock, flags);
1300        bnad_disable_mbox_irq(bnad);
1301        spin_unlock_irqrestore(&bnad->bna_lock, flags);
1302
1303        irq = BNAD_GET_MBOX_IRQ(bnad);
1304        free_irq(irq, bnad);
1305}
1306
1307/*
 1308 * Allocates the IRQ for the Mailbox, but keeps it disabled
1309 * This will be enabled once we get the mbox enable callback
1310 * from bna
1311 */
1312static int
1313bnad_mbox_irq_alloc(struct bnad *bnad)
1314{
1315        int             err = 0;
1316        unsigned long   irq_flags, flags;
1317        u32     irq;
1318        irq_handler_t   irq_handler;
1319
1320        spin_lock_irqsave(&bnad->bna_lock, flags);
1321        if (bnad->cfg_flags & BNAD_CF_MSIX) {
1322                irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1323                irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
1324                irq_flags = 0;
1325        } else {
1326                irq_handler = (irq_handler_t)bnad_isr;
1327                irq = bnad->pcidev->irq;
1328                irq_flags = IRQF_SHARED;
1329        }
1330
1331        spin_unlock_irqrestore(&bnad->bna_lock, flags);
1332        sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1333
1334        /*
1335         * Set the Mbox IRQ disable flag, so that the IRQ handler
 1336         * called from request_irq() for SHARED IRQs does not execute
1337         */
1338        set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1339
1340        BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
1341
1342        err = request_irq(irq, irq_handler, irq_flags,
1343                          bnad->mbox_irq_name, bnad);
1344
1345        return err;
1346}
1347
1348static void
1349bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1350{
1351        kfree(intr_info->idl);
1352        intr_info->idl = NULL;
1353}
1354
1355/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
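/*
 * MSI-X vector layout assumed by this function: Tx vectors start right
 * after the mailbox vector(s), i.e. at BNAD_MAILBOX_MSIX_VECTORS +
 * txrx_id, and Rx vectors follow all (num_tx * num_txq_per_tx) Tx
 * vectors.  In INTx mode a single descriptor carries the Tx or Rx IB
 * bitmask instead of a vector number.
 */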
1356static int
1357bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1358                    u32 txrx_id, struct bna_intr_info *intr_info)
1359{
1360        int i, vector_start = 0;
1361        u32 cfg_flags;
1362        unsigned long flags;
1363
1364        spin_lock_irqsave(&bnad->bna_lock, flags);
1365        cfg_flags = bnad->cfg_flags;
1366        spin_unlock_irqrestore(&bnad->bna_lock, flags);
1367
1368        if (cfg_flags & BNAD_CF_MSIX) {
1369                intr_info->intr_type = BNA_INTR_T_MSIX;
1370                intr_info->idl = kcalloc(intr_info->num,
1371                                        sizeof(struct bna_intr_descr),
1372                                        GFP_KERNEL);
1373                if (!intr_info->idl)
1374                        return -ENOMEM;
1375
1376                switch (src) {
1377                case BNAD_INTR_TX:
1378                        vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
1379                        break;
1380
1381                case BNAD_INTR_RX:
1382                        vector_start = BNAD_MAILBOX_MSIX_VECTORS +
1383                                        (bnad->num_tx * bnad->num_txq_per_tx) +
1384                                        txrx_id;
1385                        break;
1386
1387                default:
1388                        BUG();
1389                }
1390
1391                for (i = 0; i < intr_info->num; i++)
1392                        intr_info->idl[i].vector = vector_start + i;
1393        } else {
1394                intr_info->intr_type = BNA_INTR_T_INTX;
1395                intr_info->num = 1;
1396                intr_info->idl = kcalloc(intr_info->num,
1397                                        sizeof(struct bna_intr_descr),
1398                                        GFP_KERNEL);
1399                if (!intr_info->idl)
1400                        return -ENOMEM;
1401
1402                switch (src) {
1403                case BNAD_INTR_TX:
1404                        intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
1405                        break;
1406
1407                case BNAD_INTR_RX:
1408                        intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
1409                        break;
1410                }
1411        }
1412        return 0;
1413}
1414
1415/* NOTE: Should be called for MSIX only
1416 * Unregisters Tx MSIX vector(s) from the kernel
1417 */
1418static void
1419bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1420                        int num_txqs)
1421{
1422        int i;
1423        int vector_num;
1424
1425        for (i = 0; i < num_txqs; i++) {
1426                if (tx_info->tcb[i] == NULL)
1427                        continue;
1428
1429                vector_num = tx_info->tcb[i]->intr_vector;
1430                free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1431        }
1432}
1433
1434/* NOTE: Should be called for MSIX only
1435 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1436 */
1437static int
1438bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1439                        u32 tx_id, int num_txqs)
1440{
1441        int i;
1442        int err;
1443        int vector_num;
1444
1445        for (i = 0; i < num_txqs; i++) {
1446                vector_num = tx_info->tcb[i]->intr_vector;
1447                sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1448                                tx_id + tx_info->tcb[i]->id);
1449                err = request_irq(bnad->msix_table[vector_num].vector,
1450                                  (irq_handler_t)bnad_msix_tx, 0,
1451                                  tx_info->tcb[i]->name,
1452                                  tx_info->tcb[i]);
1453                if (err)
1454                        goto err_return;
1455        }
1456
1457        return 0;
1458
1459err_return:
1460        if (i > 0)
1461                bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
1462        return -1;
1463}
1464
1465/* NOTE: Should be called for MSIX only
1466 * Unregisters Rx MSIX vector(s) from the kernel
1467 */
1468static void
1469bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1470                        int num_rxps)
1471{
1472        int i;
1473        int vector_num;
1474
1475        for (i = 0; i < num_rxps; i++) {
1476                if (rx_info->rx_ctrl[i].ccb == NULL)
1477                        continue;
1478
1479                vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1480                free_irq(bnad->msix_table[vector_num].vector,
1481                         rx_info->rx_ctrl[i].ccb);
1482        }
1483}
1484
1485/* NOTE: Should be called for MSIX only
 1486 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
1487 */
1488static int
1489bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1490                        u32 rx_id, int num_rxps)
1491{
1492        int i;
1493        int err;
1494        int vector_num;
1495
1496        for (i = 0; i < num_rxps; i++) {
1497                vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1498                sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
1499                        bnad->netdev->name,
1500                        rx_id + rx_info->rx_ctrl[i].ccb->id);
1501                err = request_irq(bnad->msix_table[vector_num].vector,
1502                                  (irq_handler_t)bnad_msix_rx, 0,
1503                                  rx_info->rx_ctrl[i].ccb->name,
1504                                  rx_info->rx_ctrl[i].ccb);
1505                if (err)
1506                        goto err_return;
1507        }
1508
1509        return 0;
1510
1511err_return:
1512        if (i > 0)
1513                bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
1514        return -1;
1515}
1516
1517/* Free Tx object Resources */
1518static void
1519bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1520{
1521        int i;
1522
1523        for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1524                if (res_info[i].res_type == BNA_RES_T_MEM)
1525                        bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1526                else if (res_info[i].res_type == BNA_RES_T_INTR)
1527                        bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1528        }
1529}
1530
1531/* Allocates memory and interrupt resources for Tx object */
1532static int
1533bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1534                  u32 tx_id)
1535{
1536        int i, err = 0;
1537
1538        for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1539                if (res_info[i].res_type == BNA_RES_T_MEM)
1540                        err = bnad_mem_alloc(bnad,
1541                                        &res_info[i].res_u.mem_info);
1542                else if (res_info[i].res_type == BNA_RES_T_INTR)
1543                        err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1544                                        &res_info[i].res_u.intr_info);
1545                if (err)
1546                        goto err_return;
1547        }
1548        return 0;
1549
1550err_return:
1551        bnad_tx_res_free(bnad, res_info);
1552        return err;
1553}
1554
1555/* Free Rx object Resources */
1556static void
1557bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1558{
1559        int i;
1560
1561        for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1562                if (res_info[i].res_type == BNA_RES_T_MEM)
1563                        bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1564                else if (res_info[i].res_type == BNA_RES_T_INTR)
1565                        bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1566        }
1567}
1568
1569/* Allocates memory and interrupt resources for Rx object */
1570static int
1571bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1572                  uint rx_id)
1573{
1574        int i, err = 0;
1575
1576        /* All memory needs to be allocated before setup_ccbs */
1577        for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1578                if (res_info[i].res_type == BNA_RES_T_MEM)
1579                        err = bnad_mem_alloc(bnad,
1580                                        &res_info[i].res_u.mem_info);
1581                else if (res_info[i].res_type == BNA_RES_T_INTR)
1582                        err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1583                                        &res_info[i].res_u.intr_info);
1584                if (err)
1585                        goto err_return;
1586        }
1587        return 0;
1588
1589err_return:
1590        bnad_rx_res_free(bnad, res_info);
1591        return err;
1592}
1593
1594/* Timer callbacks */
1595/* a) IOC timer */
1596static void
1597bnad_ioc_timeout(unsigned long data)
1598{
1599        struct bnad *bnad = (struct bnad *)data;
1600        unsigned long flags;
1601
1602        spin_lock_irqsave(&bnad->bna_lock, flags);
1603        bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc);
1604        spin_unlock_irqrestore(&bnad->bna_lock, flags);
1605}
1606
1607static void
1608bnad_ioc_hb_check(unsigned long data)
1609{
1610        struct bnad *bnad = (struct bnad *)data;
1611        unsigned long flags;
1612
1613        spin_lock_irqsave(&bnad->bna_lock, flags);
1614        bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc);
1615        spin_unlock_irqrestore(&bnad->bna_lock, flags);
1616}
1617
1618static void
1619bnad_iocpf_timeout(unsigned long data)
1620{
1621        struct bnad *bnad = (struct bnad *)data;
1622        unsigned long flags;
1623
1624        spin_lock_irqsave(&bnad->bna_lock, flags);
1625        bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc);
1626        spin_unlock_irqrestore(&bnad->bna_lock, flags);
1627}
1628
1629static void
1630bnad_iocpf_sem_timeout(unsigned long data)
1631{
1632        struct bnad *bnad = (struct bnad *)data;
1633        unsigned long flags;
1634
1635        spin_lock_irqsave(&bnad->bna_lock, flags);
1636        bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc);
1637        spin_unlock_irqrestore(&bnad->bna_lock, flags);
1638}
1639
1640/*
1641 * All timer routines use bnad->bna_lock to protect against
1642 * the following race, which could occur without locking:
1643 *      Time    CPU m   CPU n
1644 *      0       1 = test_bit
1645 *      1                       clear_bit
1646 *      2                       del_timer_sync
1647 *      3       mod_timer
1648 */
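
/*
 * A minimal sketch of how that race is avoided for the DIM timer below
 * (this mirrors bnad_destroy_rx() and bnad_dim_timeout(); it is not a
 * separate helper in this driver): clear the RUNNING bit under bna_lock,
 * then call del_timer_sync() outside the lock:
 *
 *	spin_lock_irqsave(&bnad->bna_lock, flags);
 *	if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
 *		clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
 *		to_del = 1;
 *	}
 *	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 *	if (to_del)
 *		del_timer_sync(&bnad->dim_timer);
 *
 * Since bnad_dim_timeout() tests the bit and calls mod_timer() under the
 * same lock, the callback cannot re-arm the timer once the bit is cleared.
 */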
1649
1650/* b) Dynamic Interrupt Moderation Timer */
1651static void
1652bnad_dim_timeout(unsigned long data)
1653{
1654        struct bnad *bnad = (struct bnad *)data;
1655        struct bnad_rx_info *rx_info;
1656        struct bnad_rx_ctrl *rx_ctrl;
1657        int i, j;
1658        unsigned long flags;
1659
1660        if (!netif_carrier_ok(bnad->netdev))
1661                return;
1662
1663        spin_lock_irqsave(&bnad->bna_lock, flags);
1664        for (i = 0; i < bnad->num_rx; i++) {
1665                rx_info = &bnad->rx_info[i];
1666                if (!rx_info->rx)
1667                        continue;
1668                for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1669                        rx_ctrl = &rx_info->rx_ctrl[j];
1670                        if (!rx_ctrl->ccb)
1671                                continue;
1672                        bna_rx_dim_update(rx_ctrl->ccb);
1673                }
1674        }
1675
1676        /* Check for BNAD_CF_DIM_ENABLED; this does not eliminate the race */
1677        if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1678                mod_timer(&bnad->dim_timer,
1679                          jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1680        spin_unlock_irqrestore(&bnad->bna_lock, flags);
1681}
1682
1683/* c)  Statistics Timer */
1684static void
1685bnad_stats_timeout(unsigned long data)
1686{
1687        struct bnad *bnad = (struct bnad *)data;
1688        unsigned long flags;
1689
1690        if (!netif_running(bnad->netdev) ||
1691                !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1692                return;
1693
1694        spin_lock_irqsave(&bnad->bna_lock, flags);
1695        bna_hw_stats_get(&bnad->bna);
1696        spin_unlock_irqrestore(&bnad->bna_lock, flags);
1697}
1698
1699/*
1700 * Set up timer for DIM
1701 * Called with bnad->bna_lock held
1702 */
1703void
1704bnad_dim_timer_start(struct bnad *bnad)
1705{
1706        if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1707            !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1708                setup_timer(&bnad->dim_timer, bnad_dim_timeout,
1709                            (unsigned long)bnad);
1710                set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1711                mod_timer(&bnad->dim_timer,
1712                          jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1713        }
1714}
1715
1716/*
1717 * Set up timer for statistics
1718 * Called with mutex_lock(&bnad->conf_mutex) held
1719 */
1720static void
1721bnad_stats_timer_start(struct bnad *bnad)
1722{
1723        unsigned long flags;
1724
1725        spin_lock_irqsave(&bnad->bna_lock, flags);
1726        if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1727                setup_timer(&bnad->stats_timer, bnad_stats_timeout,
1728                            (unsigned long)bnad);
1729                mod_timer(&bnad->stats_timer,
1730                          jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1731        }
1732        spin_unlock_irqrestore(&bnad->bna_lock, flags);
1733}
1734
1735/*
1736 * Stops the stats timer
1737 * Called with mutex_lock(&bnad->conf_mutex) held
1738 */
1739static void
1740bnad_stats_timer_stop(struct bnad *bnad)
1741{
1742        int to_del = 0;
1743        unsigned long flags;
1744
1745        spin_lock_irqsave(&bnad->bna_lock, flags);
1746        if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1747                to_del = 1;
1748        spin_unlock_irqrestore(&bnad->bna_lock, flags);
1749        if (to_del)
1750                del_timer_sync(&bnad->stats_timer);
1751}
1752
1753/* Utilities */
1754
1755static void
1756bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1757{
1758        int i = 1; /* Index 0 has broadcast address */
1759        struct netdev_hw_addr *mc_addr;
1760
1761        netdev_for_each_mc_addr(mc_addr, netdev) {
1762                memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
1763                                                        ETH_ALEN);
1764                i++;
1765        }
1766}
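
/*
 * A minimal sketch of the buffer layout this helper assumes (inferred
 * from its use in bnad_set_rx_mode() further below); the caller reserves
 * slot 0 for the broadcast address and this function fills the rest:
 *
 *	mc_list[0 * ETH_ALEN]	broadcast address (filled by the caller)
 *	mc_list[1 * ETH_ALEN]	first multicast address
 *	mc_list[2 * ETH_ALEN]	second multicast address
 *	...
 *
 * so the buffer must be at least (netdev_mc_count(netdev) + 1) * ETH_ALEN
 * bytes long.
 */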
1767
1768static int
1769bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1770{
1771        struct bnad_rx_ctrl *rx_ctrl =
1772                container_of(napi, struct bnad_rx_ctrl, napi);
1773        struct bnad *bnad = rx_ctrl->bnad;
1774        int rcvd = 0;
1775
1776        rx_ctrl->rx_poll_ctr++;
1777
1778        if (!netif_carrier_ok(bnad->netdev))
1779                goto poll_exit;
1780
1781        rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
1782        if (rcvd >= budget)
1783                return rcvd;
1784
1785poll_exit:
1786        napi_complete(napi);
1787
1788        rx_ctrl->rx_complete++;
1789
1790        if (rx_ctrl->ccb)
1791                bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
1792
1793        return rcvd;
1794}
1795
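/*
 * A minimal sketch of the standard NAPI contract the poll routine above
 * follows (generic NAPI semantics, not driver-specific; process_rx() and
 * reenable_rx_irq() are placeholder names):
 *
 *	rcvd = process_rx(budget);
 *	if (rcvd >= budget)
 *		return rcvd;		- budget exhausted, stay scheduled
 *	napi_complete(napi);		- done for now
 *	reenable_rx_irq();		- here: bnad_enable_rx_irq_unsafe()
 *	return rcvd;
 *
 * CQ interrupts stay masked while NAPI is scheduled, which is why the
 * Rx IRQ is only re-enabled after napi_complete().
 */
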
1796#define BNAD_NAPI_POLL_QUOTA            64
1797static void
1798bnad_napi_add(struct bnad *bnad, u32 rx_id)
1799{
1800        struct bnad_rx_ctrl *rx_ctrl;
1801        int i;
1802
1803        /* Initialize & enable NAPI */
1804        for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1805                rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1806                netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1807                               bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
1808        }
1809}
1810
1811static void
1812bnad_napi_delete(struct bnad *bnad, u32 rx_id)
1813{
1814        int i;
1815
1816        /* First disable and then clean up */
1817        for (i = 0; i < bnad->num_rxp_per_rx; i++)
1818                netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1819}
1820
1821/* Must be called with conf_lock held */
1822void
1823bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
1824{
1825        struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1826        struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1827        unsigned long flags;
1828
1829        if (!tx_info->tx)
1830                return;
1831
1832        init_completion(&bnad->bnad_completions.tx_comp);
1833        spin_lock_irqsave(&bnad->bna_lock, flags);
1834        bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1835        spin_unlock_irqrestore(&bnad->bna_lock, flags);
1836        wait_for_completion(&bnad->bnad_completions.tx_comp);
1837
1838        if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1839                bnad_tx_msix_unregister(bnad, tx_info,
1840                        bnad->num_txq_per_tx);
1841
1842        spin_lock_irqsave(&bnad->bna_lock, flags);
1843        bna_tx_destroy(tx_info->tx);
1844        spin_unlock_irqrestore(&bnad->bna_lock, flags);
1845
1846        tx_info->tx = NULL;
1847        tx_info->tx_id = 0;
1848
1849        bnad_tx_res_free(bnad, res_info);
1850}
1851
1852/* Must be called with conf_lock held */
1853int
1854bnad_setup_tx(struct bnad *bnad, u32 tx_id)
1855{
1856        int err;
1857        struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1858        struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1859        struct bna_intr_info *intr_info =
1860                        &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1861        struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1862        static const struct bna_tx_event_cbfn tx_cbfn = {
1863                .tcb_setup_cbfn = bnad_cb_tcb_setup,
1864                .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
1865                .tx_stall_cbfn = bnad_cb_tx_stall,
1866                .tx_resume_cbfn = bnad_cb_tx_resume,
1867                .tx_cleanup_cbfn = bnad_cb_tx_cleanup,
1868        };
1869
1870        struct bna_tx *tx;
1871        unsigned long flags;
1872
1873        tx_info->tx_id = tx_id;
1874
1875        /* Initialize the Tx object configuration */
1876        tx_config->num_txq = bnad->num_txq_per_tx;
1877        tx_config->txq_depth = bnad->txq_depth;
1878        tx_config->tx_type = BNA_TX_T_REGULAR;
1879        tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
1880
1881        /* Get BNA's resource requirement for one tx object */
1882        spin_lock_irqsave(&bnad->bna_lock, flags);
1883        bna_tx_res_req(bnad->num_txq_per_tx,
1884                bnad->txq_depth, res_info);
1885        spin_unlock_irqrestore(&bnad->bna_lock, flags);
1886
1887        /* Fill Unmap Q memory requirements */
1888        BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1889                        bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) *
1890                        bnad->txq_depth));
1891
1892        /* Allocate resources */
1893        err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1894        if (err)
1895                return err;
1896
1897        /* Ask BNA to create one Tx object, supplying required resources */
1898        spin_lock_irqsave(&bnad->bna_lock, flags);
1899        tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
1900                        tx_info);
1901        spin_unlock_irqrestore(&bnad->bna_lock, flags);
1902        if (!tx)
1903                goto err_return;
1904        tx_info->tx = tx;
1905
1906        INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
1907                        (work_func_t)bnad_tx_cleanup);
1908
1909        /* Register ISR for the Tx object */
1910        if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1911                err = bnad_tx_msix_register(bnad, tx_info,
1912                        tx_id, bnad->num_txq_per_tx);
1913                if (err)
1914                        goto err_return;
1915        }
1916
1917        spin_lock_irqsave(&bnad->bna_lock, flags);
1918        bna_tx_enable(tx);
1919        spin_unlock_irqrestore(&bnad->bna_lock, flags);
1920
1921        return 0;
1922
1923err_return:
1924        bnad_tx_res_free(bnad, res_info);
1925        return err;
1926}
1927
1928/* Setup the rx config for bna_rx_create */
1929/* bnad decides the configuration */
1930static void
1931bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
1932{
1933        rx_config->rx_type = BNA_RX_T_REGULAR;
1934        rx_config->num_paths = bnad->num_rxp_per_rx;
1935        rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
1936
1937        if (bnad->num_rxp_per_rx > 1) {
1938                rx_config->rss_status = BNA_STATUS_T_ENABLED;
1939                rx_config->rss_config.hash_type =
1940                                (BFI_ENET_RSS_IPV6 |
1941                                 BFI_ENET_RSS_IPV6_TCP |
1942                                 BFI_ENET_RSS_IPV4 |
1943                                 BFI_ENET_RSS_IPV4_TCP);
1944                rx_config->rss_config.hash_mask =
1945                                bnad->num_rxp_per_rx - 1;
1946                get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
1947                        sizeof(rx_config->rss_config.toeplitz_hash_key));
1948        } else {
1949                rx_config->rss_status = BNA_STATUS_T_DISABLED;
1950                memset(&rx_config->rss_config, 0,
1951                       sizeof(rx_config->rss_config));
1952        }
1953        rx_config->rxp_type = BNA_RXP_SLR;
1954        rx_config->q_depth = bnad->rxq_depth;
1955
1956        rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;
1957
1958        rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
1959}
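
/*
 * A worked example of the RSS setup above, assuming the usual
 * "hash & hash_mask selects the Rx path" behaviour and that
 * num_rxp_per_rx is a power of two (required for the mask to work):
 *
 *	num_rxp_per_rx = 4
 *	hash_mask      = 4 - 1 = 0x3
 *	Toeplitz hash  = 0x9e3779b9	(arbitrary example value)
 *	Rx path        = 0x9e3779b9 & 0x3 = 1
 *
 * With a single Rx path RSS is disabled and the rss_config is zeroed.
 */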
1960
1961static void
1962bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
1963{
1964        struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1965        int i;
1966
1967        for (i = 0; i < bnad->num_rxp_per_rx; i++)
1968                rx_info->rx_ctrl[i].bnad = bnad;
1969}
1970
1971/* Called with mutex_lock(&bnad->conf_mutex) held */
1972void
1973bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
1974{
1975        struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1976        struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1977        struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1978        unsigned long flags;
1979        int to_del = 0;
1980
1981        if (!rx_info->rx)
1982                return;
1983
1984        if (0 == rx_id) {
1985                spin_lock_irqsave(&bnad->bna_lock, flags);
1986                if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1987                    test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1988                        clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1989                        to_del = 1;
1990                }
1991                spin_unlock_irqrestore(&bnad->bna_lock, flags);
1992                if (to_del)
1993                        del_timer_sync(&bnad->dim_timer);
1994        }
1995
1996        init_completion(&bnad->bnad_completions.rx_comp);
1997        spin_lock_irqsave(&bnad->bna_lock, flags);
1998        bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
1999        spin_unlock_irqrestore(&bnad->bna_lock, flags);
2000        wait_for_completion(&bnad->bnad_completions.rx_comp);
2001
2002        if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
2003                bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
2004
2005        bnad_napi_delete(bnad, rx_id);
2006
2007        spin_lock_irqsave(&bnad->bna_lock, flags);
2008        bna_rx_destroy(rx_info->rx);
2009
2010        rx_info->rx = NULL;
2011        rx_info->rx_id = 0;
2012        spin_unlock_irqrestore(&bnad->bna_lock, flags);
2013
2014        bnad_rx_res_free(bnad, res_info);
2015}
2016
2017/* Called with mutex_lock(&bnad->conf_mutex) held */
2018int
2019bnad_setup_rx(struct bnad *bnad, u32 rx_id)
2020{
2021        int err;
2022        struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2023        struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2024        struct bna_intr_info *intr_info =
2025                        &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2026        struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2027        static const struct bna_rx_event_cbfn rx_cbfn = {
2028                .rcb_setup_cbfn = NULL,
2029                .rcb_destroy_cbfn = NULL,
2030                .ccb_setup_cbfn = bnad_cb_ccb_setup,
2031                .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
2032                .rx_stall_cbfn = bnad_cb_rx_stall,
2033                .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
2034                .rx_post_cbfn = bnad_cb_rx_post,
2035        };
2036        struct bna_rx *rx;
2037        unsigned long flags;
2038
2039        rx_info->rx_id = rx_id;
2040
2041        /* Initialize the Rx object configuration */
2042        bnad_init_rx_config(bnad, rx_config);
2043
2044        /* Get BNA's resource requirement for one Rx object */
2045        spin_lock_irqsave(&bnad->bna_lock, flags);
2046        bna_rx_res_req(rx_config, res_info);
2047        spin_unlock_irqrestore(&bnad->bna_lock, flags);
2048
2049        /* Fill Unmap Q memory requirements */
2050        BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPQ],
2051                        rx_config->num_paths +
2052                        ((rx_config->rxp_type == BNA_RXP_SINGLE) ?
2053                         0 : rx_config->num_paths),
2054                        ((bnad->rxq_depth * sizeof(struct bnad_rx_unmap)) +
2055                         sizeof(struct bnad_rx_unmap_q)));
2056
2057        /* Allocate resources */
2058        err = bnad_rx_res_alloc(bnad, res_info, rx_id);
2059        if (err)
2060                return err;
2061
2062        bnad_rx_ctrl_init(bnad, rx_id);
2063
2064        /* Ask BNA to create one Rx object, supplying required resources */
2065        spin_lock_irqsave(&bnad->bna_lock, flags);
2066        rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
2067                        rx_info);
2068        if (!rx) {
2069                err = -ENOMEM;
2070                spin_unlock_irqrestore(&bnad->bna_lock, flags);
2071                goto err_return;
2072        }
2073        rx_info->rx = rx;
2074        spin_unlock_irqrestore(&bnad->bna_lock, flags);
2075
2076        INIT_WORK(&rx_info->rx_cleanup_work,
2077                        (work_func_t)(bnad_rx_cleanup));
2078
2079        /*
2080         * Init NAPI: the state starts as NAPI_STATE_SCHED, so the
2081         * IRQ handler cannot schedule NAPI at this point.
2082         */
2083        bnad_napi_add(bnad, rx_id);
2084
2085        /* Register ISR for the Rx object */
2086        if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2087                err = bnad_rx_msix_register(bnad, rx_info, rx_id,
2088                                                rx_config->num_paths);
2089                if (err)
2090                        goto err_return;
2091        }
2092
2093        spin_lock_irqsave(&bnad->bna_lock, flags);
2094        if (0 == rx_id) {
2095                /* Set up Dynamic Interrupt Moderation Vector */
2096                if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
2097                        bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
2098
2099                /* Enable VLAN filtering only on the default Rx */
2100                bna_rx_vlanfilter_enable(rx);
2101
2102                /* Start the DIM timer */
2103                bnad_dim_timer_start(bnad);
2104        }
2105
2106        bna_rx_enable(rx);
2107        spin_unlock_irqrestore(&bnad->bna_lock, flags);
2108
2109        return 0;
2110
2111err_return:
2112        bnad_destroy_rx(bnad, rx_id);
2113        return err;
2114}
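
/*
 * A worked example of the unmap-Q sizing in bnad_setup_rx() above
 * (illustrative numbers; BNA_RXP_SLR pairs a small and a large RxQ per
 * path, so non-single rxp types need twice as many unmap queues):
 *
 *	rxp_type  = BNA_RXP_SLR, num_paths = 4
 *	unmap Qs  = 4 + 4 = 8		(one per RxQ)
 *	each Q    = rxq_depth * sizeof(struct bnad_rx_unmap)
 *		    + sizeof(struct bnad_rx_unmap_q)
 *
 * With BNA_RXP_SINGLE there is one RxQ per path, so only num_paths
 * unmap queues are requested.
 */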
2115
2116/* Called with conf_lock & bnad->bna_lock held */
2117void
2118bnad_tx_coalescing_timeo_set(struct bnad *bnad)
2119{
2120        struct bnad_tx_info *tx_info;
2121
2122        tx_info = &bnad->tx_info[0];
2123        if (!tx_info->tx)
2124                return;
2125
2126        bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
2127}
2128
2129/* Called with conf_lock & bnad->bna_lock held */
2130void
2131bnad_rx_coalescing_timeo_set(struct bnad *bnad)
2132{
2133        struct bnad_rx_info *rx_info;
2134        int     i;
2135
2136        for (i = 0; i < bnad->num_rx; i++) {
2137                rx_info = &bnad->rx_info[i];
2138                if (!rx_info->rx)
2139                        continue;
2140                bna_rx_coalescing_timeo_set(rx_info->rx,
2141                                bnad->rx_coalescing_timeo);
2142        }
2143}
2144
2145/*
2146 * Called with bnad->bna_lock held
2147 */
2148int
2149bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
2150{
2151        int ret;
2152
2153        if (!is_valid_ether_addr(mac_addr))
2154                return -EADDRNOTAVAIL;
2155
2156        /* If datapath is down, pretend everything went through */
2157        if (!bnad->rx_info[0].rx)
2158                return 0;
2159
2160        ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
2161        if (ret != BNA_CB_SUCCESS)
2162                return -EADDRNOTAVAIL;
2163
2164        return 0;
2165}
2166
2167/* Should be called with conf_lock held */
2168int
2169bnad_enable_default_bcast(struct bnad *bnad)
2170{
2171        struct bnad_rx_info *rx_info = &bnad->rx_info[0];
2172        int ret;
2173        unsigned long flags;
2174
2175        init_completion(&bnad->bnad_completions.mcast_comp);
2176
2177        spin_lock_irqsave(&bnad->bna_lock, flags);
2178        ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
2179                                bnad_cb_rx_mcast_add);
2180        spin_unlock_irqrestore(&bnad->bna_lock, flags);
2181
2182        if (ret == BNA_CB_SUCCESS)
2183                wait_for_completion(&bnad->bnad_completions.mcast_comp);
2184        else
2185                return -ENODEV;
2186
2187        if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
2188                return -ENODEV;
2189
2190        return 0;
2191}
2192
2193/* Called with mutex_lock(&bnad->conf_mutex) held */
2194void
2195bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
2196{
2197        u16 vid;
2198        unsigned long flags;
2199
2200        for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
2201                spin_lock_irqsave(&bnad->bna_lock, flags);
2202                bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
2203                spin_unlock_irqrestore(&bnad->bna_lock, flags);
2204        }
2205}
2206
2207/* Statistics utilities */
2208void
2209bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2210{
2211        int i, j;
2212
2213        for (i = 0; i < bnad->num_rx; i++) {
2214                for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2215                        if (bnad->rx_info[i].rx_ctrl[j].ccb) {
2216                                stats->rx_packets += bnad->rx_info[i].
2217                                rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
2218                                stats->rx_bytes += bnad->rx_info[i].
2219                                        rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2220                                if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2221                                        bnad->rx_info[i].rx_ctrl[j].ccb->
2222                                        rcb[1]->rxq) {
2223                                        stats->rx_packets +=
2224                                                bnad->rx_info[i].rx_ctrl[j].
2225                                                ccb->rcb[1]->rxq->rx_packets;
2226                                        stats->rx_bytes +=
2227                                                bnad->rx_info[i].rx_ctrl[j].
2228                                                ccb->rcb[1]->rxq->rx_bytes;
2229                                }
2230                        }
2231                }
2232        }
2233        for (i = 0; i < bnad->num_tx; i++) {
2234                for (j = 0; j < bnad->num_txq_per_tx; j++) {
2235                        if (bnad->tx_info[i].tcb[j]) {
2236                                stats->tx_packets +=
2237                                bnad->tx_info[i].tcb[j]->txq->tx_packets;
2238                                stats->tx_bytes +=
2239                                        bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2240                        }
2241                }
2242        }
2243}
2244
2245/*
2246 * Must be called with the bna_lock held.
2247 */
2248void
2249bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2250{
2251        struct bfi_enet_stats_mac *mac_stats;
2252        u32 bmap;
2253        int i;
2254
2255        mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
2256        stats->rx_errors =
2257                mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2258                mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2259                mac_stats->rx_undersize;
2260        stats->tx_errors = mac_stats->tx_fcs_error +
2261                                        mac_stats->tx_undersize;
2262        stats->rx_dropped = mac_stats->rx_drop;
2263        stats->tx_dropped = mac_stats->tx_drop;
2264        stats->multicast = mac_stats->rx_multicast;
2265        stats->collisions = mac_stats->tx_total_collision;
2266
2267        stats->rx_length_errors = mac_stats->rx_frame_length_error;
2268
2269        /* receive ring buffer overflow  ?? */
2270
2271        stats->rx_crc_errors = mac_stats->rx_fcs_error;
2272        stats->rx_frame_errors = mac_stats->rx_alignment_error;
2273        /* recv'r fifo overrun */
2274        bmap = bna_rx_rid_mask(&bnad->bna);
2275        for (i = 0; bmap; i++) {
2276                if (bmap & 1) {
2277                        stats->rx_fifo_errors +=
2278                                bnad->stats.bna_stats->
2279                                        hw_stats.rxf_stats[i].frame_drops;
2280                        break;
2281                }
2282                bmap >>= 1;
2283        }
2284}
2285
2286static void
2287bnad_mbox_irq_sync(struct bnad *bnad)
2288{
2289        u32 irq;
2290        unsigned long flags;
2291
2292        spin_lock_irqsave(&bnad->bna_lock, flags);
2293        if (bnad->cfg_flags & BNAD_CF_MSIX)
2294                irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
2295        else
2296                irq = bnad->pcidev->irq;
2297        spin_unlock_irqrestore(&bnad->bna_lock, flags);
2298
2299        synchronize_irq(irq);
2300}
2301
2302/* Utility used by bnad_start_xmit to prepare the skb for TSO */
2303static int
2304bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2305{
2306        int err;
2307
2308        if (skb_header_cloned(skb)) {
2309                err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2310                if (err) {
2311                        BNAD_UPDATE_CTR(bnad, tso_err);
2312                        return err;
2313                }
2314        }
2315
2316        /*
2317         * For TSO, the TCP checksum field is seeded with pseudo-header sum
2318         * excluding the length field.
2319         */
2320        if (skb->protocol == htons(ETH_P_IP)) {
2321                struct iphdr *iph = ip_hdr(skb);
2322
2323                /* Do we really need these? */
2324                iph->tot_len = 0;
2325                iph->check = 0;
2326
2327                tcp_hdr(skb)->check =
2328                        ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2329                                           IPPROTO_TCP, 0);
2330                BNAD_UPDATE_CTR(bnad, tso4);
2331        } else {
2332                struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2333
2334                ipv6h->payload_len = 0;
2335                tcp_hdr(skb)->check =
2336                        ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2337                                         IPPROTO_TCP, 0);
2338                BNAD_UPDATE_CTR(bnad, tso6);
2339        }
2340
2341        return 0;
2342}
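
/*
 * A minimal sketch of what the seeding in bnad_tso_prepare() amounts to,
 * assuming the usual TSO convention that the device fills in the
 * length-dependent fields for each generated segment:
 *
 *	tcp->check = ~checksum(pseudo-header with length = 0)
 *		   = ~csum_tcpudp_magic(saddr, daddr, 0, IPPROTO_TCP, 0)
 *
 * i.e. the stack pre-computes the part of the pseudo-header checksum that
 * is constant across segments, and the hardware folds in the per-segment
 * length and payload checksum when it emits each segment.
 */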
2343
2344/*
2345 * Initialize Q numbers depending on Rx Paths
2346 * Called with bnad->bna_lock held, because of cfg_flags
2347 * access.
2348 */
2349static void
2350bnad_q_num_init(struct bnad *bnad)
2351{
2352        int rxps;
2353
2354        rxps = min((uint)num_online_cpus(),
2355                        (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
2356
2357        if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2358                rxps = 1;       /* INTx */
2359
2360        bnad->num_rx = 1;
2361        bnad->num_tx = 1;
2362        bnad->num_rxp_per_rx = rxps;
2363        bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2364}
2365
2366/*
2367 * Adjusts the Q numbers, given a number of MSI-X vectors.
2368 * Gives preference to RSS over Tx priority queues; in that case
2369 * just one Tx queue is used.
2370 * Called with bnad->bna_lock held because of cfg_flags access.
2371 */
2372static void
2373bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
2374{
2375        bnad->num_txq_per_tx = 1;
2376        if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx)  +
2377             bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2378            (bnad->cfg_flags & BNAD_CF_MSIX)) {
2379                bnad->num_rxp_per_rx = msix_vectors -
2380                        (bnad->num_tx * bnad->num_txq_per_tx) -
2381                        BNAD_MAILBOX_MSIX_VECTORS;
2382        } else
2383                bnad->num_rxp_per_rx = 1;
2384}
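
/*
 * A worked example of the adjustment above, with one Tx queue and one
 * mailbox vector assumed (BNAD_MAILBOX_MSIX_VECTORS is taken to be 1):
 *
 *	msix_vectors   = 8
 *	Tx vectors     = num_tx * num_txq_per_tx = 1
 *	threshold      = 1 + bnad_rxqs_per_cq (2) + 1 = 4
 *	num_rxp_per_rx = 8 - 1 - 1 = 6
 *
 * If fewer vectors than the threshold are available, or MSI-X is not in
 * use, a single Rx path is configured instead.
 */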
2385
2386/* Enable / disable ioceth */
2387static int
2388bnad_ioceth_disable(struct bnad *bnad)
2389{
2390        unsigned long flags;
2391        int err = 0;
2392
2393        spin_lock_irqsave(&bnad->bna_lock, flags);
2394        init_completion(&bnad->bnad_completions.ioc_comp);
2395        bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
2396        spin_unlock_irqrestore(&bnad->bna_lock, flags);
2397
2398        wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2399                msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2400
2401        err = bnad->bnad_completions.ioc_comp_status;
2402        return err;
2403}
2404
2405static int
2406bnad_ioceth_enable(struct bnad *bnad)
2407{
2408        int err = 0;
2409        unsigned long flags;
2410
2411        spin_lock_irqsave(&bnad->bna_lock, flags);
2412        init_completion(&bnad->bnad_completions.ioc_comp);
2413        bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2414        bna_ioceth_enable(&bnad->bna.ioceth);
2415        spin_unlock_irqrestore(&bnad->bna_lock, flags);
2416
2417        wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2418                msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2419
2420        err = bnad->bnad_completions.ioc_comp_status;
2421
2422        return err;
2423}
2424
2425/* Free BNA resources */
2426static void
2427bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2428                u32 res_val_max)
2429{
2430        int i;
2431
2432        for (i = 0; i < res_val_max; i++)
2433                bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2434}
2435
2436/* Allocates memory and interrupt resources for BNA */
2437static int
2438bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2439                u32 res_val_max)
2440{
2441        int i, err;
2442
2443        for (i = 0; i < res_val_max; i++) {
2444                err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2445                if (err)
2446                        goto err_return;
2447        }
2448        return 0;
2449
2450err_return:
2451        bnad_res_free(bnad, res_info, res_val_max);
2452        return err;
2453}
2454
2455/* Interrupt enable / disable */
2456static void
2457bnad_enable_msix(struct bnad *bnad)
2458{
2459        int i, ret;
2460        unsigned long flags;
2461
2462        spin_lock_irqsave(&bnad->bna_lock, flags);
2463        if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2464                spin_unlock_irqrestore(&bnad->bna_lock, flags);
2465                return;
2466        }
2467        spin_unlock_irqrestore(&bnad->bna_lock, flags);
2468
2469        if (bnad->msix_table)
2470                return;
2471
2472        bnad->msix_table =
2473                kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2474
2475        if (!bnad->msix_table)
2476                goto intx_mode;
2477
2478        for (i = 0; i < bnad->msix_num; i++)
2479                bnad->msix_table[i].entry = i;
2480
2481        ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
2482        if (ret > 0) {
2483                /* Not enough MSI-X vectors. */
2484                pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
2485                        ret, bnad->msix_num);
2486
2487                spin_lock_irqsave(&bnad->bna_lock, flags);
2488                /* ret = number of MSI-X vectors available */
2489                bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
2490                        (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
2491                spin_unlock_irqrestore(&bnad->bna_lock, flags);
2492
2493                bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
2494                         BNAD_MAILBOX_MSIX_VECTORS;
2495
2496                if (bnad->msix_num > ret)
2497                        goto intx_mode;
2498
2499                /* Try once more with adjusted numbers */
2500                /* If this fails, fall back to INTx */
2501                ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
2502                                      bnad->msix_num);
2503                if (ret)
2504                        goto intx_mode;
2505
2506        } else if (ret < 0)
2507                goto intx_mode;
2508
2509        pci_intx(bnad->pcidev, 0);
2510
2511        return;
2512
2513intx_mode:
2514        pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n");
2515
2516        kfree(bnad->msix_table);
2517        bnad->msix_table = NULL;
2518        bnad->msix_num = 0;
2519        spin_lock_irqsave(&bnad->bna_lock, flags);
2520        bnad->cfg_flags &= ~BNAD_CF_MSIX;
2521        bnad_q_num_init(bnad);
2522        spin_unlock_irqrestore(&bnad->bna_lock, flags);
2523}
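
/*
 * A minimal sketch of the retry strategy above, based on the return
 * convention of this pci_enable_msix() API:
 *
 *	ret = pci_enable_msix(pdev, table, want);
 *	ret == 0	all 'want' vectors were allocated, use MSI-X
 *	ret > 0		only 'ret' vectors are available; shrink the
 *			queue counts, recompute msix_num and retry once
 *	ret < 0		hard failure
 *
 * If the retry also fails, or msix_num still exceeds what is available,
 * the driver frees the table, clears BNAD_CF_MSIX and falls back to INTx.
 */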
2524
2525static void
2526bnad_disable_msix(struct bnad *bnad)
2527{
2528        u32 cfg_flags;
2529        unsigned long flags;
2530
2531        spin_lock_irqsave(&bnad->bna_lock, flags);
2532        cfg_flags = bnad->cfg_flags;
2533        if (bnad->cfg_flags & BNAD_CF_MSIX)
2534                bnad->cfg_flags &= ~BNAD_CF_MSIX;
2535        spin_unlock_irqrestore(&bnad->bna_lock, flags);
2536
2537        if (cfg_flags & BNAD_CF_MSIX) {
2538                pci_disable_msix(bnad->pcidev);
2539                kfree(bnad->msix_table);
2540                bnad->msix_table = NULL;
2541        }
2542}
2543
2544/* Netdev entry points */
2545static int
2546bnad_open(struct net_device *netdev)
2547{
2548        int err;
2549        struct bnad *bnad = netdev_priv(netdev);
2550        struct bna_pause_config pause_config;
2551        int mtu;
2552        unsigned long flags;
2553
2554        mutex_lock(&bnad->conf_mutex);
2555
2556        /* Tx */
2557        err = bnad_setup_tx(bnad, 0);
2558        if (err)
2559                goto err_return;
2560
2561        /* Rx */
2562        err = bnad_setup_rx(bnad, 0);
2563        if (err)
2564                goto cleanup_tx;
2565
2566        /* Port */
2567        pause_config.tx_pause = 0;
2568        pause_config.rx_pause = 0;
2569
2570        mtu = ETH_HLEN + VLAN_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
2571
2572        spin_lock_irqsave(&bnad->bna_lock, flags);
2573        bna_enet_mtu_set(&bnad->bna.enet, mtu, NULL);
2574        bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
2575        bna_enet_enable(&bnad->bna.enet);
2576        spin_unlock_irqrestore(&bnad->bna_lock, flags);
2577
2578        /* Enable broadcast */
2579        bnad_enable_default_bcast(bnad);
2580
2581        /* Restore VLANs, if any */
2582        bnad_restore_vlans(bnad, 0);
2583
2584        /* Set the UCAST address */
2585        spin_lock_irqsave(&bnad->bna_lock, flags);
2586        bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2587        spin_unlock_irqrestore(&bnad->bna_lock, flags);
2588
2589        /* Start the stats timer */
2590        bnad_stats_timer_start(bnad);
2591
2592        mutex_unlock(&bnad->conf_mutex);
2593
2594        return 0;
2595
2596cleanup_tx:
2597        bnad_destroy_tx(bnad, 0);
2598
2599err_return:
2600        mutex_unlock(&bnad->conf_mutex);
2601        return err;
2602}
2603
2604static int
2605bnad_stop(struct net_device *netdev)
2606{
2607        struct bnad *bnad = netdev_priv(netdev);
2608        unsigned long flags;
2609
2610        mutex_lock(&bnad->conf_mutex);
2611
2612        /* Stop the stats timer */
2613        bnad_stats_timer_stop(bnad);
2614
2615        init_completion(&bnad->bnad_completions.enet_comp);
2616
2617        spin_lock_irqsave(&bnad->bna_lock, flags);
2618        bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2619                        bnad_cb_enet_disabled);
2620        spin_unlock_irqrestore(&bnad->bna_lock, flags);
2621
2622        wait_for_completion(&bnad->bnad_completions.enet_comp);
2623
2624        bnad_destroy_tx(bnad, 0);
2625        bnad_destroy_rx(bnad, 0);
2626
2627        /* These config flags are cleared in the hardware */
2628        bnad->cfg_flags &= ~(BNAD_CF_ALLMULTI | BNAD_CF_PROMISC);
2629
2630        /* Synchronize mailbox IRQ */
2631        bnad_mbox_irq_sync(bnad);
2632
2633        mutex_unlock(&bnad->conf_mutex);
2634
2635        return 0;
2636}
2637
2638/* TX */
2639/* Returns 0 for success */
2640static int
2641bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
2642                    struct sk_buff *skb, struct bna_txq_entry *txqent)
2643{
2644        u16 flags = 0;
2645        u32 gso_size;
2646        u16 vlan_tag = 0;
2647
2648        if (vlan_tx_tag_present(skb)) {
2649                vlan_tag = (u16)vlan_tx_tag_get(skb);
2650                flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2651        }
2652        if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2653                vlan_tag = ((tcb->priority & 0x7) << VLAN_PRIO_SHIFT)
2654                                | (vlan_tag & 0x1fff);
2655                flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2656        }
2657        txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2658
2659        if (skb_is_gso(skb)) {
2660                gso_size = skb_shinfo(skb)->gso_size;
2661                if (unlikely(gso_size > bnad->netdev->mtu)) {
2662                        BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
2663                        return -EINVAL;
2664                }
2665                if (unlikely((gso_size + skb_transport_offset(skb) +
2666                              tcp_hdrlen(skb)) >= skb->len)) {
2667                        txqent->hdr.wi.opcode =
2668                                __constant_htons(BNA_TXQ_WI_SEND);
2669                        txqent->hdr.wi.lso_mss = 0;
2670                        BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2671                } else {
2672                        txqent->hdr.wi.opcode =
2673                                __constant_htons(BNA_TXQ_WI_SEND_LSO);
2674                        txqent->hdr.wi.lso_mss = htons(gso_size);
2675                }
2676
2677                if (bnad_tso_prepare(bnad, skb)) {
2678                        BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
2679                        return -EINVAL;
2680                }
2681
2682                flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2683                txqent->hdr.wi.l4_hdr_size_n_offset =
2684                        htons(BNA_TXQ_WI_L4_HDR_N_OFFSET(
2685                        tcp_hdrlen(skb) >> 2, skb_transport_offset(skb)));
2686        } else  {
2687                txqent->hdr.wi.opcode = __constant_htons(BNA_TXQ_WI_SEND);
2688                txqent->hdr.wi.lso_mss = 0;
2689
2690                if (unlikely(skb->len > (bnad->netdev->mtu + ETH_HLEN))) {
2691                        BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
2692                        return -EINVAL;
2693                }
2694
2695                if (skb->ip_summed == CHECKSUM_PARTIAL) {
2696                        u8 proto = 0;
2697
2698                        if (skb->protocol == __constant_htons(ETH_P_IP))
2699                                proto = ip_hdr(skb)->protocol;
2700#ifdef NETIF_F_IPV6_CSUM
2701                        else if (skb->protocol ==
2702                                 __constant_htons(ETH_P_IPV6)) {
2703                                /* nexthdr may not be TCP immediately. */
2704                                proto = ipv6_hdr(skb)->nexthdr;
2705                        }
2706#endif
2707                        if (proto == IPPROTO_TCP) {
2708                                flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2709                                txqent->hdr.wi.l4_hdr_size_n_offset =
2710                                        htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2711                                              (0, skb_transport_offset(skb)));
2712
2713                                BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2714
2715                                if (unlikely(skb_headlen(skb) <
2716                                            skb_transport_offset(skb) +
2717                                    tcp_hdrlen(skb))) {
2718                                        BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
2719                                        return -EINVAL;
2720                                }
2721                        } else if (proto == IPPROTO_UDP) {
2722                                flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2723                                txqent->hdr.wi.l4_hdr_size_n_offset =
2724                                        htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2725                                              (0, skb_transport_offset(skb)));
2726
2727                                BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2728                                if (unlikely(skb_headlen(skb) <
2729                                            skb_transport_offset(skb) +
2730                                    sizeof(struct udphdr))) {
2731                                        BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
2732                                        return -EINVAL;
2733                                }
2734                        } else {
2735
2736                                BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
2737                                return -EINVAL;
2738                        }
2739                } else
2740                        txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2741        }
2742
2743        txqent->hdr.wi.flags = htons(flags);
2744        txqent->hdr.wi.frame_length = htonl(skb->len);
2745
2746        return 0;
2747}
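
/*
 * A worked example of the VLAN tag packing above when CEE is running
 * (VLAN_PRIO_SHIFT is 13, so the three priority bits sit above the
 * CFI/VID bits kept by the 0x1fff mask):
 *
 *	tcb->priority = 5, incoming vlan_tag = 0x0064 (VID 100)
 *	vlan_tag = ((5 & 0x7) << 13) | (0x0064 & 0x1fff)
 *		 = 0xa000 | 0x0064
 *		 = 0xa064
 *
 * The result is stored in the work item in network byte order via htons().
 */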
2748
2749/*
2750 * bnad_start_xmit : Netdev entry point for transmit.
2751 *                   Called with the netdev Tx lock held.
2752 */
2753static netdev_tx_t
2754bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2755{
2756        struct bnad *bnad = netdev_priv(netdev);
2757        u32 txq_id = 0;
2758        struct bna_tcb *tcb = NULL;
2759        struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap;
2760        u32             prod, q_depth, vect_id;
2761        u32             wis, vectors, len;
2762        int             i;
2763        dma_addr_t              dma_addr;
2764        struct bna_txq_entry *txqent;
2765
2766        len = skb_headlen(skb);
2767
2768        /* Sanity checks for the skb */
2769
2770        if (unlikely(skb->len <= ETH_HLEN)) {
2771                dev_kfree_skb(skb);
2772                BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2773                return NETDEV_TX_OK;
2774        }
2775        if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
2776                dev_kfree_skb(skb);
2777                BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2778                return NETDEV_TX_OK;
2779        }
2780        if (unlikely(len == 0)) {
2781                dev_kfree_skb(skb);
2782                BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2783                return NETDEV_TX_OK;
2784        }
2785
2786        tcb = bnad->tx_info[0].tcb[txq_id];
2787        q_depth = tcb->q_depth;
2788        prod = tcb->producer_index;
2789
2790        unmap_q = tcb->unmap_q;
2791
2792        /*
2793         * Takes care of the Tx that is scheduled between clearing the flag
2794         * and the netif_tx_stop_all_queues() call.
2795         */
2796        if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2797                dev_kfree_skb(skb);
2798                BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
2799                return NETDEV_TX_OK;
2800        }
2801
2802        vectors = 1 + skb_shinfo(skb)->nr_frags;
2803        wis = BNA_TXQ_WI_NEEDED(vectors);       /* 4 vectors per work item */
2804
2805        if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
2806                dev_kfree_skb(skb);
2807                BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
2808                return NETDEV_TX_OK;
2809        }
2810
2811        /* Check for available TxQ resources */
2812        if (unlikely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
2813                if ((*tcb->hw_consumer_index != tcb->consumer_index) &&
2814                    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2815                        u32 sent;
2816                        sent = bnad_txcmpl_process(bnad, tcb);
2817                        if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2818                                bna_ib_ack(tcb->i_dbell, sent);
2819                        smp_mb__before_clear_bit();
2820                        clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2821                } else {
2822                        netif_stop_queue(netdev);
2823                        BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2824                }
2825
2826                smp_mb();
2827                /*
2828                 * Check again to deal with the race between
2829                 * netif_stop_queue() here and netif_wake_queue() in the
2830                 * interrupt handler, which runs outside the netif Tx lock.
2831                 */
2832                if (likely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
2833                        BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2834                        return NETDEV_TX_BUSY;
2835                } else {
2836                        netif_wake_queue(netdev);
2837                        BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
2838                }
2839        }
2840
2841        txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
2842        head_unmap = &unmap_q[prod];
2843
2844        /* Program the opcode, flags, frame_len, num_vectors in WI */
2845        if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
2846                dev_kfree_skb(skb);
2847                return NETDEV_TX_OK;
2848        }
2849        txqent->hdr.wi.reserved = 0;
2850        txqent->hdr.wi.num_vectors = vectors;
2851
2852        head_unmap->skb = skb;
2853        head_unmap->nvecs = 0;
2854
2855        /* Program the vectors */
2856        unmap = head_unmap;
2857        dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
2858                                  len, DMA_TO_DEVICE);
2859        BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
2860        txqent->vector[0].length = htons(len);
2861        dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr);
2862        head_unmap->nvecs++;
2863
2864        for (i = 0, vect_id = 0; i < vectors - 1; i++) {
2865                const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
2866                u16             size = skb_frag_size(frag);
2867
2868                if (unlikely(size == 0)) {
2869                        /* Undo the changes starting at tcb->producer_index */
2870                        bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
2871                                tcb->producer_index);
2872                        dev_kfree_skb(skb);
2873                        BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
2874                        return NETDEV_TX_OK;
2875                }
2876
2877                len += size;
2878
2879                vect_id++;
2880                if (vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
2881                        vect_id = 0;
2882                        BNA_QE_INDX_INC(prod, q_depth);
2883                        txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
2884                        txqent->hdr.wi_ext.opcode =
2885                                __constant_htons(BNA_TXQ_WI_EXTENSION);
2886                        unmap = &unmap_q[prod];
2887                }
2888
2889                dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
2890                                            0, size, DMA_TO_DEVICE);
2891                BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2892                txqent->vector[vect_id].length = htons(size);
2893                dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr,
2894                                                dma_addr);
2895                head_unmap->nvecs++;
2896        }
2897
2898        if (unlikely(len != skb->len)) {
2899                /* Undo the changes starting at tcb->producer_index */
2900                bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
2901                dev_kfree_skb(skb);
2902                BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
2903                return NETDEV_TX_OK;
2904        }
2905
2906        BNA_QE_INDX_INC(prod, q_depth);
2907        tcb->producer_index = prod;
2908
2909        smp_mb();
2910
2911        if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2912                return NETDEV_TX_OK;
2913
2914        bna_txq_prod_indx_doorbell(tcb);
2915        smp_mb();
2916
2917        return NETDEV_TX_OK;
2918}
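
/*
 * A worked example of the vector / work-item arithmetic in
 * bnad_start_xmit() above, using the "4 vectors per work item" rule
 * noted at the BNA_TXQ_WI_NEEDED() call:
 *
 *	skb with 6 frags  ->  vectors = 1 + 6 = 7
 *	wis = DIV_ROUND_UP(7, 4) = 2
 *
 * i.e. one base work item carrying the header fields plus the first four
 * vectors, and one BNA_TXQ_WI_EXTENSION work item for the remaining
 * three; the TxQ must have at least 'wis' free entries before the frame
 * is queued.
 */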
2919
2920/*
2921 * The bna_lock is used to synchronize reading of the stats structures,
2922 * which are written by BNA under the same lock.
2923 */
2924static struct rtnl_link_stats64 *
2925bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
2926{
2927        struct bnad *bnad = netdev_priv(netdev);
2928        unsigned long flags;
2929
2930        spin_lock_irqsave(&bnad->bna_lock, flags);
2931
2932        bnad_netdev_qstats_fill(bnad, stats);
2933        bnad_netdev_hwstats_fill(bnad, stats);
2934
2935        spin_unlock_irqrestore(&bnad->bna_lock, flags);
2936
2937        return stats;
2938}
2939
2940void
2941bnad_set_rx_mode(struct net_device *netdev)
2942{
2943        struct bnad *bnad = netdev_priv(netdev);
2944        u32     new_mask, valid_mask;
2945        unsigned long flags;
2946
2947        spin_lock_irqsave(&bnad->bna_lock, flags);
2948
2949        new_mask = valid_mask = 0;
2950
2951        if (netdev->flags & IFF_PROMISC) {
2952                if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
2953                        new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2954                        valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2955                        bnad->cfg_flags |= BNAD_CF_PROMISC;
2956                }
2957        } else {
2958                if (bnad->cfg_flags & BNAD_CF_PROMISC) {
2959                        new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
2960                        valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2961                        bnad->cfg_flags &= ~BNAD_CF_PROMISC;
2962                }
2963        }
2964
2965        if (netdev->flags & IFF_ALLMULTI) {
2966                if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
2967                        new_mask |= BNA_RXMODE_ALLMULTI;
2968                        valid_mask |= BNA_RXMODE_ALLMULTI;
2969                        bnad->cfg_flags |= BNAD_CF_ALLMULTI;
2970                }
2971        } else {
2972                if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
2973                        new_mask &= ~BNA_RXMODE_ALLMULTI;
2974                        valid_mask |= BNA_RXMODE_ALLMULTI;
2975                        bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
2976                }
2977        }
2978
2979        if (bnad->rx_info[0].rx == NULL)
2980                goto unlock;
2981
2982        bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
2983
2984        if (!netdev_mc_empty(netdev)) {
2985                u8 *mcaddr_list;
2986                int mc_count = netdev_mc_count(netdev);
2987
2988                /* Index 0 holds the broadcast address */
2989                mcaddr_list =
2990                        kzalloc((mc_count + 1) * ETH_ALEN,
2991                                GFP_ATOMIC);
2992                if (!mcaddr_list)
2993                        goto unlock;
2994
2995                memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
2996
2997                /* Copy rest of the MC addresses */
2998                bnad_netdev_mc_list_get(netdev, mcaddr_list);
2999
3000                bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
3001                                        mcaddr_list, NULL);
3002
3003                /* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
3004                kfree(mcaddr_list);
3005        }
3006unlock:
3007        spin_unlock_irqrestore(&bnad->bna_lock, flags);
3008}
3009
3010/*
3011 * bna_lock is used to sync writes to netdev->dev_addr.
3012 * conf_lock cannot be used since this call may be made
3013 * in a non-blocking context.
3014 */
3015static int
3016bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
3017{
3018        int err;
3019        struct bnad *bnad = netdev_priv(netdev);
3020        struct sockaddr *sa = (struct sockaddr *)mac_addr;
3021        unsigned long flags;
3022
3023        spin_lock_irqsave(&bnad->bna_lock, flags);
3024
3025        err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
3026
3027        if (!err)
3028                memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
3029
3030        spin_unlock_irqrestore(&bnad->bna_lock, flags);
3031
3032        return err;
3033}
3034
3035static int
3036bnad_mtu_set(struct bnad *bnad, int mtu)
3037{
3038        unsigned long flags;
3039
3040        init_completion(&bnad->bnad_completions.mtu_comp);
3041
3042        spin_lock_irqsave(&bnad->bna_lock, flags);
3043        bna_enet_mtu_set(&bnad->bna.enet, mtu, bnad_cb_enet_mtu_set);
3044        spin_unlock_irqrestore(&bnad->bna_lock, flags);
3045
3046        wait_for_completion(&bnad->bnad_completions.mtu_comp);
3047
3048        return bnad->bnad_completions.mtu_comp_status;
3049}
3050
3051static int
3052bnad_change_mtu(struct net_device *netdev, int new_mtu)
3053{
3054        int err, mtu = netdev->mtu;
3055        struct bnad *bnad = netdev_priv(netdev);
3056
3057        if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
3058                return -EINVAL;
3059
3060        mutex_lock(&bnad->conf_mutex);
3061
3062        netdev->mtu = new_mtu;
3063
3064        mtu = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN;
3065        err = bnad_mtu_set(bnad, mtu);
3066        if (err)
3067                err = -EBUSY;
3068
3069        mutex_unlock(&bnad->conf_mutex);
3070        return err;
3071}
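
/*
 * A worked example of the frame-size arithmetic above (ETH_HLEN = 14,
 * VLAN_HLEN = 4, ETH_FCS_LEN = 4):
 *
 *	new_mtu = 1500  ->  enet MTU = 14 + 4 + 1500 + 4 = 1522
 *	new_mtu = 9000  ->  enet MTU = 14 + 4 + 9000 + 4 = 9022
 *
 * i.e. the value handed to bna_enet_mtu_set() is the maximum on-wire
 * frame size including the Ethernet header, one VLAN tag and the FCS,
 * not the L3 MTU that netdev->mtu reports.
 */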
3072
3073static int
3074bnad_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3075{
3076        struct bnad *bnad = netdev_priv(netdev);
3077        unsigned long flags;
3078
3079        if (!bnad->rx_info[0].rx)
3080                return 0;
3081
3082        mutex_lock(&bnad->conf_mutex);
3083
3084        spin_lock_irqsave(&bnad->bna_lock, flags);
3085        bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
3086        set_bit(vid, bnad->active_vlans);
3087        spin_unlock_irqrestore(&bnad->bna_lock, flags);
3088
3089        mutex_unlock(&bnad->conf_mutex);
3090
3091        return 0;
3092}
3093
3094static int
3095bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3096{
3097        struct bnad *bnad = netdev_priv(netdev);
3098        unsigned long flags;
3099
3100        if (!bnad->rx_info[0].rx)
3101                return 0;
3102
3103        mutex_lock(&bnad->conf_mutex);
3104
3105        spin_lock_irqsave(&bnad->bna_lock, flags);
3106        clear_bit(vid, bnad->active_vlans);
3107        bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
3108        spin_unlock_irqrestore(&bnad->bna_lock, flags);
3109
3110        mutex_unlock(&bnad->conf_mutex);
3111
3112        return 0;
3113}
3114
3115#ifdef CONFIG_NET_POLL_CONTROLLER
3116static void
3117bnad_netpoll(struct net_device *netdev)
3118{
3119        struct bnad *bnad = netdev_priv(netdev);
3120        struct bnad_rx_info *rx_info;
3121        struct bnad_rx_ctrl *rx_ctrl;
3122        u32 curr_mask;
3123        int i, j;
3124
3125        if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
3126                bna_intx_disable(&bnad->bna, curr_mask);
3127                bnad_isr(bnad->pcidev->irq, netdev);
3128                bna_intx_enable(&bnad->bna, curr_mask);
3129        } else {
3130                /*
3131                 * Tx processing may happen in sending context, so no need
3132                 * to explicitly process completions here
3133                 */
3134
3135                /* Rx processing */
3136                for (i = 0; i < bnad->num_rx; i++) {
3137                        rx_info = &bnad->rx_info[i];
3138                        if (!rx_info->rx)
3139                                continue;
3140                        for (j = 0; j < bnad->num_rxp_per_rx; j++) {
3141                                rx_ctrl = &rx_info->rx_ctrl[j];
3142                                if (rx_ctrl->ccb)
3143                                        bnad_netif_rx_schedule_poll(bnad,
3144                                                            rx_ctrl->ccb);
3145                        }
3146                }
3147        }
3148}
3149#endif
3150
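    /* net_device_ops installed on each bnad interface by bnad_netdev_init() */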
3151static const struct net_device_ops bnad_netdev_ops = {
3152        .ndo_open               = bnad_open,
3153        .ndo_stop               = bnad_stop,
3154        .ndo_start_xmit         = bnad_start_xmit,
3155        .ndo_get_stats64        = bnad_get_stats64,
3156        .ndo_set_rx_mode        = bnad_set_rx_mode,
3157        .ndo_validate_addr      = eth_validate_addr,
3158        .ndo_set_mac_address    = bnad_set_mac_address,
3159        .ndo_change_mtu         = bnad_change_mtu,
3160        .ndo_vlan_rx_add_vid    = bnad_vlan_rx_add_vid,
3161        .ndo_vlan_rx_kill_vid   = bnad_vlan_rx_kill_vid,
3162#ifdef CONFIG_NET_POLL_CONTROLLER
3163        .ndo_poll_controller    = bnad_netpoll
3164#endif
3165};
3166
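    /* Set up netdev feature flags, MMIO range, netdev_ops and ethtool ops */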
3167static void
3168bnad_netdev_init(struct bnad *bnad, bool using_dac)
3169{
3170        struct net_device *netdev = bnad->netdev;
3171
3172        netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3173                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3174                NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX;
3175
3176        netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
3177                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3178                NETIF_F_TSO | NETIF_F_TSO6;
3179
3180        netdev->features |= netdev->hw_features |
3181                NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
3182
3183        if (using_dac)
3184                netdev->features |= NETIF_F_HIGHDMA;
3185
3186        netdev->mem_start = bnad->mmio_start;
3187        netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
3188
3189        netdev->netdev_ops = &bnad_netdev_ops;
3190        bnad_set_ethtool_ops(netdev);
3191}
3192
3193/*
3194 * 1. Initialize the bnad structure
3195 * 2. Set up the netdev pointer in pci_dev
3196 * 3. Initialize the number of TxQs, CQs and MSIX vectors
3197 * 4. Initialize the work queue
3198 */
3199static int
3200bnad_init(struct bnad *bnad,
3201          struct pci_dev *pdev, struct net_device *netdev)
3202{
3203        unsigned long flags;
3204
3205        SET_NETDEV_DEV(netdev, &pdev->dev);
3206        pci_set_drvdata(pdev, netdev);
3207
3208        bnad->netdev = netdev;
3209        bnad->pcidev = pdev;
3210        bnad->mmio_start = pci_resource_start(pdev, 0);
3211        bnad->mmio_len = pci_resource_len(pdev, 0);
3212        bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
3213        if (!bnad->bar0) {
3214                dev_err(&pdev->dev, "ioremap for bar0 failed\n");
3215                pci_set_drvdata(pdev, NULL);
3216                return -ENOMEM;
3217        }
3218        pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
3219               (unsigned long long) bnad->mmio_len);
3220
3221        spin_lock_irqsave(&bnad->bna_lock, flags);
3222        if (!bnad_msix_disable)
3223                bnad->cfg_flags = BNAD_CF_MSIX;
3224
3225        bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
3226
3227        bnad_q_num_init(bnad);
3228        spin_unlock_irqrestore(&bnad->bna_lock, flags);
3229
3230        bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
3231                (bnad->num_rx * bnad->num_rxp_per_rx) +
3232                         BNAD_MAILBOX_MSIX_VECTORS;
3233
3234        bnad->txq_depth = BNAD_TXQ_DEPTH;
3235        bnad->rxq_depth = BNAD_RXQ_DEPTH;
3236
3237        bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3238        bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3239
3240        sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
3241        bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
3242        if (!bnad->work_q) {
3243                iounmap(bnad->bar0);
3244                return -ENOMEM;
3245        }
3246
3247        return 0;
3248}
3249
3250/*
3251 * Must be called after bnad_pci_uninit()
3252 * so that iounmap() and pci_set_drvdata(NULL)
3253 * happen only after PCI uninitialization.
3254 */
3255static void
3256bnad_uninit(struct bnad *bnad)
3257{
3258        if (bnad->work_q) {
3259                flush_workqueue(bnad->work_q);
3260                destroy_workqueue(bnad->work_q);
3261                bnad->work_q = NULL;
3262        }
3263
3264        if (bnad->bar0)
3265                iounmap(bnad->bar0);
3266        pci_set_drvdata(bnad->pcidev, NULL);
3267}
3268
3269/*
3270 * Initialize locks
3271 *      a) Per-ioceth mutex used for serializing configuration
3272 *         changes from the OS interface
3273 *      b) Spinlock used to protect the bna state machine
3274 */
3275static void
3276bnad_lock_init(struct bnad *bnad)
3277{
3278        spin_lock_init(&bnad->bna_lock);
3279        mutex_init(&bnad->conf_mutex);
3280        mutex_init(&bnad_list_mutex);
3281}
3282
3283static void
3284bnad_lock_uninit(struct bnad *bnad)
3285{
3286        mutex_destroy(&bnad->conf_mutex);
3287        mutex_destroy(&bnad_list_mutex);
3288}
3289
3290/* PCI Initialization */
3291static int
3292bnad_pci_init(struct bnad *bnad,
3293              struct pci_dev *pdev, bool *using_dac)
3294{
3295        int err;
3296
3297        err = pci_enable_device(pdev);
3298        if (err)
3299                return err;
3300        err = pci_request_regions(pdev, BNAD_NAME);
3301        if (err)
3302                goto disable_device;
3303        if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
3304            !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
3305                *using_dac = true;
3306        } else {
3307                err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3308                if (err) {
3309                        err = dma_set_coherent_mask(&pdev->dev,
3310                                                    DMA_BIT_MASK(32));
3311                        if (err)
3312                                goto release_regions;
3313                }
3314                *using_dac = false;
3315        }
3316        pci_set_master(pdev);
3317        return 0;
3318
3319release_regions:
3320        pci_release_regions(pdev);
3321disable_device:
3322        pci_disable_device(pdev);
3323
3324        return err;
3325}
3326
3327static void
3328bnad_pci_uninit(struct pci_dev *pdev)
3329{
3330        pci_release_regions(pdev);
3331        pci_disable_device(pdev);
3332}
3333
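    /*
     * PCI probe entry point: load the firmware image, allocate the
     * netdev, set up PCI/DMA and bna resources, bring up the IOC and
     * finally register the net_device.
     */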
3334static int
3335bnad_pci_probe(struct pci_dev *pdev,
3336                const struct pci_device_id *pcidev_id)
3337{
3338        bool    using_dac;
3339        int     err;
3340        struct bnad *bnad;
3341        struct bna *bna;
3342        struct net_device *netdev;
3343        struct bfa_pcidev pcidev_info;
3344        unsigned long flags;
3345
3346        pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
3347               pdev, pcidev_id, PCI_FUNC(pdev->devfn));
3348
3349        mutex_lock(&bnad_fwimg_mutex);
3350        if (!cna_get_firmware_buf(pdev)) {
3351                mutex_unlock(&bnad_fwimg_mutex);
3352                pr_warn("Failed to load Firmware Image!\n");
3353                return -ENODEV;
3354        }
3355        mutex_unlock(&bnad_fwimg_mutex);
3356
3357        /*
3358         * Allocate a netdev with sizeof(struct bnad) bytes of private data;
3359         * bnad = netdev_priv(netdev)
3360         */
3361        netdev = alloc_etherdev(sizeof(struct bnad));
3362        if (!netdev) {
3363                err = -ENOMEM;
3364                return err;
3365        }
3366        bnad = netdev_priv(netdev);
3367        bnad_lock_init(bnad);
3368        bnad_add_to_list(bnad);
3369
3370        mutex_lock(&bnad->conf_mutex);
3371        /*
3372         * PCI initialization
3373         *      Output : using_dac = true for 64 bit DMA
3374         *                         = false for 32 bit DMA
3375         */
3376        using_dac = false;
3377        err = bnad_pci_init(bnad, pdev, &using_dac);
3378        if (err)
3379                goto unlock_mutex;
3380
3381        /*
3382         * Initialize bnad structure
3383         * Setup relation between pci_dev & netdev
3384         */
3385        err = bnad_init(bnad, pdev, netdev);
3386        if (err)
3387                goto pci_uninit;
3388
3389        /* Initialize netdev structure, set up ethtool ops */
3390        bnad_netdev_init(bnad, using_dac);
3391
3392        /* Set link to down state */
3393        netif_carrier_off(netdev);
3394
3395        /* Set up the debugfs node for this bnad */
3396        if (bna_debugfs_enable)
3397                bnad_debugfs_init(bnad);
3398
3399        /* Get resource requirements from bna */
3400        spin_lock_irqsave(&bnad->bna_lock, flags);
3401        bna_res_req(&bnad->res_info[0]);
3402        spin_unlock_irqrestore(&bnad->bna_lock, flags);
3403
3404        /* Allocate resources from bna */
3405        err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3406        if (err)
3407                goto drv_uninit;
3408
3409        bna = &bnad->bna;
3410
3411        /* Setup pcidev_info for bna_init() */
3412        pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3413        pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3414        pcidev_info.device_id = bnad->pcidev->device;
3415        pcidev_info.pci_bar_kva = bnad->bar0;
3416
3417        spin_lock_irqsave(&bnad->bna_lock, flags);
3418        bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3419        spin_unlock_irqrestore(&bnad->bna_lock, flags);
3420
3421        bnad->stats.bna_stats = &bna->stats;
3422
3423        bnad_enable_msix(bnad);
3424        err = bnad_mbox_irq_alloc(bnad);
3425        if (err)
3426                goto res_free;
3427
3428        /* Set up timers */
3429        setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
3430                                ((unsigned long)bnad));
3431        setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
3432                                ((unsigned long)bnad));
3433        setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
3434                                ((unsigned long)bnad));
3435        setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
3436                                ((unsigned long)bnad));
3437
3438        /* Now start the timer before calling IOC */
3439        mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
3440                  jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
3441
3442        /*
3443         * Start the chip
3444         * If the callback comes back with an error it is catastrophic;
3445         * skip netdev registration but complete the probe.
3446         */
3447        err = bnad_ioceth_enable(bnad);
3448        if (err) {
3449                pr_err("BNA: Initialization failed err=%d\n",
3450                       err);
3451                goto probe_success;
3452        }
3453
3454        spin_lock_irqsave(&bnad->bna_lock, flags);
3455        if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3456                bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
3457                bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
3458                        bna_attr(bna)->num_rxp - 1);
3459                if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3460                        bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
3461                        err = -EIO;
3462        }
3463        spin_unlock_irqrestore(&bnad->bna_lock, flags);
3464        if (err)
3465                goto disable_ioceth;
3466
3467        spin_lock_irqsave(&bnad->bna_lock, flags);
3468        bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
3469        spin_unlock_irqrestore(&bnad->bna_lock, flags);
3470
3471        err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3472        if (err) {
3473                err = -EIO;
3474                goto disable_ioceth;
3475        }
3476
3477        spin_lock_irqsave(&bnad->bna_lock, flags);
3478        bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
3479        spin_unlock_irqrestore(&bnad->bna_lock, flags);
3480
3481        /* Get the burnt-in mac */
3482        spin_lock_irqsave(&bnad->bna_lock, flags);
3483        bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr);
3484        bnad_set_netdev_perm_addr(bnad);
3485        spin_unlock_irqrestore(&bnad->bna_lock, flags);
3486
3487        mutex_unlock(&bnad->conf_mutex);
3488
3489        /* Finally, register with the net_device layer */
3490        err = register_netdev(netdev);
3491        if (err) {
3492                pr_err("BNA : Registering with netdev failed\n");
3493                goto probe_uninit;
3494        }
3495        set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
3496
3497        return 0;
3498
3499probe_success:
3500        mutex_unlock(&bnad->conf_mutex);
3501        return 0;
3502
3503probe_uninit:
3504        mutex_lock(&bnad->conf_mutex);
3505        bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3506disable_ioceth:
3507        bnad_ioceth_disable(bnad);
3508        del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3509        del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3510        del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3511        spin_lock_irqsave(&bnad->bna_lock, flags);
3512        bna_uninit(bna);
3513        spin_unlock_irqrestore(&bnad->bna_lock, flags);
3514        bnad_mbox_irq_free(bnad);
3515        bnad_disable_msix(bnad);
3516res_free:
3517        bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3518drv_uninit:
3519        /* Remove the debugfs node for this bnad */
3520        kfree(bnad->regdata);
3521        bnad_debugfs_uninit(bnad);
3522        bnad_uninit(bnad);
3523pci_uninit:
3524        bnad_pci_uninit(pdev);
3525unlock_mutex:
3526        mutex_unlock(&bnad->conf_mutex);
3527        bnad_remove_from_list(bnad);
3528        bnad_lock_uninit(bnad);
3529        free_netdev(netdev);
3530        return err;
3531}
3532
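    /*
     * PCI remove: unregister the netdev, shut down the IOC and its
     * timers, release bna and PCI resources, then free the netdev.
     */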
3533static void
3534bnad_pci_remove(struct pci_dev *pdev)
3535{
3536        struct net_device *netdev = pci_get_drvdata(pdev);
3537        struct bnad *bnad;
3538        struct bna *bna;
3539        unsigned long flags;
3540
3541        if (!netdev)
3542                return;
3543
3544        pr_info("%s bnad_pci_remove\n", netdev->name);
3545        bnad = netdev_priv(netdev);
3546        bna = &bnad->bna;
3547
3548        if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
3549                unregister_netdev(netdev);
3550
3551        mutex_lock(&bnad->conf_mutex);
3552        bnad_ioceth_disable(bnad);
3553        del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3554        del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3555        del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3556        spin_lock_irqsave(&bnad->bna_lock, flags);
3557        bna_uninit(bna);
3558        spin_unlock_irqrestore(&bnad->bna_lock, flags);
3559
3560        bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3561        bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3562        bnad_mbox_irq_free(bnad);
3563        bnad_disable_msix(bnad);
3564        bnad_pci_uninit(pdev);
3565        mutex_unlock(&bnad->conf_mutex);
3566        bnad_remove_from_list(bnad);
3567        bnad_lock_uninit(bnad);
3568        /* Remove the debugfs node for this bnad */
3569        kfree(bnad->regdata);
3570        bnad_debugfs_uninit(bnad);
3571        bnad_uninit(bnad);
3572        free_netdev(netdev);
3573}
3574
3575static DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = {
3576        {
3577                PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3578                        PCI_DEVICE_ID_BROCADE_CT),
3579                .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3580                .class_mask =  0xffff00
3581        },
3582        {
3583                PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3584                        BFA_PCI_DEVICE_ID_CT2),
3585                .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3586                .class_mask =  0xffff00
3587        },
3588        {0,  },
3589};
3590
3591MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3592
3593static struct pci_driver bnad_pci_driver = {
3594        .name = BNAD_NAME,
3595        .id_table = bnad_pci_id_table,
3596        .probe = bnad_pci_probe,
3597        .remove = bnad_pci_remove,
3598};
3599
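    /* Module init: set the IOC auto-recovery policy and register the PCI driver */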
3600static int __init
3601bnad_module_init(void)
3602{
3603        int err;
3604
3605        pr_info("Brocade 10G Ethernet driver - version: %s\n",
3606                        BNAD_VERSION);
3607
3608        bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
3609
3610        err = pci_register_driver(&bnad_pci_driver);
3611        if (err < 0) {
3612                pr_err("bna : PCI registration failed in module init "
3613                       "(%d)\n", err);
3614                return err;
3615        }
3616
3617        return 0;
3618}
3619
3620static void __exit
3621bnad_module_exit(void)
3622{
3623        pci_unregister_driver(&bnad_pci_driver);
3624        release_firmware(bfi_fw);
3625}
3626
3627module_init(bnad_module_init);
3628module_exit(bnad_module_exit);
3629
3630MODULE_AUTHOR("Brocade");
3631MODULE_LICENSE("GPL");
3632MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
3633MODULE_VERSION(BNAD_VERSION);
3634MODULE_FIRMWARE(CNA_FW_FILE_CT);
3635MODULE_FIRMWARE(CNA_FW_FILE_CT2);
3636