linux/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
   1/* bnx2x_cmn.c: Broadcom Everest network driver.
   2 *
   3 * Copyright (c) 2007-2013 Broadcom Corporation
   4 *
   5 * This program is free software; you can redistribute it and/or modify
   6 * it under the terms of the GNU General Public License as published by
   7 * the Free Software Foundation.
   8 *
   9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
  10 * Written by: Eliezer Tamir
  11 * Based on code from Michael Chan's bnx2 driver
  12 * UDP CSUM errata workaround by Arik Gendelman
  13 * Slowpath and fastpath rework by Vladislav Zolotarov
  14 * Statistics and Link management by Yitchak Gertner
  15 *
  16 */
  17
  18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  19
  20#include <linux/etherdevice.h>
  21#include <linux/if_vlan.h>
  22#include <linux/interrupt.h>
  23#include <linux/ip.h>
  24#include <net/tcp.h>
  25#include <net/ipv6.h>
  26#include <net/ip6_checksum.h>
  27#include <net/busy_poll.h>
  28#include <linux/prefetch.h>
  29#include "bnx2x_cmn.h"
  30#include "bnx2x_init.h"
  31#include "bnx2x_sp.h"
  32
  33/**
  34 * bnx2x_move_fp - move content of the fastpath structure.
  35 *
  36 * @bp:         driver handle
  37 * @from:       source FP index
  38 * @to:         destination FP index
  39 *
  40 * Makes sure the contents of bp->fp[to].napi are kept
  41 * intact. This is done by first copying the napi struct from
  42 * the target to the source, and then memcpy'ing the entire
  43 * source onto the target. Txdata pointers and related
  44 * content are updated accordingly.
  45 */
  46static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
  47{
  48        struct bnx2x_fastpath *from_fp = &bp->fp[from];
  49        struct bnx2x_fastpath *to_fp = &bp->fp[to];
  50        struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
  51        struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
  52        struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
  53        struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
  54        int old_max_eth_txqs, new_max_eth_txqs;
  55        int old_txdata_index = 0, new_txdata_index = 0;
  56        struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
  57
  58        /* Copy the NAPI object as it has been already initialized */
  59        from_fp->napi = to_fp->napi;
  60
  61        /* Move bnx2x_fastpath contents */
  62        memcpy(to_fp, from_fp, sizeof(*to_fp));
  63        to_fp->index = to;
  64
  65        /* Retain the tpa_info of the original `to' version as we don't want
  66         * 2 FPs to contain the same tpa_info pointer.
  67         */
  68        to_fp->tpa_info = old_tpa_info;
  69
  70        /* move sp_objs contents as well, as their indices match fp ones */
  71        memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
  72
  73        /* move fp_stats contents as well, as their indices match fp ones */
  74        memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
  75
  76        /* Update txdata pointers in fp and move txdata content accordingly:
  77         * Each fp consumes 'max_cos' txdata structures, so the index should be
  78         * decremented by max_cos x delta.
  79         */
  80
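        /* Illustrative arithmetic (hypothetical values): with 8 ETH queues,
         * max_cos = 3 and the FCoE fastpath moving down by one slot
         * (delta = 1), old_max_eth_txqs = 8 * 3 = 24 and
         * new_max_eth_txqs = (8 - 1) * 3 = 21, so the FCoE txdata entry is
         * copied from index 24 + FCOE_TXQ_IDX_OFFSET to
         * 21 + FCOE_TXQ_IDX_OFFSET.
         */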
  81        old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
  82        new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
  83                                (bp)->max_cos;
  84        if (from == FCOE_IDX(bp)) {
  85                old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
  86                new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
  87        }
  88
  89        memcpy(&bp->bnx2x_txq[new_txdata_index],
  90               &bp->bnx2x_txq[old_txdata_index],
  91               sizeof(struct bnx2x_fp_txdata));
  92        to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
  93}
  94
  95/**
  96 * bnx2x_fill_fw_str - Fill buffer with FW version string.
  97 *
  98 * @bp:        driver handle
  99 * @buf:       character buffer to fill with the fw name
 100 * @buf_len:   length of the above buffer
 101 *
 102 */
 103void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
 104{
 105        if (IS_PF(bp)) {
 106                u8 phy_fw_ver[PHY_FW_VER_LEN];
 107
 108                phy_fw_ver[0] = '\0';
 109                bnx2x_get_ext_phy_fw_version(&bp->link_params,
 110                                             phy_fw_ver, PHY_FW_VER_LEN);
 111                strlcpy(buf, bp->fw_ver, buf_len);
 112                snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
 113                         "bc %d.%d.%d%s%s",
 114                         (bp->common.bc_ver & 0xff0000) >> 16,
 115                         (bp->common.bc_ver & 0xff00) >> 8,
 116                         (bp->common.bc_ver & 0xff),
 117                         ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
 118        } else {
 119                bnx2x_vf_fill_fw_str(bp, buf, buf_len);
 120        }
 121}
 122
 123/**
 124 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
 125 *
 126 * @bp: driver handle
 127 * @delta:      number of eth queues which were not allocated
 128 */
 129static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
 130{
 131        int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
 132
 133        /* Queue pointer cannot be re-set on an fp-basis, as moving pointer
 134         * backward along the array could cause memory to be overwritten
 135         */
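        /* Illustrative example (hypothetical numbers): with old_eth_num = 8,
         * delta = 2 and cos = 1, queue 0's txdata moves from index
         * 1 * 8 + 0 = 8 to new_idx = 1 * (8 - 2) + 0 = 6, assuming the
         * original layout placed the cos-1 entries right after the cos-0
         * block.
         */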
 136        for (cos = 1; cos < bp->max_cos; cos++) {
 137                for (i = 0; i < old_eth_num - delta; i++) {
 138                        struct bnx2x_fastpath *fp = &bp->fp[i];
 139                        int new_idx = cos * (old_eth_num - delta) + i;
 140
 141                        memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
 142                               sizeof(struct bnx2x_fp_txdata));
 143                        fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
 144                }
 145        }
 146}
 147
 148int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
 149
 150/* free skb in the packet ring at pos idx
 151 * return idx of last bd freed
 152 */
 153static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
 154                             u16 idx, unsigned int *pkts_compl,
 155                             unsigned int *bytes_compl)
 156{
 157        struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
 158        struct eth_tx_start_bd *tx_start_bd;
 159        struct eth_tx_bd *tx_data_bd;
 160        struct sk_buff *skb = tx_buf->skb;
 161        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
 162        int nbd;
 163
 164        /* prefetch skb end pointer to speed up dev_kfree_skb() */
 165        prefetch(&skb->end);
 166
 167        DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
 168           txdata->txq_index, idx, tx_buf, skb);
 169
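        /* Rough sketch of the BD chain handled below (based on the flow in
         * this function, not on firmware documentation): 'nbd' reported by
         * the start BD covers the start BD, a parse BD, an optional TSO
         * split-header BD and one data BD per fragment.  The start BD is
         * unmapped here, the parse/split BDs carry no DMA mapping and are
         * skipped, and the remaining data BDs are unmapped in the loop.
         */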
 170        /* unmap first bd */
 171        tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
 172        dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
 173                         BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
 174
 175        nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
 176#ifdef BNX2X_STOP_ON_ERROR
 177        if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
 178                BNX2X_ERR("BAD nbd!\n");
 179                bnx2x_panic();
 180        }
 181#endif
 182        new_cons = nbd + tx_buf->first_bd;
 183
 184        /* Get the next bd */
 185        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 186
 187        /* Skip a parse bd... */
 188        --nbd;
 189        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 190
 191        /* ...and the TSO split header bd since they have no mapping */
 192        if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
 193                --nbd;
 194                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 195        }
 196
 197        /* now free frags */
 198        while (nbd > 0) {
 199
 200                tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
 201                dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
 202                               BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
 203                if (--nbd)
 204                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 205        }
 206
 207        /* release skb */
 208        WARN_ON(!skb);
 209        if (likely(skb)) {
 210                (*pkts_compl)++;
 211                (*bytes_compl) += skb->len;
 212        }
 213
 214        dev_kfree_skb_any(skb);
 215        tx_buf->first_bd = 0;
 216        tx_buf->skb = NULL;
 217
 218        return new_cons;
 219}
 220
 221int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
 222{
 223        struct netdev_queue *txq;
 224        u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
 225        unsigned int pkts_compl = 0, bytes_compl = 0;
 226
 227#ifdef BNX2X_STOP_ON_ERROR
 228        if (unlikely(bp->panic))
 229                return -1;
 230#endif
 231
 232        txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
 233        hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
 234        sw_cons = txdata->tx_pkt_cons;
 235
 236        while (sw_cons != hw_cons) {
 237                u16 pkt_cons;
 238
 239                pkt_cons = TX_BD(sw_cons);
 240
 241                DP(NETIF_MSG_TX_DONE,
 242                   "queue[%d]: hw_cons %u  sw_cons %u  pkt_cons %u\n",
 243                   txdata->txq_index, hw_cons, sw_cons, pkt_cons);
 244
 245                bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
 246                                            &pkts_compl, &bytes_compl);
 247
 248                sw_cons++;
 249        }
 250
 251        netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
 252
 253        txdata->tx_pkt_cons = sw_cons;
 254        txdata->tx_bd_cons = bd_cons;
 255
 256        /* Need to make the tx_bd_cons update visible to start_xmit()
 257         * before checking for netif_tx_queue_stopped().  Without the
 258         * memory barrier, there is a small possibility that
 259         * start_xmit() will miss it and cause the queue to be stopped
 260         * forever.
 261         * On the other hand we need an rmb() here to ensure the proper
 262         * ordering of bit testing in the following
 263         * netif_tx_queue_stopped(txq) call.
 264         */
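        /* (Presumably this pairs with a matching barrier on the transmit
         * side after the queue is stopped; that pairing lives in
         * bnx2x_start_xmit() and is not shown here.)
         */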
 265        smp_mb();
 266
 267        if (unlikely(netif_tx_queue_stopped(txq))) {
 268                /* Taking tx_lock() is needed to prevent re-enabling the queue
 269                 * while it's empty. This could have happened if rx_action() gets
 270                 * suspended in bnx2x_tx_int() after the condition before
 271                 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
 272                 *
 273                 * stops the queue->sees fresh tx_bd_cons->releases the queue->
 274                 * sends some packets consuming the whole queue again->
 275                 * stops the queue
 276                 */
 277
 278                __netif_tx_lock(txq, smp_processor_id());
 279
 280                if ((netif_tx_queue_stopped(txq)) &&
 281                    (bp->state == BNX2X_STATE_OPEN) &&
 282                    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
 283                        netif_tx_wake_queue(txq);
 284
 285                __netif_tx_unlock(txq);
 286        }
 287        return 0;
 288}
 289
 290static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
 291                                             u16 idx)
 292{
 293        u16 last_max = fp->last_max_sge;
 294
 295        if (SUB_S16(idx, last_max) > 0)
 296                fp->last_max_sge = idx;
 297}
 298
 299static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
 300                                         u16 sge_len,
 301                                         struct eth_end_agg_rx_cqe *cqe)
 302{
 303        struct bnx2x *bp = fp->bp;
 304        u16 last_max, last_elem, first_elem;
 305        u16 delta = 0;
 306        u16 i;
 307
 308        if (!sge_len)
 309                return;
 310
 311        /* First mark all used pages */
 312        for (i = 0; i < sge_len; i++)
 313                BIT_VEC64_CLEAR_BIT(fp->sge_mask,
 314                        RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
 315
 316        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
 317           sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
 318
 319        /* Here we assume that the last SGE index is the biggest */
 320        prefetch((void *)(fp->sge_mask));
 321        bnx2x_update_last_max_sge(fp,
 322                le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
 323
 324        last_max = RX_SGE(fp->last_max_sge);
 325        last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
 326        first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
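        /* Illustrative mapping (assuming 64-bit mask elements, i.e.
         * BIT_VEC64_ELEM_SHIFT == 6): an SGE index of 130 lands in mask
         * element 130 >> 6 = 2, bit 130 & 63 = 2.
         */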
 327
 328        /* If ring is not full */
 329        if (last_elem + 1 != first_elem)
 330                last_elem++;
 331
 332        /* Now update the prod */
 333        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
 334                if (likely(fp->sge_mask[i]))
 335                        break;
 336
 337                fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
 338                delta += BIT_VEC64_ELEM_SZ;
 339        }
 340
 341        if (delta > 0) {
 342                fp->rx_sge_prod += delta;
 343                /* clear page-end entries */
 344                bnx2x_clear_sge_mask_next_elems(fp);
 345        }
 346
 347        DP(NETIF_MSG_RX_STATUS,
 348           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
 349           fp->last_max_sge, fp->rx_sge_prod);
 350}
 351
 352/* Get the Toeplitz hash value to be set in the skb, using the value
 353 * from the CQE (calculated by HW).
 354 */
 355static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
 356                            const struct eth_fast_path_rx_cqe *cqe,
 357                            bool *l4_rxhash)
 358{
 359        /* Get Toeplitz hash from CQE */
 360        if ((bp->dev->features & NETIF_F_RXHASH) &&
 361            (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
 362                enum eth_rss_hash_type htype;
 363
 364                htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
 365                *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
 366                             (htype == TCP_IPV6_HASH_TYPE);
 367                return le32_to_cpu(cqe->rss_hash_result);
 368        }
 369        *l4_rxhash = false;
 370        return 0;
 371}
 372
 373static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
 374                            u16 cons, u16 prod,
 375                            struct eth_fast_path_rx_cqe *cqe)
 376{
 377        struct bnx2x *bp = fp->bp;
 378        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
 379        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
 380        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
 381        dma_addr_t mapping;
 382        struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
 383        struct sw_rx_bd *first_buf = &tpa_info->first_buf;
 384
 385        /* print error if current state != stop */
 386        if (tpa_info->tpa_state != BNX2X_TPA_STOP)
 387                BNX2X_ERR("start of bin not in stop [%d]\n", queue);
 388
 389        /* Try to map an empty data buffer from the aggregation info  */
 390        mapping = dma_map_single(&bp->pdev->dev,
 391                                 first_buf->data + NET_SKB_PAD,
 392                                 fp->rx_buf_size, DMA_FROM_DEVICE);
 393        /*
 394         *  ...if it fails - move the skb from the consumer to the producer
 395         *  and set the current aggregation state as ERROR to drop it
 396         *  when TPA_STOP arrives.
 397         */
 398
 399        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 400                /* Move the BD from the consumer to the producer */
 401                bnx2x_reuse_rx_data(fp, cons, prod);
 402                tpa_info->tpa_state = BNX2X_TPA_ERROR;
 403                return;
 404        }
 405
 406        /* move empty data from pool to prod */
 407        prod_rx_buf->data = first_buf->data;
 408        dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
 409        /* point prod_bd to new data */
 410        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 411        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
 412
 413        /* move partial skb from cons to pool (don't unmap yet) */
 414        *first_buf = *cons_rx_buf;
 415
 416        /* mark bin state as START */
 417        tpa_info->parsing_flags =
 418                le16_to_cpu(cqe->pars_flags.flags);
 419        tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
 420        tpa_info->tpa_state = BNX2X_TPA_START;
 421        tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
 422        tpa_info->placement_offset = cqe->placement_offset;
 423        tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
 424        if (fp->mode == TPA_MODE_GRO) {
 425                u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
 426                tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
 427                tpa_info->gro_size = gro_size;
 428        }
 429
 430#ifdef BNX2X_STOP_ON_ERROR
 431        fp->tpa_queue_used |= (1 << queue);
 432#ifdef _ASM_GENERIC_INT_L64_H
 433        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
 434#else
 435        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
 436#endif
 437           fp->tpa_queue_used);
 438#endif
 439}
 440
 441/* Timestamp option length allowed for TPA aggregation:
 442 *
 443 *              nop nop kind length echo val
 444 */
 445#define TPA_TSTAMP_OPT_LEN      12
 446/**
 447 * bnx2x_set_gro_params - compute GRO values
 448 *
 449 * @skb:                packet skb
 450 * @parsing_flags:      parsing flags from the START CQE
 451 * @len_on_bd:          total length of the first packet for the
 452 *                      aggregation.
 453 * @pkt_len:            length of all segments
 454 *
 455 * The approximate MSS value for this aggregation is calculated using
 456 * its first packet.
 457 * Also computes the number of aggregated segments and the gso_type.
 458 */
 459static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
 460                                 u16 len_on_bd, unsigned int pkt_len,
 461                                 u16 num_of_coalesced_segs)
 462{
 463        /* TPA aggregation won't have either IP options or TCP options
 464         * other than timestamp or IPv6 extension headers.
 465         */
 466        u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
 467
 468        if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
 469            PRS_FLAG_OVERETH_IPV6) {
 470                hdrs_len += sizeof(struct ipv6hdr);
 471                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
 472        } else {
 473                hdrs_len += sizeof(struct iphdr);
 474                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
 475        }
 476
 477        /* Check if there was a TCP timestamp; if there was, it will
 478         * always be 12 bytes long: nop nop kind length echo val.
 479         *
 480         * Otherwise FW would close the aggregation.
 481         */
 482        if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
 483                hdrs_len += TPA_TSTAMP_OPT_LEN;
 484
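        /* Worked example (illustrative): for IPv4 with a TCP timestamp
         * option, hdrs_len = 14 (ETH) + 20 (IP) + 20 (TCP) + 12 (tstamp)
         * = 66, so gso_size below becomes len_on_bd - 66.
         */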
 485        skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
 486
 487        /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
 488         * to skb_shinfo(skb)->gso_segs
 489         */
 490        NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
 491}
 492
 493static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
 494                              struct bnx2x_fastpath *fp, u16 index)
 495{
 496        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
 497        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
 498        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
 499        dma_addr_t mapping;
 500
 501        if (unlikely(page == NULL)) {
 502                BNX2X_ERR("Can't alloc sge\n");
 503                return -ENOMEM;
 504        }
 505
 506        mapping = dma_map_page(&bp->pdev->dev, page, 0,
 507                               SGE_PAGES, DMA_FROM_DEVICE);
 508        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 509                __free_pages(page, PAGES_PER_SGE_SHIFT);
 510                BNX2X_ERR("Can't map sge\n");
 511                return -ENOMEM;
 512        }
 513
 514        sw_buf->page = page;
 515        dma_unmap_addr_set(sw_buf, mapping, mapping);
 516
 517        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
 518        sge->addr_lo = cpu_to_le32(U64_LO(mapping));
 519
 520        return 0;
 521}
 522
 523static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 524                               struct bnx2x_agg_info *tpa_info,
 525                               u16 pages,
 526                               struct sk_buff *skb,
 527                               struct eth_end_agg_rx_cqe *cqe,
 528                               u16 cqe_idx)
 529{
 530        struct sw_rx_page *rx_pg, old_rx_pg;
 531        u32 i, frag_len, frag_size;
 532        int err, j, frag_id = 0;
 533        u16 len_on_bd = tpa_info->len_on_bd;
 534        u16 full_page = 0, gro_size = 0;
 535
 536        frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
 537
 538        if (fp->mode == TPA_MODE_GRO) {
 539                gro_size = tpa_info->gro_size;
 540                full_page = tpa_info->full_page;
 541        }
 542
 543        /* This is needed in order to enable forwarding support */
 544        if (frag_size)
 545                bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
 546                                     le16_to_cpu(cqe->pkt_len),
 547                                     le16_to_cpu(cqe->num_of_coalesced_segs));
 548
 549#ifdef BNX2X_STOP_ON_ERROR
 550        if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
 551                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
 552                          pages, cqe_idx);
 553                BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
 554                bnx2x_panic();
 555                return -EINVAL;
 556        }
 557#endif
 558
 559        /* Run through the SGL and compose the fragmented skb */
 560        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
 561                u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
 562
 563                /* FW gives the indices of the SGE as if the ring is an array
 564                   (meaning that "next" element will consume 2 indices) */
 565                if (fp->mode == TPA_MODE_GRO)
 566                        frag_len = min_t(u32, frag_size, (u32)full_page);
 567                else /* LRO */
 568                        frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
 569
 570                rx_pg = &fp->rx_page_ring[sge_idx];
 571                old_rx_pg = *rx_pg;
 572
 573                /* If we fail to allocate a substitute page, we simply stop
 574                   where we are and drop the whole packet */
 575                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
 576                if (unlikely(err)) {
 577                        bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
 578                        return err;
 579                }
 580
 581                /* Unmap the page as we're going to pass it to the stack */
 582                dma_unmap_page(&bp->pdev->dev,
 583                               dma_unmap_addr(&old_rx_pg, mapping),
 584                               SGE_PAGES, DMA_FROM_DEVICE);
 585                /* Add one frag and update the appropriate fields in the skb */
 586                if (fp->mode == TPA_MODE_LRO)
 587                        skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
 588                else { /* GRO */
 589                        int rem;
 590                        int offset = 0;
 591                        for (rem = frag_len; rem > 0; rem -= gro_size) {
 592                                int len = rem > gro_size ? gro_size : rem;
 593                                skb_fill_page_desc(skb, frag_id++,
 594                                                   old_rx_pg.page, offset, len);
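                                /* Each extra frag that shares this page needs
                                 * its own reference; the first frag keeps the
                                 * reference taken when the page was allocated.
                                 */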
 595                                if (offset)
 596                                        get_page(old_rx_pg.page);
 597                                offset += len;
 598                        }
 599                }
 600
 601                skb->data_len += frag_len;
 602                skb->truesize += SGE_PAGES;
 603                skb->len += frag_len;
 604
 605                frag_size -= frag_len;
 606        }
 607
 608        return 0;
 609}
 610
 611static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
 612{
 613        if (fp->rx_frag_size)
 614                put_page(virt_to_head_page(data));
 615        else
 616                kfree(data);
 617}
 618
 619static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
 620{
 621        if (fp->rx_frag_size)
 622                return netdev_alloc_frag(fp->rx_frag_size);
 623
 624        return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
 625}
 626
 627#ifdef CONFIG_INET
 628static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
 629{
 630        const struct iphdr *iph = ip_hdr(skb);
 631        struct tcphdr *th;
 632
 633        skb_set_transport_header(skb, sizeof(struct iphdr));
 634        th = tcp_hdr(skb);
 635
 636        th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
 637                                  iph->saddr, iph->daddr, 0);
 638}
 639
 640static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
 641{
 642        struct ipv6hdr *iph = ipv6_hdr(skb);
 643        struct tcphdr *th;
 644
 645        skb_set_transport_header(skb, sizeof(struct ipv6hdr));
 646        th = tcp_hdr(skb);
 647
 648        th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
 649                                  &iph->saddr, &iph->daddr, 0);
 650}
 651
 652static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
 653                            void (*gro_func)(struct bnx2x*, struct sk_buff*))
 654{
 655        skb_set_network_header(skb, 0);
 656        gro_func(bp, skb);
 657        tcp_gro_complete(skb);
 658}
 659#endif
 660
 661static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 662                               struct sk_buff *skb)
 663{
 664#ifdef CONFIG_INET
 665        if (skb_shinfo(skb)->gso_size) {
 666                switch (be16_to_cpu(skb->protocol)) {
 667                case ETH_P_IP:
 668                        bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
 669                        break;
 670                case ETH_P_IPV6:
 671                        bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
 672                        break;
 673                default:
 674                        BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
 675                                  be16_to_cpu(skb->protocol));
 676                }
 677        }
 678#endif
 679        napi_gro_receive(&fp->napi, skb);
 680}
 681
 682static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 683                           struct bnx2x_agg_info *tpa_info,
 684                           u16 pages,
 685                           struct eth_end_agg_rx_cqe *cqe,
 686                           u16 cqe_idx)
 687{
 688        struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
 689        u8 pad = tpa_info->placement_offset;
 690        u16 len = tpa_info->len_on_bd;
 691        struct sk_buff *skb = NULL;
 692        u8 *new_data, *data = rx_buf->data;
 693        u8 old_tpa_state = tpa_info->tpa_state;
 694
 695        tpa_info->tpa_state = BNX2X_TPA_STOP;
 696
 697        /* If there was an error during the handling of the TPA_START -
 698         * drop this aggregation.
 699         */
 700        if (old_tpa_state == BNX2X_TPA_ERROR)
 701                goto drop;
 702
 703        /* Try to allocate the new data */
 704        new_data = bnx2x_frag_alloc(fp);
 705        /* Unmap skb in the pool anyway, as we are going to change
 706           pool entry status to BNX2X_TPA_STOP even if new skb allocation
 707           fails. */
 708        dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
 709                         fp->rx_buf_size, DMA_FROM_DEVICE);
 710        if (likely(new_data))
 711                skb = build_skb(data, fp->rx_frag_size);
 712
 713        if (likely(skb)) {
 714#ifdef BNX2X_STOP_ON_ERROR
 715                if (pad + len > fp->rx_buf_size) {
 716                        BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
 717                                  pad, len, fp->rx_buf_size);
 718                        bnx2x_panic();
 719                        return;
 720                }
 721#endif
 722
 723                skb_reserve(skb, pad + NET_SKB_PAD);
 724                skb_put(skb, len);
 725                skb->rxhash = tpa_info->rxhash;
 726                skb->l4_rxhash = tpa_info->l4_rxhash;
 727
 728                skb->protocol = eth_type_trans(skb, bp->dev);
 729                skb->ip_summed = CHECKSUM_UNNECESSARY;
 730
 731                if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
 732                                         skb, cqe, cqe_idx)) {
 733                        if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
 734                                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
 735                        bnx2x_gro_receive(bp, fp, skb);
 736                } else {
 737                        DP(NETIF_MSG_RX_STATUS,
 738                           "Failed to allocate new pages - dropping packet!\n");
 739                        dev_kfree_skb_any(skb);
 740                }
 741
 742                /* put new data in bin */
 743                rx_buf->data = new_data;
 744
 745                return;
 746        }
 747        bnx2x_frag_free(fp, new_data);
 748drop:
 749        /* drop the packet and keep the buffer in the bin */
 750        DP(NETIF_MSG_RX_STATUS,
 751           "Failed to allocate or map a new skb - dropping packet!\n");
 752        bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
 753}
 754
 755static int bnx2x_alloc_rx_data(struct bnx2x *bp,
 756                               struct bnx2x_fastpath *fp, u16 index)
 757{
 758        u8 *data;
 759        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
 760        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
 761        dma_addr_t mapping;
 762
 763        data = bnx2x_frag_alloc(fp);
 764        if (unlikely(data == NULL))
 765                return -ENOMEM;
 766
 767        mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
 768                                 fp->rx_buf_size,
 769                                 DMA_FROM_DEVICE);
 770        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 771                bnx2x_frag_free(fp, data);
 772                BNX2X_ERR("Can't map rx data\n");
 773                return -ENOMEM;
 774        }
 775
 776        rx_buf->data = data;
 777        dma_unmap_addr_set(rx_buf, mapping, mapping);
 778
 779        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 780        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
 781
 782        return 0;
 783}
 784
 785static
 786void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
 787                                 struct bnx2x_fastpath *fp,
 788                                 struct bnx2x_eth_q_stats *qstats)
 789{
 790        /* Do nothing if no L4 csum validation was done.
 791         * We do not check whether IP csum was validated. For IPv4 we assume
 792         * that if the card got as far as validating the L4 csum, it also
 793         * validated the IP csum. IPv6 has no IP csum.
 794         */
 795        if (cqe->fast_path_cqe.status_flags &
 796            ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
 797                return;
 798
 799        /* If L4 validation was done, check if an error was found. */
 800
 801        if (cqe->fast_path_cqe.type_error_flags &
 802            (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
 803             ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
 804                qstats->hw_csum_err++;
 805        else
 806                skb->ip_summed = CHECKSUM_UNNECESSARY;
 807}
 808
 809int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 810{
 811        struct bnx2x *bp = fp->bp;
 812        u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
 813        u16 sw_comp_cons, sw_comp_prod;
 814        int rx_pkt = 0;
 815        union eth_rx_cqe *cqe;
 816        struct eth_fast_path_rx_cqe *cqe_fp;
 817
 818#ifdef BNX2X_STOP_ON_ERROR
 819        if (unlikely(bp->panic))
 820                return 0;
 821#endif
 822
 823        bd_cons = fp->rx_bd_cons;
 824        bd_prod = fp->rx_bd_prod;
 825        bd_prod_fw = bd_prod;
 826        sw_comp_cons = fp->rx_comp_cons;
 827        sw_comp_prod = fp->rx_comp_prod;
 828
 829        comp_ring_cons = RCQ_BD(sw_comp_cons);
 830        cqe = &fp->rx_comp_ring[comp_ring_cons];
 831        cqe_fp = &cqe->fast_path_cqe;
 832
 833        DP(NETIF_MSG_RX_STATUS,
 834           "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
 835
 836        while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
 837                struct sw_rx_bd *rx_buf = NULL;
 838                struct sk_buff *skb;
 839                u8 cqe_fp_flags;
 840                enum eth_rx_cqe_type cqe_fp_type;
 841                u16 len, pad, queue;
 842                u8 *data;
 843                bool l4_rxhash;
 844
 845#ifdef BNX2X_STOP_ON_ERROR
 846                if (unlikely(bp->panic))
 847                        return 0;
 848#endif
 849
 850                bd_prod = RX_BD(bd_prod);
 851                bd_cons = RX_BD(bd_cons);
 852
 853                cqe_fp_flags = cqe_fp->type_error_flags;
 854                cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
 855
 856                DP(NETIF_MSG_RX_STATUS,
 857                   "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
 858                   CQE_TYPE(cqe_fp_flags),
 859                   cqe_fp_flags, cqe_fp->status_flags,
 860                   le32_to_cpu(cqe_fp->rss_hash_result),
 861                   le16_to_cpu(cqe_fp->vlan_tag),
 862                   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
 863
 864                /* is this a slowpath msg? */
 865                if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
 866                        bnx2x_sp_event(fp, cqe);
 867                        goto next_cqe;
 868                }
 869
 870                rx_buf = &fp->rx_buf_ring[bd_cons];
 871                data = rx_buf->data;
 872
 873                if (!CQE_TYPE_FAST(cqe_fp_type)) {
 874                        struct bnx2x_agg_info *tpa_info;
 875                        u16 frag_size, pages;
 876#ifdef BNX2X_STOP_ON_ERROR
 877                        /* sanity check */
 878                        if (fp->disable_tpa &&
 879                            (CQE_TYPE_START(cqe_fp_type) ||
 880                             CQE_TYPE_STOP(cqe_fp_type)))
 881                                BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
 882                                          CQE_TYPE(cqe_fp_type));
 883#endif
 884
 885                        if (CQE_TYPE_START(cqe_fp_type)) {
 886                                u16 queue = cqe_fp->queue_index;
 887                                DP(NETIF_MSG_RX_STATUS,
 888                                   "calling tpa_start on queue %d\n",
 889                                   queue);
 890
 891                                bnx2x_tpa_start(fp, queue,
 892                                                bd_cons, bd_prod,
 893                                                cqe_fp);
 894
 895                                goto next_rx;
 896                        }
 897                        queue = cqe->end_agg_cqe.queue_index;
 898                        tpa_info = &fp->tpa_info[queue];
 899                        DP(NETIF_MSG_RX_STATUS,
 900                           "calling tpa_stop on queue %d\n",
 901                           queue);
 902
 903                        frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
 904                                    tpa_info->len_on_bd;
 905
 906                        if (fp->mode == TPA_MODE_GRO)
 907                                pages = (frag_size + tpa_info->full_page - 1) /
 908                                         tpa_info->full_page;
 909                        else
 910                                pages = SGE_PAGE_ALIGN(frag_size) >>
 911                                        SGE_PAGE_SHIFT;
 912
 913                        bnx2x_tpa_stop(bp, fp, tpa_info, pages,
 914                                       &cqe->end_agg_cqe, comp_ring_cons);
 915#ifdef BNX2X_STOP_ON_ERROR
 916                        if (bp->panic)
 917                                return 0;
 918#endif
 919
 920                        bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
 921                        goto next_cqe;
 922                }
 923                /* non TPA */
 924                len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
 925                pad = cqe_fp->placement_offset;
 926                dma_sync_single_for_cpu(&bp->pdev->dev,
 927                                        dma_unmap_addr(rx_buf, mapping),
 928                                        pad + RX_COPY_THRESH,
 929                                        DMA_FROM_DEVICE);
 930                pad += NET_SKB_PAD;
 931                prefetch(data + pad); /* speed up eth_type_trans() */
 932                /* is this an error packet? */
 933                if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
 934                        DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
 935                           "ERROR  flags %x  rx packet %u\n",
 936                           cqe_fp_flags, sw_comp_cons);
 937                        bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
 938                        goto reuse_rx;
 939                }
 940
 941                /* Since we don't have a jumbo ring
 942                 * copy small packets if mtu > 1500
 943                 */
 944                if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
 945                    (len <= RX_COPY_THRESH)) {
 946                        skb = netdev_alloc_skb_ip_align(bp->dev, len);
 947                        if (skb == NULL) {
 948                                DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
 949                                   "ERROR  packet dropped because of alloc failure\n");
 950                                bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
 951                                goto reuse_rx;
 952                        }
 953                        memcpy(skb->data, data + pad, len);
 954                        bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
 955                } else {
 956                        if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
 957                                dma_unmap_single(&bp->pdev->dev,
 958                                                 dma_unmap_addr(rx_buf, mapping),
 959                                                 fp->rx_buf_size,
 960                                                 DMA_FROM_DEVICE);
 961                                skb = build_skb(data, fp->rx_frag_size);
 962                                if (unlikely(!skb)) {
 963                                        bnx2x_frag_free(fp, data);
 964                                        bnx2x_fp_qstats(bp, fp)->
 965                                                        rx_skb_alloc_failed++;
 966                                        goto next_rx;
 967                                }
 968                                skb_reserve(skb, pad);
 969                        } else {
 970                                DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
 971                                   "ERROR  packet dropped because of alloc failure\n");
 972                                bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
 973reuse_rx:
 974                                bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
 975                                goto next_rx;
 976                        }
 977                }
 978
 979                skb_put(skb, len);
 980                skb->protocol = eth_type_trans(skb, bp->dev);
 981
 982                /* Set Toeplitz hash for a non-LRO skb */
 983                skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
 984                skb->l4_rxhash = l4_rxhash;
 985
 986                skb_checksum_none_assert(skb);
 987
 988                if (bp->dev->features & NETIF_F_RXCSUM)
 989                        bnx2x_csum_validate(skb, cqe, fp,
 990                                            bnx2x_fp_qstats(bp, fp));
 991
 992                skb_record_rx_queue(skb, fp->rx_queue);
 993
 994                if (le16_to_cpu(cqe_fp->pars_flags.flags) &
 995                    PARSING_FLAGS_VLAN)
 996                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
 997                                               le16_to_cpu(cqe_fp->vlan_tag));
 998
 999                skb_mark_napi_id(skb, &fp->napi);
1000
1001                if (bnx2x_fp_ll_polling(fp))
1002                        netif_receive_skb(skb);
1003                else
1004                        napi_gro_receive(&fp->napi, skb);
1005next_rx:
1006                rx_buf->data = NULL;
1007
1008                bd_cons = NEXT_RX_IDX(bd_cons);
1009                bd_prod = NEXT_RX_IDX(bd_prod);
1010                bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1011                rx_pkt++;
1012next_cqe:
1013                sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1014                sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1015
1016                /* mark CQE as free */
1017                BNX2X_SEED_CQE(cqe_fp);
1018
1019                if (rx_pkt == budget)
1020                        break;
1021
1022                comp_ring_cons = RCQ_BD(sw_comp_cons);
1023                cqe = &fp->rx_comp_ring[comp_ring_cons];
1024                cqe_fp = &cqe->fast_path_cqe;
1025        } /* while */
1026
1027        fp->rx_bd_cons = bd_cons;
1028        fp->rx_bd_prod = bd_prod_fw;
1029        fp->rx_comp_cons = sw_comp_cons;
1030        fp->rx_comp_prod = sw_comp_prod;
1031
1032        /* Update producers */
1033        bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1034                             fp->rx_sge_prod);
1035
1036        fp->rx_pkt += rx_pkt;
1037        fp->rx_calls++;
1038
1039        return rx_pkt;
1040}
1041
1042static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1043{
1044        struct bnx2x_fastpath *fp = fp_cookie;
1045        struct bnx2x *bp = fp->bp;
1046        u8 cos;
1047
1048        DP(NETIF_MSG_INTR,
1049           "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1050           fp->index, fp->fw_sb_id, fp->igu_sb_id);
1051
1052        bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1053
1054#ifdef BNX2X_STOP_ON_ERROR
1055        if (unlikely(bp->panic))
1056                return IRQ_HANDLED;
1057#endif
1058
1059        /* Handle Rx and Tx according to MSI-X vector */
1060        for_each_cos_in_tx_queue(fp, cos)
1061                prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1062
1063        prefetch(&fp->sb_running_index[SM_RX_ID]);
1064        napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1065
1066        return IRQ_HANDLED;
1067}
1068
1069/* HW Lock for shared dual port PHYs */
1070void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1071{
1072        mutex_lock(&bp->port.phy_mutex);
1073
1074        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1075}
1076
1077void bnx2x_release_phy_lock(struct bnx2x *bp)
1078{
1079        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1080
1081        mutex_unlock(&bp->port.phy_mutex);
1082}
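
/* Typical usage of the PHY lock helpers (see bnx2x_link_report() below):
 *
 *	bnx2x_acquire_phy_lock(bp);
 *	__bnx2x_link_report(bp);
 *	bnx2x_release_phy_lock(bp);
 */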
1083
1084/* calculates MF speed according to current linespeed and MF configuration */
1085u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1086{
1087        u16 line_speed = bp->link_vars.line_speed;
1088        if (IS_MF(bp)) {
1089                u16 maxCfg = bnx2x_extract_max_cfg(bp,
1090                                                   bp->mf_config[BP_VN(bp)]);
1091
1092                /* Calculate the current MAX line speed limit for the MF
1093                 * devices
1094                 */
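                /* Illustrative numbers (assuming maxCfg is a percentage in SI
                 * mode and in units of 100 Mbps in SD mode): on a 10000 Mbps
                 * link, SI with maxCfg = 30 yields 3000 Mbps, while SD with
                 * maxCfg = 25 caps the reported speed at 2500 Mbps.
                 */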
1095                if (IS_MF_SI(bp))
1096                        line_speed = (line_speed * maxCfg) / 100;
1097                else { /* SD mode */
1098                        u16 vn_max_rate = maxCfg * 100;
1099
1100                        if (vn_max_rate < line_speed)
1101                                line_speed = vn_max_rate;
1102                }
1103        }
1104
1105        return line_speed;
1106}
1107
1108/**
1109 * bnx2x_fill_report_data - fill link report data to be reported
1110 *
1111 * @bp:         driver handle
1112 * @data:       link state to update
1113 *
1114 * It uses non-atomic bit operations because it is called under the mutex.
1115 */
1116static void bnx2x_fill_report_data(struct bnx2x *bp,
1117                                   struct bnx2x_link_report_data *data)
1118{
1119        u16 line_speed = bnx2x_get_mf_speed(bp);
1120
1121        memset(data, 0, sizeof(*data));
1122
1123        /* Fill the report data: effective line speed */
1124        data->line_speed = line_speed;
1125
1126        /* Link is down */
1127        if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1128                __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1129                          &data->link_report_flags);
1130
1131        /* Full DUPLEX */
1132        if (bp->link_vars.duplex == DUPLEX_FULL)
1133                __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
1134
1135        /* Rx Flow Control is ON */
1136        if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1137                __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
1138
1139        /* Tx Flow Control is ON */
1140        if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1141                __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
1142}
1143
1144/**
1145 * bnx2x_link_report - report link status to OS.
1146 *
1147 * @bp:         driver handle
1148 *
1149 * Calls the __bnx2x_link_report() under the same locking scheme
1150 * as a link/PHY state managing code to ensure a consistent link
1151 * reporting.
1152 */
1153
1154void bnx2x_link_report(struct bnx2x *bp)
1155{
1156        bnx2x_acquire_phy_lock(bp);
1157        __bnx2x_link_report(bp);
1158        bnx2x_release_phy_lock(bp);
1159}
1160
1161/**
1162 * __bnx2x_link_report - report link status to OS.
1163 *
1164 * @bp:         driver handle
1165 *
1166 * Non-atomic implementation.
1167 * Should be called under the phy_lock.
1168 */
1169void __bnx2x_link_report(struct bnx2x *bp)
1170{
1171        struct bnx2x_link_report_data cur_data;
1172
1173        /* reread mf_cfg */
1174        if (IS_PF(bp) && !CHIP_IS_E1(bp))
1175                bnx2x_read_mf_cfg(bp);
1176
1177        /* Read the current link report info */
1178        bnx2x_fill_report_data(bp, &cur_data);
1179
1180        /* Don't report link down or exactly the same link status twice */
1181        if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1182            (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1183                      &bp->last_reported_link.link_report_flags) &&
1184             test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1185                      &cur_data.link_report_flags)))
1186                return;
1187
1188        bp->link_cnt++;
1189
1190        /* We are going to report new link parameters now -
1191         * remember the current data for the next time.
1192         */
1193        memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1194
1195        if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1196                     &cur_data.link_report_flags)) {
1197                netif_carrier_off(bp->dev);
1198                netdev_err(bp->dev, "NIC Link is Down\n");
1199                return;
1200        } else {
1201                const char *duplex;
1202                const char *flow;
1203
1204                netif_carrier_on(bp->dev);
1205
1206                if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1207                                       &cur_data.link_report_flags))
1208                        duplex = "full";
1209                else
1210                        duplex = "half";
1211
1212                /* Handle the FC at the end so that only the FC flags can
1213                 * possibly be set. This way we can easily check whether any
1214                 * FC is enabled.
1215                 */
1216                if (cur_data.link_report_flags) {
1217                        if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1218                                     &cur_data.link_report_flags)) {
1219                                if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1220                                     &cur_data.link_report_flags))
1221                                        flow = "ON - receive & transmit";
1222                                else
1223                                        flow = "ON - receive";
1224                        } else {
1225                                flow = "ON - transmit";
1226                        }
1227                } else {
1228                        flow = "none";
1229                }
1230                netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1231                            cur_data.line_speed, duplex, flow);
1232        }
1233}
1234
1235static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1236{
1237        int i;
1238
1239        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1240                struct eth_rx_sge *sge;
1241
1242                sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1243                sge->addr_hi =
1244                        cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1245                        BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1246
1247                sge->addr_lo =
1248                        cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1249                        BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1250        }
1251}
1252
1253static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1254                                struct bnx2x_fastpath *fp, int last)
1255{
1256        int i;
1257
1258        for (i = 0; i < last; i++) {
1259                struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1260                struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1261                u8 *data = first_buf->data;
1262
1263                if (data == NULL) {
1264                        DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1265                        continue;
1266                }
1267                if (tpa_info->tpa_state == BNX2X_TPA_START)
1268                        dma_unmap_single(&bp->pdev->dev,
1269                                         dma_unmap_addr(first_buf, mapping),
1270                                         fp->rx_buf_size, DMA_FROM_DEVICE);
1271                bnx2x_frag_free(fp, data);
1272                first_buf->data = NULL;
1273        }
1274}
1275
1276void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1277{
1278        int j;
1279
1280        for_each_rx_queue_cnic(bp, j) {
1281                struct bnx2x_fastpath *fp = &bp->fp[j];
1282
1283                fp->rx_bd_cons = 0;
1284
1285                /* Activate BD ring */
1286                /* Warning!
1287                 * This will generate an interrupt (to the TSTORM);
1288                 * it must only be done after the chip is initialized.
1289                 */
1290                bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1291                                     fp->rx_sge_prod);
1292        }
1293}
1294
1295void bnx2x_init_rx_rings(struct bnx2x *bp)
1296{
1297        int func = BP_FUNC(bp);
1298        u16 ring_prod;
1299        int i, j;
1300
1301        /* Allocate TPA resources */
1302        for_each_eth_queue(bp, j) {
1303                struct bnx2x_fastpath *fp = &bp->fp[j];
1304
1305                DP(NETIF_MSG_IFUP,
1306                   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1307
1308                if (!fp->disable_tpa) {
1309                        /* Fill the per-aggregation pool */
1310                        for (i = 0; i < MAX_AGG_QS(bp); i++) {
1311                                struct bnx2x_agg_info *tpa_info =
1312                                        &fp->tpa_info[i];
1313                                struct sw_rx_bd *first_buf =
1314                                        &tpa_info->first_buf;
1315
1316                                first_buf->data = bnx2x_frag_alloc(fp);
1317                                if (!first_buf->data) {
1318                                        BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1319                                                  j);
1320                                        bnx2x_free_tpa_pool(bp, fp, i);
1321                                        fp->disable_tpa = 1;
1322                                        break;
1323                                }
1324                                dma_unmap_addr_set(first_buf, mapping, 0);
1325                                tpa_info->tpa_state = BNX2X_TPA_STOP;
1326                        }
1327
1328                        /* "next page" elements initialization */
1329                        bnx2x_set_next_page_sgl(fp);
1330
1331                        /* set SGEs bit mask */
1332                        bnx2x_init_sge_ring_bit_mask(fp);
1333
1334                        /* Allocate SGEs and initialize the ring elements */
1335                        for (i = 0, ring_prod = 0;
1336                             i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1337
1338                                if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
1339                                        BNX2X_ERR("was only able to allocate %d rx sges\n",
1340                                                  i);
1341                                        BNX2X_ERR("disabling TPA for queue[%d]\n",
1342                                                  j);
1343                                        /* Cleanup already allocated elements */
1344                                        bnx2x_free_rx_sge_range(bp, fp,
1345                                                                ring_prod);
1346                                        bnx2x_free_tpa_pool(bp, fp,
1347                                                            MAX_AGG_QS(bp));
1348                                        fp->disable_tpa = 1;
1349                                        ring_prod = 0;
1350                                        break;
1351                                }
1352                                ring_prod = NEXT_SGE_IDX(ring_prod);
1353                        }
1354
1355                        fp->rx_sge_prod = ring_prod;
1356                }
1357        }
1358
1359        for_each_eth_queue(bp, j) {
1360                struct bnx2x_fastpath *fp = &bp->fp[j];
1361
1362                fp->rx_bd_cons = 0;
1363
1364                /* Activate BD ring */
1365                /* Warning!
1366                 * This will generate an interrupt (to the TSTORM);
1367                 * it must only be done after the chip is initialized.
1368                 */
1369                bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1370                                     fp->rx_sge_prod);
1371
1372                if (j != 0)
1373                        continue;
1374
1375                if (CHIP_IS_E1(bp)) {
1376                        REG_WR(bp, BAR_USTRORM_INTMEM +
1377                               USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1378                               U64_LO(fp->rx_comp_mapping));
1379                        REG_WR(bp, BAR_USTRORM_INTMEM +
1380                               USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1381                               U64_HI(fp->rx_comp_mapping));
1382                }
1383        }
1384}
1385
1386static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1387{
1388        u8 cos;
1389        struct bnx2x *bp = fp->bp;
1390
1391        for_each_cos_in_tx_queue(fp, cos) {
1392                struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1393                unsigned pkts_compl = 0, bytes_compl = 0;
1394
1395                u16 sw_prod = txdata->tx_pkt_prod;
1396                u16 sw_cons = txdata->tx_pkt_cons;
1397
1398                while (sw_cons != sw_prod) {
1399                        bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1400                                          &pkts_compl, &bytes_compl);
1401                        sw_cons++;
1402                }
1403
1404                netdev_tx_reset_queue(
1405                        netdev_get_tx_queue(bp->dev,
1406                                            txdata->txq_index));
1407        }
1408}
1409
1410static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1411{
1412        int i;
1413
1414        for_each_tx_queue_cnic(bp, i) {
1415                bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1416        }
1417}
1418
1419static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1420{
1421        int i;
1422
1423        for_each_eth_queue(bp, i) {
1424                bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1425        }
1426}
1427
1428static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1429{
1430        struct bnx2x *bp = fp->bp;
1431        int i;
1432
1433        /* ring wasn't allocated */
1434        if (fp->rx_buf_ring == NULL)
1435                return;
1436
1437        for (i = 0; i < NUM_RX_BD; i++) {
1438                struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1439                u8 *data = rx_buf->data;
1440
1441                if (data == NULL)
1442                        continue;
1443                dma_unmap_single(&bp->pdev->dev,
1444                                 dma_unmap_addr(rx_buf, mapping),
1445                                 fp->rx_buf_size, DMA_FROM_DEVICE);
1446
1447                rx_buf->data = NULL;
1448                bnx2x_frag_free(fp, data);
1449        }
1450}
1451
1452static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1453{
1454        int j;
1455
1456        for_each_rx_queue_cnic(bp, j) {
1457                bnx2x_free_rx_bds(&bp->fp[j]);
1458        }
1459}
1460
1461static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1462{
1463        int j;
1464
1465        for_each_eth_queue(bp, j) {
1466                struct bnx2x_fastpath *fp = &bp->fp[j];
1467
1468                bnx2x_free_rx_bds(fp);
1469
1470                if (!fp->disable_tpa)
1471                        bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1472        }
1473}
1474
1475void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1476{
1477        bnx2x_free_tx_skbs_cnic(bp);
1478        bnx2x_free_rx_skbs_cnic(bp);
1479}
1480
1481void bnx2x_free_skbs(struct bnx2x *bp)
1482{
1483        bnx2x_free_tx_skbs(bp);
1484        bnx2x_free_rx_skbs(bp);
1485}
1486
1487void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1488{
1489        /* load old values */
1490        u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1491
1492        if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1493                /* leave all but MAX value */
1494                mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1495
1496                /* set new MAX value */
1497                mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1498                                & FUNC_MF_CFG_MAX_BW_MASK;
1499
1500                bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1501        }
1502}
1503
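/* A minimal illustrative sketch (not part of the driver) of the
 * read-modify-write pattern bnx2x_update_max_mf_config() uses on the
 * MAX_BW bit-field.  The mask and shift below are made-up example values;
 * the real ones are FUNC_MF_CFG_MAX_BW_MASK/_SHIFT from the driver headers.
 */
#define EXAMPLE_MAX_BW_MASK     0x00ff0000      /* hypothetical field mask */
#define EXAMPLE_MAX_BW_SHIFT    16              /* hypothetical field shift */

static inline u32 example_replace_max_bw(u32 mf_cfg, u32 value)
{
        mf_cfg &= ~EXAMPLE_MAX_BW_MASK;         /* clear the old MAX value */
        mf_cfg |= (value << EXAMPLE_MAX_BW_SHIFT) & EXAMPLE_MAX_BW_MASK;
        return mf_cfg;                          /* all other bits unchanged */
}
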
1504/**
1505 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1506 *
1507 * @bp:         driver handle
1508 * @nvecs:      number of vectors to be released
1509 */
1510static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1511{
1512        int i, offset = 0;
1513
1514        if (nvecs == offset)
1515                return;
1516
1517        /* VFs don't have a default SB */
1518        if (IS_PF(bp)) {
1519                free_irq(bp->msix_table[offset].vector, bp->dev);
1520                DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1521                   bp->msix_table[offset].vector);
1522                offset++;
1523        }
1524
1525        if (CNIC_SUPPORT(bp)) {
1526                if (nvecs == offset)
1527                        return;
1528                offset++;
1529        }
1530
1531        for_each_eth_queue(bp, i) {
1532                if (nvecs == offset)
1533                        return;
1534                DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1535                   i, bp->msix_table[offset].vector);
1536
1537                free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1538        }
1539}
1540
1541void bnx2x_free_irq(struct bnx2x *bp)
1542{
1543        if (bp->flags & USING_MSIX_FLAG &&
1544            !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1545                int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1546
1547                /* vfs don't have a default status block */
1548                if (IS_PF(bp))
1549                        nvecs++;
1550
1551                bnx2x_free_msix_irqs(bp, nvecs);
1552        } else {
1553                free_irq(bp->dev->irq, bp->dev);
1554        }
1555}
1556
1557int bnx2x_enable_msix(struct bnx2x *bp)
1558{
1559        int msix_vec = 0, i, rc;
1560
1561        /* VFs don't have a default status block */
1562        if (IS_PF(bp)) {
1563                bp->msix_table[msix_vec].entry = msix_vec;
1564                BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1565                               bp->msix_table[0].entry);
1566                msix_vec++;
1567        }
1568
1569        /* CNIC requires an MSI-X vector for itself */
1570        if (CNIC_SUPPORT(bp)) {
1571                bp->msix_table[msix_vec].entry = msix_vec;
1572                BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1573                               msix_vec, bp->msix_table[msix_vec].entry);
1574                msix_vec++;
1575        }
1576
1577        /* We need separate vectors for ETH queues only (not FCoE) */
1578        for_each_eth_queue(bp, i) {
1579                bp->msix_table[msix_vec].entry = msix_vec;
1580                BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1581                               msix_vec, msix_vec, i);
1582                msix_vec++;
1583        }
1584
1585        DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1586           msix_vec);
1587
1588        rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);
1589
1590        /*
1591         * reconfigure number of tx/rx queues according to available
1592         * MSI-X vectors
1593         */
1594        if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
1595                /* how many fewer vectors will we have? */
1596                int diff = msix_vec - rc;
1597
1598                BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1599
1600                rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1601
1602                if (rc) {
1603                        BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1604                        goto no_msix;
1605                }
1606                /*
1607                 * decrease number of queues by number of unallocated entries
1608                 */
1609                bp->num_ethernet_queues -= diff;
1610                bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1611
1612                BNX2X_DEV_INFO("New queue configuration set: %d\n",
1613                               bp->num_queues);
1614        } else if (rc > 0) {
1615                /* Get by with single vector */
1616                rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1617                if (rc) {
1618                        BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1619                                       rc);
1620                        goto no_msix;
1621                }
1622
1623                BNX2X_DEV_INFO("Using single MSI-X vector\n");
1624                bp->flags |= USING_SINGLE_MSIX_FLAG;
1625
1626                BNX2X_DEV_INFO("set number of queues to 1\n");
1627                bp->num_ethernet_queues = 1;
1628                bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1629        } else if (rc < 0) {
1630                BNX2X_DEV_INFO("MSI-X is not attainable  rc %d\n", rc);
1631                goto no_msix;
1632        }
1633
1634        bp->flags |= USING_MSIX_FLAG;
1635
1636        return 0;
1637
1638no_msix:
1639        /* fall back to INTx if not enough memory */
1640        if (rc == -ENOMEM)
1641                bp->flags |= DISABLE_MSI_FLAG;
1642
1643        return rc;
1644}
1645
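/* Illustrative sketch of the retry idiom bnx2x_enable_msix() builds on:
 * with the pci_enable_msix() API of this kernel generation, 0 means
 * success, a negative value is an error, and a positive value is the
 * number of vectors that could actually be provided, so the caller may
 * retry with that smaller count.  Names below are examples only.
 */
static int example_enable_msix(struct pci_dev *pdev,
                               struct msix_entry *table, int want)
{
        int rc = pci_enable_msix(pdev, table, want);

        if (rc > 0)             /* fewer vectors available than requested */
                rc = pci_enable_msix(pdev, table, rc);

        return rc;              /* 0 on success, negative on failure */
}
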
1646static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1647{
1648        int i, rc, offset = 0;
1649
1650        /* no default status block for vf */
1651        if (IS_PF(bp)) {
1652                rc = request_irq(bp->msix_table[offset++].vector,
1653                                 bnx2x_msix_sp_int, 0,
1654                                 bp->dev->name, bp->dev);
1655                if (rc) {
1656                        BNX2X_ERR("request sp irq failed\n");
1657                        return -EBUSY;
1658                }
1659        }
1660
1661        if (CNIC_SUPPORT(bp))
1662                offset++;
1663
1664        for_each_eth_queue(bp, i) {
1665                struct bnx2x_fastpath *fp = &bp->fp[i];
1666                snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1667                         bp->dev->name, i);
1668
1669                rc = request_irq(bp->msix_table[offset].vector,
1670                                 bnx2x_msix_fp_int, 0, fp->name, fp);
1671                if (rc) {
1672                        BNX2X_ERR("request fp #%d irq (%d) failed  rc %d\n", i,
1673                              bp->msix_table[offset].vector, rc);
1674                        bnx2x_free_msix_irqs(bp, offset);
1675                        return -EBUSY;
1676                }
1677
1678                offset++;
1679        }
1680
1681        i = BNX2X_NUM_ETH_QUEUES(bp);
1682        if (IS_PF(bp)) {
1683                offset = 1 + CNIC_SUPPORT(bp);
1684                netdev_info(bp->dev,
1685                            "using MSI-X  IRQs: sp %d  fp[%d] %d ... fp[%d] %d\n",
1686                            bp->msix_table[0].vector,
1687                            0, bp->msix_table[offset].vector,
1688                            i - 1, bp->msix_table[offset + i - 1].vector);
1689        } else {
1690                offset = CNIC_SUPPORT(bp);
1691                netdev_info(bp->dev,
1692                            "using MSI-X  IRQs: fp[%d] %d ... fp[%d] %d\n",
1693                            0, bp->msix_table[offset].vector,
1694                            i - 1, bp->msix_table[offset + i - 1].vector);
1695        }
1696        return 0;
1697}
1698
1699int bnx2x_enable_msi(struct bnx2x *bp)
1700{
1701        int rc;
1702
1703        rc = pci_enable_msi(bp->pdev);
1704        if (rc) {
1705                BNX2X_DEV_INFO("MSI is not attainable\n");
1706                return -1;
1707        }
1708        bp->flags |= USING_MSI_FLAG;
1709
1710        return 0;
1711}
1712
1713static int bnx2x_req_irq(struct bnx2x *bp)
1714{
1715        unsigned long flags;
1716        unsigned int irq;
1717
1718        if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1719                flags = 0;
1720        else
1721                flags = IRQF_SHARED;
1722
1723        if (bp->flags & USING_MSIX_FLAG)
1724                irq = bp->msix_table[0].vector;
1725        else
1726                irq = bp->pdev->irq;
1727
1728        return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1729}
1730
1731static int bnx2x_setup_irqs(struct bnx2x *bp)
1732{
1733        int rc = 0;
1734        if (bp->flags & USING_MSIX_FLAG &&
1735            !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1736                rc = bnx2x_req_msix_irqs(bp);
1737                if (rc)
1738                        return rc;
1739        } else {
1740                rc = bnx2x_req_irq(bp);
1741                if (rc) {
1742                        BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
1743                        return rc;
1744                }
1745                if (bp->flags & USING_MSI_FLAG) {
1746                        bp->dev->irq = bp->pdev->irq;
1747                        netdev_info(bp->dev, "using MSI IRQ %d\n",
1748                                    bp->dev->irq);
1749                }
1750                if (bp->flags & USING_MSIX_FLAG) {
1751                        bp->dev->irq = bp->msix_table[0].vector;
1752                        netdev_info(bp->dev, "using MSIX IRQ %d\n",
1753                                    bp->dev->irq);
1754                }
1755        }
1756
1757        return 0;
1758}
1759
1760static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1761{
1762        int i;
1763
1764        for_each_rx_queue_cnic(bp, i) {
1765                bnx2x_fp_init_lock(&bp->fp[i]);
1766                napi_enable(&bnx2x_fp(bp, i, napi));
1767        }
1768}
1769
1770static void bnx2x_napi_enable(struct bnx2x *bp)
1771{
1772        int i;
1773
1774        for_each_eth_queue(bp, i) {
1775                bnx2x_fp_init_lock(&bp->fp[i]);
1776                napi_enable(&bnx2x_fp(bp, i, napi));
1777        }
1778}
1779
1780static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1781{
1782        int i;
1783
1784        local_bh_disable();
1785        for_each_rx_queue_cnic(bp, i) {
1786                napi_disable(&bnx2x_fp(bp, i, napi));
1787                while (!bnx2x_fp_lock_napi(&bp->fp[i]))
1788                        mdelay(1);
1789        }
1790        local_bh_enable();
1791}
1792
1793static void bnx2x_napi_disable(struct bnx2x *bp)
1794{
1795        int i;
1796
1797        local_bh_disable();
1798        for_each_eth_queue(bp, i) {
1799                napi_disable(&bnx2x_fp(bp, i, napi));
1800                while (!bnx2x_fp_lock_napi(&bp->fp[i]))
1801                        mdelay(1);
1802        }
1803        local_bh_enable();
1804}
1805
1806void bnx2x_netif_start(struct bnx2x *bp)
1807{
1808        if (netif_running(bp->dev)) {
1809                bnx2x_napi_enable(bp);
1810                if (CNIC_LOADED(bp))
1811                        bnx2x_napi_enable_cnic(bp);
1812                bnx2x_int_enable(bp);
1813                if (bp->state == BNX2X_STATE_OPEN)
1814                        netif_tx_wake_all_queues(bp->dev);
1815        }
1816}
1817
1818void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1819{
1820        bnx2x_int_disable_sync(bp, disable_hw);
1821        bnx2x_napi_disable(bp);
1822        if (CNIC_LOADED(bp))
1823                bnx2x_napi_disable_cnic(bp);
1824}
1825
1826u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1827{
1828        struct bnx2x *bp = netdev_priv(dev);
1829
1830        if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1831                struct ethhdr *hdr = (struct ethhdr *)skb->data;
1832                u16 ether_type = ntohs(hdr->h_proto);
1833
1834                /* Skip VLAN tag if present */
1835                if (ether_type == ETH_P_8021Q) {
1836                        struct vlan_ethhdr *vhdr =
1837                                (struct vlan_ethhdr *)skb->data;
1838
1839                        ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1840                }
1841
1842                /* If ethertype is FCoE or FIP - use FCoE ring */
1843                if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1844                        return bnx2x_fcoe_tx(bp, txq_index);
1845        }
1846
1847        /* select a non-FCoE queue */
1848        return __netdev_pick_tx(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
1849}
1850
1851void bnx2x_set_num_queues(struct bnx2x *bp)
1852{
1853        /* RSS queues */
1854        bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1855
1856        /* override in STORAGE SD modes */
1857        if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
1858                bp->num_ethernet_queues = 1;
1859
1860        /* Add special queues */
1861        bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1862        bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1863
1864        BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1865}
1866
1867/**
1868 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1869 *
1870 * @bp:         Driver handle
1871 *
1872 * We currently support at most 16 Tx queues for each CoS, thus we
1873 * allocate a multiple of 16 for the ETH L2 rings according to the value of
1874 * bp->max_cos.
1875 *
1876 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1877 * index after all ETH L2 indices.
1878 *
1879 * If the actual number of Tx queues (for each CoS) is less than 16, then
1880 * there will be holes at the end of each group of 16 ETH L2 indices (0..15,
1881 * 16..31, ...), i.e. indices that are not coupled with any real Tx queue.
1882 *
1883 * The proper configuration of skb->queue_mapping is handled by
1884 * bnx2x_select_queue() and __skb_tx_hash().
1885 *
1886 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1887 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1888 */
1889static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1890{
1891        int rc, tx, rx;
1892
1893        tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1894        rx = BNX2X_NUM_ETH_QUEUES(bp);
1895
1896        /* account for fcoe queue */
1897        if (include_cnic && !NO_FCOE(bp)) {
1898                rx++;
1899                tx++;
1900        }
1901
1902        rc = netif_set_real_num_tx_queues(bp->dev, tx);
1903        if (rc) {
1904                BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1905                return rc;
1906        }
1907        rc = netif_set_real_num_rx_queues(bp->dev, rx);
1908        if (rc) {
1909                BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1910                return rc;
1911        }
1912
1913        DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
1914                          tx, rx);
1915
1916        return rc;
1917}
1918
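/* Worked example for the computation above (numbers chosen purely for
 * illustration): with 8 ETH queues, bp->max_cos == 3 and an FCoE L2 queue
 * present, the driver requests
 *
 *      tx = 8 * 3 + 1 = 25 real Tx queues
 *      rx = 8     + 1 =  9 real Rx queues
 *
 * from the stack via netif_set_real_num_tx_queues() and
 * netif_set_real_num_rx_queues().
 */
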
1919static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1920{
1921        int i;
1922
1923        for_each_queue(bp, i) {
1924                struct bnx2x_fastpath *fp = &bp->fp[i];
1925                u32 mtu;
1926
1927                /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1928                if (IS_FCOE_IDX(i))
1929                        /*
1930                         * Although no IP frames are expected to arrive on
1931                         * this ring, we still want to add an
1932                         * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1933                         * overrun attack.
1934                         */
1935                        mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
1936                else
1937                        mtu = bp->dev->mtu;
1938                fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1939                                  IP_HEADER_ALIGNMENT_PADDING +
1940                                  ETH_OVREHEAD +
1941                                  mtu +
1942                                  BNX2X_FW_RX_ALIGN_END;
1943                /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
1944                if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1945                        fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1946                else
1947                        fp->rx_frag_size = 0;
1948        }
1949}
1950
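/* Worked summary of the sizing decision above, assuming a 1500-byte MTU
 * (the alignment/padding constants are defined in the driver headers and
 * are not spelled out here): rx_buf_size is the sum of the FW alignment
 * start/end, the IP header alignment padding, the Ethernet overhead and
 * the MTU.  If that sum plus NET_SKB_PAD still fits within PAGE_SIZE
 * (4096 on most architectures), rx_frag_size is set to
 * rx_buf_size + NET_SKB_PAD so page-fragment allocation can be used;
 * otherwise rx_frag_size is left at 0.
 */
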
1951static int bnx2x_init_rss_pf(struct bnx2x *bp)
1952{
1953        int i;
1954        u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1955
1956        /* Prepare the initial contents for the indirection table if RSS is
1957         * enabled
1958         */
1959        for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1960                bp->rss_conf_obj.ind_table[i] =
1961                        bp->fp->cl_id +
1962                        ethtool_rxfh_indir_default(i, num_eth_queues);
1963
1964        /*
1965         * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1966         * per-port, so if explicit configuration is needed, do it only
1967         * for a PMF.
1968         *
1969         * For 57712 and newer on the other hand it's a per-function
1970         * configuration.
1971         */
1972        return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
1973}
1974
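/* Small sketch of the default spreading performed above.  In mainline,
 * ethtool_rxfh_indir_default(i, n) is simply i % n, so with e.g. 4 ETH
 * queues and a base client id cl_id the indirection table becomes
 *
 *      ind_table[] = { cl_id + 0, cl_id + 1, cl_id + 2, cl_id + 3,
 *                      cl_id + 0, cl_id + 1, ... }
 *
 * i.e. the RSS hash buckets are distributed round-robin over the queues.
 */
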
1975int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
1976                        bool config_hash)
1977{
1978        struct bnx2x_config_rss_params params = {NULL};
1979
1980        /* Although RSS is meaningless when there is a single HW queue we
1981         * still need it enabled in order to have HW Rx hash generated.
1982         *
1983         * if (!is_eth_multi(bp))
1984         *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
1985         */
1986
1987        params.rss_obj = rss_obj;
1988
1989        __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1990
1991        __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
1992
1993        /* RSS configuration */
1994        __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1995        __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1996        __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1997        __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
1998        if (rss_obj->udp_rss_v4)
1999                __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2000        if (rss_obj->udp_rss_v6)
2001                __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
2002
2003        /* Hash bits */
2004        params.rss_result_mask = MULTI_MASK;
2005
2006        memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
2007
2008        if (config_hash) {
2009                /* RSS keys */
2010                prandom_bytes(params.rss_key, sizeof(params.rss_key));
2011                __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
2012        }
2013
2014        return bnx2x_config_rss(bp, &params);
2015}
2016
2017static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
2018{
2019        struct bnx2x_func_state_params func_params = {NULL};
2020
2021        /* Prepare parameters for function state transitions */
2022        __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2023
2024        func_params.f_obj = &bp->func_obj;
2025        func_params.cmd = BNX2X_F_CMD_HW_INIT;
2026
2027        func_params.params.hw_init.load_phase = load_code;
2028
2029        return bnx2x_func_state_change(bp, &func_params);
2030}
2031
2032/*
2033 * Cleans the objects that have internal lists without sending
2034 * ramrods. Should be run when interrupts are disabled.
2035 */
2036void bnx2x_squeeze_objects(struct bnx2x *bp)
2037{
2038        int rc;
2039        unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2040        struct bnx2x_mcast_ramrod_params rparam = {NULL};
2041        struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2042
2043        /***************** Cleanup MACs' object first *************************/
2044
2045        /* Wait for completion of the requested commands */
2046        __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2047        /* Perform a dry cleanup */
2048        __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2049
2050        /* Clean ETH primary MAC */
2051        __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2052        rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2053                                 &ramrod_flags);
2054        if (rc != 0)
2055                BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2056
2057        /* Cleanup UC list */
2058        vlan_mac_flags = 0;
2059        __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2060        rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2061                                 &ramrod_flags);
2062        if (rc != 0)
2063                BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2064
2065        /***************** Now clean mcast object *****************************/
2066        rparam.mcast_obj = &bp->mcast_obj;
2067        __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2068
2069        /* Add a DEL command... */
2070        rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2071        if (rc < 0)
2072                BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2073                          rc);
2074
2075        /* ...and wait until all pending commands are cleared */
2076        rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2077        while (rc != 0) {
2078                if (rc < 0) {
2079                        BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2080                                  rc);
2081                        return;
2082                }
2083
2084                rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2085        }
2086}
2087
2088#ifndef BNX2X_STOP_ON_ERROR
2089#define LOAD_ERROR_EXIT(bp, label) \
2090        do { \
2091                (bp)->state = BNX2X_STATE_ERROR; \
2092                goto label; \
2093        } while (0)
2094
2095#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2096        do { \
2097                bp->cnic_loaded = false; \
2098                goto label; \
2099        } while (0)
2100#else /*BNX2X_STOP_ON_ERROR*/
2101#define LOAD_ERROR_EXIT(bp, label) \
2102        do { \
2103                (bp)->state = BNX2X_STATE_ERROR; \
2104                (bp)->panic = 1; \
2105                return -EBUSY; \
2106        } while (0)
2107#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2108        do { \
2109                bp->cnic_loaded = false; \
2110                (bp)->panic = 1; \
2111                return -EBUSY; \
2112        } while (0)
2113#endif /*BNX2X_STOP_ON_ERROR*/
2114
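/* A minimal sketch (illustrative function names only) of the goto-based
 * unwinding these macros wrap when BNX2X_STOP_ON_ERROR is not defined:
 *
 *      rc = alloc_a(bp);
 *      if (rc)
 *              LOAD_ERROR_EXIT(bp, err0);   (sets state = ERROR, goto err0)
 *      rc = alloc_b(bp);
 *      if (rc)
 *              LOAD_ERROR_EXIT(bp, err1);
 *      return 0;
 * err1:
 *      free_a(bp);
 * err0:
 *      return rc;
 *
 * When BNX2X_STOP_ON_ERROR is defined the macro instead sets bp->panic and
 * returns -EBUSY immediately; the error labels themselves live under the
 * matching #ifndef blocks in the load functions below.
 */
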
2115static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2116{
2117        BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2118                       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2119        return;
2120}
2121
2122static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2123{
2124        int num_groups, vf_headroom = 0;
2125        int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2126
2127        /* number of queues for statistics is number of eth queues + FCoE */
2128        u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2129
2130        /* Total number of FW statistics requests =
2131         * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2132         * and fcoe l2 queue) stats + num of queues (which includes another 1
2133         * for fcoe l2 queue if applicable)
2134         */
2135        bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2136
2137        /* vf stats appear in the request list, but their data is allocated by
2138         * the VFs themselves. We don't include them in the bp->fw_stats_num as
2139         * it is used to determine where to place the vf stats queries in the
2140         * request struct
2141         */
2142        if (IS_SRIOV(bp))
2143                vf_headroom = bnx2x_vf_headroom(bp);
2144
2145        /* Request is built from stats_query_header and an array of
2146         * stats_query_cmd_group each of which contains
2147         * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2148         * configured in the stats_query_header.
2149         */
2150        num_groups =
2151                (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2152                 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2153                 1 : 0));
2154
2155        DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2156           bp->fw_stats_num, vf_headroom, num_groups);
2157        bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2158                num_groups * sizeof(struct stats_query_cmd_group);
2159
2160        /* Data for statistics requests + stats_counter
2161         * stats_counter holds per-STORM counters that are incremented
2162         * when STORM has finished with the current request.
2163         * Memory for FCoE offloaded statistics is counted anyway,
2164         * even if it will not be sent.
2165         * VF stats are not accounted for here as the data of VF stats is stored
2166         * in memory allocated by the VF, not here.
2167         */
2168        bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2169                sizeof(struct per_pf_stats) +
2170                sizeof(struct fcoe_statistics_params) +
2171                sizeof(struct per_queue_stats) * num_queue_stats +
2172                sizeof(struct stats_counter);
2173
2174        BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
2175                        bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2176
2177        /* Set shortcuts */
2178        bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2179        bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2180        bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2181                ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2182        bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2183                bp->fw_stats_req_sz;
2184
2185        DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2186           U64_HI(bp->fw_stats_req_mapping),
2187           U64_LO(bp->fw_stats_req_mapping));
2188        DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2189           U64_HI(bp->fw_stats_data_mapping),
2190           U64_LO(bp->fw_stats_data_mapping));
2191        return 0;
2192
2193alloc_mem_err:
2194        bnx2x_free_fw_stats_mem(bp);
2195        BNX2X_ERR("Can't allocate FW stats memory\n");
2196        return -ENOMEM;
2197}
2198
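/* The num_groups expression above is a plain ceiling division; with the
 * kernel's DIV_ROUND_UP() helper it could be written equivalently as
 *
 *      num_groups = DIV_ROUND_UP(bp->fw_stats_num + vf_headroom,
 *                                STATS_QUERY_CMD_COUNT);
 *
 * since DIV_ROUND_UP(n, d) is ((n) + (d) - 1) / (d).
 */
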
2199/* send load request to mcp and analyze response */
2200static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2201{
2202        u32 param;
2203
2204        /* init fw_seq */
2205        bp->fw_seq =
2206                (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2207                 DRV_MSG_SEQ_NUMBER_MASK);
2208        BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2209
2210        /* Get current FW pulse sequence */
2211        bp->fw_drv_pulse_wr_seq =
2212                (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2213                 DRV_PULSE_SEQ_MASK);
2214        BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2215
2216        param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2217
2218        if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2219                param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2220
2221        /* load request */
2222        (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2223
2224        /* if mcp fails to respond we must abort */
2225        if (!(*load_code)) {
2226                BNX2X_ERR("MCP response failure, aborting\n");
2227                return -EBUSY;
2228        }
2229
2230        /* If mcp refused (e.g. other port is in diagnostic mode) we
2231         * must abort
2232         */
2233        if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2234                BNX2X_ERR("MCP refused load request, aborting\n");
2235                return -EBUSY;
2236        }
2237        return 0;
2238}
2239
2240/* check whether another PF has already loaded FW to chip. In
2241 * virtualized environments a pf from another VM may have already
2242 * initialized the device including loading FW
2243 */
2244int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
2245{
2246        /* is another pf loaded on this engine? */
2247        if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2248            load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2249                /* build my FW version dword */
2250                u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2251                        (BCM_5710_FW_MINOR_VERSION << 8) +
2252                        (BCM_5710_FW_REVISION_VERSION << 16) +
2253                        (BCM_5710_FW_ENGINEERING_VERSION << 24);
2254
2255                /* read loaded FW from chip */
2256                u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2257
2258                DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2259                   loaded_fw, my_fw);
2260
2261                /* abort nic load if version mismatch */
2262                if (my_fw != loaded_fw) {
2263                        BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2264                                  loaded_fw, my_fw);
2265                        return -EBUSY;
2266                }
2267        }
2268        return 0;
2269}
2270
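/* Illustrative helper (not part of the driver) showing how the packed FW
 * version dword built in bnx2x_nic_load_analyze_req() can be decoded again,
 * mirroring the byte layout used in the comparison there:
 */
static inline void example_unpack_fw_version(u32 fw, u8 *major, u8 *minor,
                                             u8 *rev, u8 *eng)
{
        *major = fw & 0xff;             /* BCM_5710_FW_MAJOR_VERSION */
        *minor = (fw >> 8) & 0xff;      /* BCM_5710_FW_MINOR_VERSION */
        *rev   = (fw >> 16) & 0xff;     /* BCM_5710_FW_REVISION_VERSION */
        *eng   = (fw >> 24) & 0xff;     /* BCM_5710_FW_ENGINEERING_VERSION */
}
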
2271/* returns the "mcp load_code" according to global load_count array */
2272static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2273{
2274        int path = BP_PATH(bp);
2275
2276        DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
2277           path, load_count[path][0], load_count[path][1],
2278           load_count[path][2]);
2279        load_count[path][0]++;
2280        load_count[path][1 + port]++;
2281        DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
2282           path, load_count[path][0], load_count[path][1],
2283           load_count[path][2]);
2284        if (load_count[path][0] == 1)
2285                return FW_MSG_CODE_DRV_LOAD_COMMON;
2286        else if (load_count[path][1 + port] == 1)
2287                return FW_MSG_CODE_DRV_LOAD_PORT;
2288        else
2289                return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2290}
2291
2292/* mark PMF if applicable */
2293static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2294{
2295        if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2296            (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2297            (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2298                bp->port.pmf = 1;
2299                /* We need the barrier to ensure the ordering between the
2300                 * writing to bp->port.pmf here and reading it from the
2301                 * bnx2x_periodic_task().
2302                 */
2303                smp_mb();
2304        } else {
2305                bp->port.pmf = 0;
2306        }
2307
2308        DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2309}
2310
2311static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2312{
2313        if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2314             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2315            (bp->common.shmem2_base)) {
2316                if (SHMEM2_HAS(bp, dcc_support))
2317                        SHMEM2_WR(bp, dcc_support,
2318                                  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2319                                   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2320                if (SHMEM2_HAS(bp, afex_driver_support))
2321                        SHMEM2_WR(bp, afex_driver_support,
2322                                  SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2323        }
2324
2325        /* Set AFEX default VLAN tag to an invalid value */
2326        bp->afex_def_vlan_tag = -1;
2327}
2328
2329/**
2330 * bnx2x_bz_fp - zero content of the fastpath structure.
2331 *
2332 * @bp:         driver handle
2333 * @index:      fastpath index to be zeroed
2334 *
2335 * Makes sure the contents of the bp->fp[index].napi is kept
2336 * intact.
2337 */
2338static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2339{
2340        struct bnx2x_fastpath *fp = &bp->fp[index];
2341        int cos;
2342        struct napi_struct orig_napi = fp->napi;
2343        struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2344
2345        /* bzero bnx2x_fastpath contents */
2346        if (fp->tpa_info)
2347                memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2348                       sizeof(struct bnx2x_agg_info));
2349        memset(fp, 0, sizeof(*fp));
2350
2351        /* Restore the NAPI object as it has been already initialized */
2352        fp->napi = orig_napi;
2353        fp->tpa_info = orig_tpa_info;
2354        fp->bp = bp;
2355        fp->index = index;
2356        if (IS_ETH_FP(fp))
2357                fp->max_cos = bp->max_cos;
2358        else
2359                /* Special queues support only one CoS */
2360                fp->max_cos = 1;
2361
2362        /* Init txdata pointers */
2363        if (IS_FCOE_FP(fp))
2364                fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2365        if (IS_ETH_FP(fp))
2366                for_each_cos_in_tx_queue(fp, cos)
2367                        fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2368                                BNX2X_NUM_ETH_QUEUES(bp) + index];
2369
2370        /* Set the TPA flag for each queue. The TPA flag determines the queue's
2371         * minimal size, so it must be set prior to queue memory allocation.
2372         */
2373        fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2374                                  (bp->flags & GRO_ENABLE_FLAG &&
2375                                   bnx2x_mtu_allows_gro(bp->dev->mtu)));
2376        if (bp->flags & TPA_ENABLE_FLAG)
2377                fp->mode = TPA_MODE_LRO;
2378        else if (bp->flags & GRO_ENABLE_FLAG)
2379                fp->mode = TPA_MODE_GRO;
2380
2381        /* We don't want TPA on an FCoE L2 ring */
2382        if (IS_FCOE_FP(fp))
2383                fp->disable_tpa = 1;
2384}
2385
2386int bnx2x_load_cnic(struct bnx2x *bp)
2387{
2388        int i, rc, port = BP_PORT(bp);
2389
2390        DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2391
2392        mutex_init(&bp->cnic_mutex);
2393
2394        if (IS_PF(bp)) {
2395                rc = bnx2x_alloc_mem_cnic(bp);
2396                if (rc) {
2397                        BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2398                        LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2399                }
2400        }
2401
2402        rc = bnx2x_alloc_fp_mem_cnic(bp);
2403        if (rc) {
2404                BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2405                LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2406        }
2407
2408        /* Update the number of queues with the cnic queues */
2409        rc = bnx2x_set_real_num_queues(bp, 1);
2410        if (rc) {
2411                BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2412                LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2413        }
2414
2415        /* Add all CNIC NAPI objects */
2416        bnx2x_add_all_napi_cnic(bp);
2417        DP(NETIF_MSG_IFUP, "cnic napi added\n");
2418        bnx2x_napi_enable_cnic(bp);
2419
2420        rc = bnx2x_init_hw_func_cnic(bp);
2421        if (rc)
2422                LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2423
2424        bnx2x_nic_init_cnic(bp);
2425
2426        if (IS_PF(bp)) {
2427                /* Enable Timer scan */
2428                REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2429
2430                /* setup cnic queues */
2431                for_each_cnic_queue(bp, i) {
2432                        rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2433                        if (rc) {
2434                                BNX2X_ERR("Queue setup failed\n");
2435                                LOAD_ERROR_EXIT(bp, load_error_cnic2);
2436                        }
2437                }
2438        }
2439
2440        /* Initialize Rx filter. */
2441        netif_addr_lock_bh(bp->dev);
2442        bnx2x_set_rx_mode(bp->dev);
2443        netif_addr_unlock_bh(bp->dev);
2444
2445        /* re-read iscsi info */
2446        bnx2x_get_iscsi_info(bp);
2447        bnx2x_setup_cnic_irq_info(bp);
2448        bnx2x_setup_cnic_info(bp);
2449        bp->cnic_loaded = true;
2450        if (bp->state == BNX2X_STATE_OPEN)
2451                bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2452
2453        DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2454
2455        return 0;
2456
2457#ifndef BNX2X_STOP_ON_ERROR
2458load_error_cnic2:
2459        /* Disable Timer scan */
2460        REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2461
2462load_error_cnic1:
2463        bnx2x_napi_disable_cnic(bp);
2464        /* Update the number of queues without the cnic queues */
2465        rc = bnx2x_set_real_num_queues(bp, 0);
2466        if (rc)
2467                BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2468load_error_cnic0:
2469        BNX2X_ERR("CNIC-related load failed\n");
2470        bnx2x_free_fp_mem_cnic(bp);
2471        bnx2x_free_mem_cnic(bp);
2472        return rc;
2473#endif /* ! BNX2X_STOP_ON_ERROR */
2474}
2475
2476/* must be called with rtnl_lock */
2477int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2478{
2479        int port = BP_PORT(bp);
2480        int i, rc = 0, load_code = 0;
2481
2482        DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2483        DP(NETIF_MSG_IFUP,
2484           "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2485
2486#ifdef BNX2X_STOP_ON_ERROR
2487        if (unlikely(bp->panic)) {
2488                BNX2X_ERR("Can't load NIC when there is panic\n");
2489                return -EPERM;
2490        }
2491#endif
2492
2493        bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2494
2495        /* zero the structure w/o any lock, before SP handler is initialized */
2496        memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2497        __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2498                &bp->last_reported_link.link_report_flags);
2499
2500        if (IS_PF(bp))
2501                /* must be called before memory allocation and HW init */
2502                bnx2x_ilt_set_info(bp);
2503
2504        /*
2505         * Zero the fastpath structures while preserving invariants that are
2506         * allocated only once: the napi struct, fp index, max_cos, bp pointer.
2507         * Also set fp->disable_tpa and txdata_ptr.
2508         */
2509        DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
2510        for_each_queue(bp, i)
2511                bnx2x_bz_fp(bp, i);
2512        memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2513                                  bp->num_cnic_queues) *
2514                                  sizeof(struct bnx2x_fp_txdata));
2515
2516        bp->fcoe_init = false;
2517
2518        /* Set the receive queues buffer size */
2519        bnx2x_set_rx_buf_size(bp);
2520
2521        if (IS_PF(bp)) {
2522                rc = bnx2x_alloc_mem(bp);
2523                if (rc) {
2524                        BNX2X_ERR("Unable to allocate bp memory\n");
2525                        return rc;
2526                }
2527        }
2528
2529        /* Allocate memory for FW statistics */
2530        if (bnx2x_alloc_fw_stats_mem(bp))
2531                LOAD_ERROR_EXIT(bp, load_error0);
2532
2533        /* Needs to be done after alloc mem, since it is self-adjusting to the
2534         * amount of memory available for RSS queues.
2535         */
2536        rc = bnx2x_alloc_fp_mem(bp);
2537        if (rc) {
2538                BNX2X_ERR("Unable to allocate memory for fps\n");
2539                LOAD_ERROR_EXIT(bp, load_error0);
2540        }
2541
2542        /* request pf to initialize status blocks */
2543        if (IS_VF(bp)) {
2544                rc = bnx2x_vfpf_init(bp);
2545                if (rc)
2546                        LOAD_ERROR_EXIT(bp, load_error0);
2547        }
2548
2549        /* Since bnx2x_alloc_mem() may update bp->num_queues,
2550         * bnx2x_set_real_num_queues() should always come after it.
2551         * At this stage cnic queues are not counted.
2552         */
2553        rc = bnx2x_set_real_num_queues(bp, 0);
2554        if (rc) {
2555                BNX2X_ERR("Unable to set real_num_queues\n");
2556                LOAD_ERROR_EXIT(bp, load_error0);
2557        }
2558
2559        /* Configure multi-CoS mappings in the kernel.
2560         * This configuration may be overridden by a multi-class queueing
2561         * discipline or by a DCBX negotiation result.
2562         */
2563        bnx2x_setup_tc(bp->dev, bp->max_cos);
2564
2565        /* Add all NAPI objects */
2566        bnx2x_add_all_napi(bp);
2567        DP(NETIF_MSG_IFUP, "napi added\n");
2568        bnx2x_napi_enable(bp);
2569
2570        if (IS_PF(bp)) {
2571                /* set pf load just before approaching the MCP */
2572                bnx2x_set_pf_load(bp);
2573
2574                /* if mcp exists send load request and analyze response */
2575                if (!BP_NOMCP(bp)) {
2576                        /* attempt to load pf */
2577                        rc = bnx2x_nic_load_request(bp, &load_code);
2578                        if (rc)
2579                                LOAD_ERROR_EXIT(bp, load_error1);
2580
2581                        /* what did mcp say? */
2582                        rc = bnx2x_nic_load_analyze_req(bp, load_code);
2583                        if (rc) {
2584                                bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2585                                LOAD_ERROR_EXIT(bp, load_error2);
2586                        }
2587                } else {
2588                        load_code = bnx2x_nic_load_no_mcp(bp, port);
2589                }
2590
2591                /* mark pmf if applicable */
2592                bnx2x_nic_load_pmf(bp, load_code);
2593
2594                /* Init Function state controlling object */
2595                bnx2x__init_func_obj(bp);
2596
2597                /* Initialize HW */
2598                rc = bnx2x_init_hw(bp, load_code);
2599                if (rc) {
2600                        BNX2X_ERR("HW init failed, aborting\n");
2601                        bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2602                        LOAD_ERROR_EXIT(bp, load_error2);
2603                }
2604        }
2605
2606        bnx2x_pre_irq_nic_init(bp);
2607
2608        /* Connect to IRQs */
2609        rc = bnx2x_setup_irqs(bp);
2610        if (rc) {
2611                BNX2X_ERR("setup irqs failed\n");
2612                if (IS_PF(bp))
2613                        bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2614                LOAD_ERROR_EXIT(bp, load_error2);
2615        }
2616
2617        /* Init per-function objects */
2618        if (IS_PF(bp)) {
2619                /* Setup NIC internals and enable interrupts */
2620                bnx2x_post_irq_nic_init(bp, load_code);
2621
2622                bnx2x_init_bp_objs(bp);
2623                bnx2x_iov_nic_init(bp);
2624
2625                /* Set AFEX default VLAN tag to an invalid value */
2626                bp->afex_def_vlan_tag = -1;
2627                bnx2x_nic_load_afex_dcc(bp, load_code);
2628                bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2629                rc = bnx2x_func_start(bp);
2630                if (rc) {
2631                        BNX2X_ERR("Function start failed!\n");
2632                        bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2633
2634                        LOAD_ERROR_EXIT(bp, load_error3);
2635                }
2636
2637                /* Send LOAD_DONE command to MCP */
2638                if (!BP_NOMCP(bp)) {
2639                        load_code = bnx2x_fw_command(bp,
2640                                                     DRV_MSG_CODE_LOAD_DONE, 0);
2641                        if (!load_code) {
2642                                BNX2X_ERR("MCP response failure, aborting\n");
2643                                rc = -EBUSY;
2644                                LOAD_ERROR_EXIT(bp, load_error3);
2645                        }
2646                }
2647
2648                /* initialize FW coalescing state machines in RAM */
2649                bnx2x_update_coalesce(bp);
2650
2651                /* setup the leading queue */
2652                rc = bnx2x_setup_leading(bp);
2653                if (rc) {
2654                        BNX2X_ERR("Setup leading failed!\n");
2655                        LOAD_ERROR_EXIT(bp, load_error3);
2656                }
2657
2658                /* set up the rest of the queues */
2659                for_each_nondefault_eth_queue(bp, i) {
2660                        rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2661                        if (rc) {
2662                                BNX2X_ERR("Queue setup failed\n");
2663                                LOAD_ERROR_EXIT(bp, load_error3);
2664                        }
2665                }
2666
2667                /* setup rss */
2668                rc = bnx2x_init_rss_pf(bp);
2669                if (rc) {
2670                        BNX2X_ERR("PF RSS init failed\n");
2671                        LOAD_ERROR_EXIT(bp, load_error3);
2672                }
2673
2674        } else { /* vf */
2675                for_each_eth_queue(bp, i) {
2676                        rc = bnx2x_vfpf_setup_q(bp, i);
2677                        if (rc) {
2678                                BNX2X_ERR("Queue setup failed\n");
2679                                LOAD_ERROR_EXIT(bp, load_error3);
2680                        }
2681                }
2682        }
2683
2684        /* Now that the clients are configured we are ready to work */
2685        bp->state = BNX2X_STATE_OPEN;
2686
2687        /* Configure a ucast MAC */
2688        if (IS_PF(bp))
2689                rc = bnx2x_set_eth_mac(bp, true);
2690        else /* vf */
2691                rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2692                                           true);
2693        if (rc) {
2694                BNX2X_ERR("Setting Ethernet MAC failed\n");
2695                LOAD_ERROR_EXIT(bp, load_error3);
2696        }
2697
2698        if (IS_PF(bp) && bp->pending_max) {
2699                bnx2x_update_max_mf_config(bp, bp->pending_max);
2700                bp->pending_max = 0;
2701        }
2702
2703        if (bp->port.pmf) {
2704                rc = bnx2x_initial_phy_init(bp, load_mode);
2705                if (rc)
2706                        LOAD_ERROR_EXIT(bp, load_error3);
2707        }
2708        bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2709
2710        /* Start fast path */
2711
2712        /* Initialize Rx filter. */
2713        netif_addr_lock_bh(bp->dev);
2714        bnx2x_set_rx_mode(bp->dev);
2715        netif_addr_unlock_bh(bp->dev);
2716
2717        /* Start the Tx */
2718        switch (load_mode) {
2719        case LOAD_NORMAL:
2720                /* Tx queue should be only re-enabled */
2721                netif_tx_wake_all_queues(bp->dev);
2722                break;
2723
2724        case LOAD_OPEN:
2725                netif_tx_start_all_queues(bp->dev);
2726                smp_mb__after_clear_bit();
2727                break;
2728
2729        case LOAD_DIAG:
2730        case LOAD_LOOPBACK_EXT:
2731                bp->state = BNX2X_STATE_DIAG;
2732                break;
2733
2734        default:
2735                break;
2736        }
2737
2738        if (bp->port.pmf)
2739                bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2740        else
2741                bnx2x__link_status_update(bp);
2742
2743        /* start the timer */
2744        mod_timer(&bp->timer, jiffies + bp->current_interval);
2745
2746        if (CNIC_ENABLED(bp))
2747                bnx2x_load_cnic(bp);
2748
2749        if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2750                /* mark driver is loaded in shmem2 */
2751                u32 val;
2752                val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2753                SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2754                          val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2755                          DRV_FLAGS_CAPABILITIES_LOADED_L2);
2756        }
2757
2758        /* Wait for all pending SP commands to complete */
2759        if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2760                BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2761                bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2762                return -EBUSY;
2763        }
2764
2765        /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2766        if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2767                bnx2x_dcbx_init(bp, false);
2768
2769        DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2770
2771        return 0;
2772
2773#ifndef BNX2X_STOP_ON_ERROR
2774load_error3:
2775        if (IS_PF(bp)) {
2776                bnx2x_int_disable_sync(bp, 1);
2777
2778                /* Clean queueable objects */
2779                bnx2x_squeeze_objects(bp);
2780        }
2781
2782        /* Free SKBs, SGEs, TPA pool and driver internals */
2783        bnx2x_free_skbs(bp);
2784        for_each_rx_queue(bp, i)
2785                bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2786
2787        /* Release IRQs */
2788        bnx2x_free_irq(bp);
2789load_error2:
2790        if (IS_PF(bp) && !BP_NOMCP(bp)) {
2791                bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2792                bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2793        }
2794
2795        bp->port.pmf = 0;
2796load_error1:
2797        bnx2x_napi_disable(bp);
2798        bnx2x_del_all_napi(bp);
2799
2800        /* clear pf_load status, as it was already set */
2801        if (IS_PF(bp))
2802                bnx2x_clear_pf_load(bp);
2803load_error0:
2804        bnx2x_free_fp_mem(bp);
2805        bnx2x_free_fw_stats_mem(bp);
2806        bnx2x_free_mem(bp);
2807
2808        return rc;
2809#endif /* ! BNX2X_STOP_ON_ERROR */
2810}
2811
2812int bnx2x_drain_tx_queues(struct bnx2x *bp)
2813{
2814        u8 rc = 0, cos, i;
2815
2816        /* Wait until tx fastpath tasks complete */
2817        for_each_tx_queue(bp, i) {
2818                struct bnx2x_fastpath *fp = &bp->fp[i];
2819
2820                for_each_cos_in_tx_queue(fp, cos)
2821                        rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2822                if (rc)
2823                        return rc;
2824        }
2825        return 0;
2826}
2827
2828/* must be called with rtnl_lock */
2829int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2830{
2831        int i;
2832        bool global = false;
2833
2834        DP(NETIF_MSG_IFDOWN, "Starting NIC unload\n");
2835
2836        /* mark driver is unloaded in shmem2 */
2837        if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2838                u32 val;
2839                val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2840                SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2841                          val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2842        }
2843
2844        if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
2845            (bp->state == BNX2X_STATE_CLOSED ||
2846             bp->state == BNX2X_STATE_ERROR)) {
2847                /* We can get here if the driver has been unloaded
2848                 * during parity error recovery and is either waiting for a
2849                 * leader to complete or for other functions to unload and
2850                 * then ifdown has been issued. In this case we want to
2851                 * unload and let the other functions complete the recovery
2852                 * process.
2853                 */
2854                bp->recovery_state = BNX2X_RECOVERY_DONE;
2855                bp->is_leader = 0;
2856                bnx2x_release_leader_lock(bp);
2857                smp_mb();
2858
2859                DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2860                BNX2X_ERR("Can't unload in closed or error state\n");
2861                return -EINVAL;
2862        }
2863
2864        /* Nothing to do during unload if the previous bnx2x_nic_load()
2865         * has not completed successfully - all resources are released.
2866         *
2867         * We can get here only after an unsuccessful ndo_* callback, during
2868         * which the dev->IFF_UP flag is still on.
2869         */
2870        if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
2871                return 0;
2872
2873        /* It's important to set bp->state to a value different from
2874         * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2875         * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2876         */
2877        bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2878        smp_mb();
2879
2880        /* indicate to VFs that the PF is going down */
2881        bnx2x_iov_channel_down(bp);
2882
2883        if (CNIC_LOADED(bp))
2884                bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2885
2886        /* Stop Tx */
2887        bnx2x_tx_disable(bp);
2888        netdev_reset_tc(bp->dev);
2889
2890        bp->rx_mode = BNX2X_RX_MODE_NONE;
2891
2892        del_timer_sync(&bp->timer);
2893
2894        if (IS_PF(bp)) {
2895                /* Set ALWAYS_ALIVE bit in shmem */
2896                bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2897                bnx2x_drv_pulse(bp);
2898                bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2899                bnx2x_save_statistics(bp);
2900        }
2901
2902        /* wait till consumers catch up with producers in all queues */
2903        bnx2x_drain_tx_queues(bp);
2904
2905        /* If VF, indicate to the PF that this function is going down (the
2906         * PF will delete the sp elements and clear the initializations).
2907         */
2908        if (IS_VF(bp))
2909                bnx2x_vfpf_close_vf(bp);
2910        else if (unload_mode != UNLOAD_RECOVERY)
2911                /* if this is a normal/close unload, we need to clean up the chip */
2912                bnx2x_chip_cleanup(bp, unload_mode, keep_link);
2913        else {
2914                /* Send the UNLOAD_REQUEST to the MCP */
2915                bnx2x_send_unload_req(bp, unload_mode);
2916
2917                /* Prevent transactions to the host from the functions on
2918                 * the engine that doesn't reset global blocks in case of a
2919                 * global attention, once the global blocks are reset and the
2920                 * gates are opened (the engine whose leader will perform the
2921                 * recovery last).
2922                 */
2923                if (!CHIP_IS_E1x(bp))
2924                        bnx2x_pf_disable(bp);
2925
2926                /* Disable HW interrupts, NAPI */
2927                bnx2x_netif_stop(bp, 1);
2928                /* Delete all NAPI objects */
2929                bnx2x_del_all_napi(bp);
2930                if (CNIC_LOADED(bp))
2931                        bnx2x_del_all_napi_cnic(bp);
2932                /* Release IRQs */
2933                bnx2x_free_irq(bp);
2934
2935                /* Report UNLOAD_DONE to MCP */
2936                bnx2x_send_unload_done(bp, false);
2937        }
2938
2939        /*
2940         * At this stage no more interrupts will arrive so we may safely clean
2941         * the queueable objects here in case they failed to get cleaned so far.
2942         */
2943        if (IS_PF(bp))
2944                bnx2x_squeeze_objects(bp);
2945
2946        /* There should be no more pending SP commands at this stage */
2947        bp->sp_state = 0;
2948
2949        bp->port.pmf = 0;
2950
2951        /* Free SKBs, SGEs, TPA pool and driver internals */
2952        bnx2x_free_skbs(bp);
2953        if (CNIC_LOADED(bp))
2954                bnx2x_free_skbs_cnic(bp);
2955        for_each_rx_queue(bp, i)
2956                bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2957
2958        bnx2x_free_fp_mem(bp);
2959        if (CNIC_LOADED(bp))
2960                bnx2x_free_fp_mem_cnic(bp);
2961
2962        if (IS_PF(bp)) {
2963                if (CNIC_LOADED(bp))
2964                        bnx2x_free_mem_cnic(bp);
2965        }
2966        bnx2x_free_mem(bp);
2967
2968        bp->state = BNX2X_STATE_CLOSED;
2969        bp->cnic_loaded = false;
2970
2971        /* Check if there are pending parity attentions. If there are - set
2972         * RECOVERY_IN_PROGRESS.
2973         */
2974        if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
2975                bnx2x_set_reset_in_progress(bp);
2976
2977                /* Set RESET_IS_GLOBAL if needed */
2978                if (global)
2979                        bnx2x_set_reset_global(bp);
2980        }
2981
2982        /* The last driver must disable the "close the gate" functionality
2983         * if there is no parity attention or "process kill" pending.
2984         */
2985        if (IS_PF(bp) &&
2986            !bnx2x_clear_pf_load(bp) &&
2987            bnx2x_reset_is_done(bp, BP_PATH(bp)))
2988                bnx2x_disable_close_the_gate(bp);
2989
2990        DP(NETIF_MSG_IFDOWN, "Ending NIC unload\n");
2991
2992        return 0;
2993}
2994
2995int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2996{
2997        u16 pmcsr;
2998
2999        /* If there is no power capability, silently succeed */
3000        if (!bp->pm_cap) {
3001                BNX2X_DEV_INFO("No power capability. Breaking.\n");
3002                return 0;
3003        }
3004
3005        pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3006
3007        switch (state) {
3008        case PCI_D0:
3009                pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3010                                      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3011                                       PCI_PM_CTRL_PME_STATUS));
3012
3013                if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3014                        /* delay required during transition out of D3hot */
3015                        msleep(20);
3016                break;
3017
3018        case PCI_D3hot:
3019                /* If there are other clients above, don't
3020                 * shut down the power */
3021                if (atomic_read(&bp->pdev->enable_cnt) != 1)
3022                        return 0;
3023                /* Don't shut down the power for emulation and FPGA */
3024                if (CHIP_REV_IS_SLOW(bp))
3025                        return 0;
3026
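                    /* The low two bits of PMCSR encode the power state
                     * (0 = D0 ... 3 = D3hot); writing 3 below requests D3hot.
                     */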
3027                pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3028                pmcsr |= 3;
3029
3030                if (bp->wol)
3031                        pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3032
3033                pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3034                                      pmcsr);
3035
3036                /* No more memory access after this point until
3037                 * the device is brought back to D0.
3038                 */
3039                break;
3040
3041        default:
3042                dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3043                return -EINVAL;
3044        }
3045        return 0;
3046}
3047
3048/*
3049 * net_device service functions
3050 */
3051int bnx2x_poll(struct napi_struct *napi, int budget)
3052{
3053        int work_done = 0;
3054        u8 cos;
3055        struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3056                                                 napi);
3057        struct bnx2x *bp = fp->bp;
3058
3059        while (1) {
3060#ifdef BNX2X_STOP_ON_ERROR
3061                if (unlikely(bp->panic)) {
3062                        napi_complete(napi);
3063                        return 0;
3064                }
3065#endif
3066                if (!bnx2x_fp_lock_napi(fp))
3067                        return work_done;
3068
3069                for_each_cos_in_tx_queue(fp, cos)
3070                        if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3071                                bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3072
3073                if (bnx2x_has_rx_work(fp)) {
3074                        work_done += bnx2x_rx_int(fp, budget - work_done);
3075
3076                        /* must not complete if we consumed full budget */
3077                        if (work_done >= budget) {
3078                                bnx2x_fp_unlock_napi(fp);
3079                                break;
3080                        }
3081                }
3082
3083                /* Fall out from the NAPI loop if needed */
3084                if (!bnx2x_fp_unlock_napi(fp) &&
3085                    !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3086
3087                        /* No need to update SB for FCoE L2 ring as long as
3088                         * it's connected to the default SB and the SB
3089                         * has been updated when NAPI was scheduled.
3090                         */
3091                        if (IS_FCOE_FP(fp)) {
3092                                napi_complete(napi);
3093                                break;
3094                        }
3095                        bnx2x_update_fpsb_idx(fp);
3096                        /* bnx2x_has_rx_work() reads the status block,
3097                         * thus we need to ensure that status block indices
3098                         * have been actually read (bnx2x_update_fpsb_idx)
3099                         * prior to this check (bnx2x_has_rx_work) so that
3100                         * we won't write the "newer" value of the status block
3101                         * to IGU (if there was a DMA right after
3102                         * bnx2x_has_rx_work and if there is no rmb, the memory
3103                         * reading (bnx2x_update_fpsb_idx) may be postponed
3104                         * to right before bnx2x_ack_sb). In this case there
3105                         * will never be another interrupt until there is
3106                         * another update of the status block, while there
3107                         * is still unhandled work.
3108                         */
3109                        rmb();
3110
3111                        if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3112                                napi_complete(napi);
3113                                /* Re-enable interrupts */
3114                                DP(NETIF_MSG_RX_STATUS,
3115                                   "Update index to %d\n", fp->fp_hc_idx);
3116                                bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3117                                             le16_to_cpu(fp->fp_hc_idx),
3118                                             IGU_INT_ENABLE, 1);
3119                                break;
3120                        }
3121                }
3122        }
3123
3124        return work_done;
3125}
3126
3127#ifdef CONFIG_NET_RX_BUSY_POLL
3128/* must be called with local_bh_disable()d */
3129int bnx2x_low_latency_recv(struct napi_struct *napi)
3130{
3131        struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3132                                                 napi);
3133        struct bnx2x *bp = fp->bp;
3134        int found = 0;
3135
3136        if ((bp->state == BNX2X_STATE_CLOSED) ||
3137            (bp->state == BNX2X_STATE_ERROR) ||
3138            (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG)))
3139                return LL_FLUSH_FAILED;
3140
3141        if (!bnx2x_fp_lock_poll(fp))
3142                return LL_FLUSH_BUSY;
3143
3144        if (bnx2x_has_rx_work(fp))
3145                found = bnx2x_rx_int(fp, 4);
3146
3147        bnx2x_fp_unlock_poll(fp);
3148
3149        return found;
3150}
3151#endif
3152
3153/* we split the first BD into header and data BDs
3154 * to ease the pain of our fellow microcode engineers;
3155 * we use one mapping for both BDs
3156 */
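    /* For a TSO packet whose linear part is longer than its headers, the
     * start BD is trimmed to cover only the headers (nbytes = hlen) and a new
     * data BD is added for the remaining old_len - hlen bytes, reusing the
     * same DMA mapping at offset hlen.  The data BD is flagged
     * BNX2X_TSO_SPLIT_BD so the completion path knows it owns no mapping.
     */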
3157static u16 bnx2x_tx_split(struct bnx2x *bp,
3158                          struct bnx2x_fp_txdata *txdata,
3159                          struct sw_tx_bd *tx_buf,
3160                          struct eth_tx_start_bd **tx_bd, u16 hlen,
3161                          u16 bd_prod)
3162{
3163        struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3164        struct eth_tx_bd *d_tx_bd;
3165        dma_addr_t mapping;
3166        int old_len = le16_to_cpu(h_tx_bd->nbytes);
3167
3168        /* first fix first BD */
3169        h_tx_bd->nbytes = cpu_to_le16(hlen);
3170
3171        DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3172           h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3173
3174        /* now get a new data BD
3175         * (after the pbd) and fill it */
3176        bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3177        d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3178
3179        mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3180                           le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3181
3182        d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3183        d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3184        d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3185
3186        /* this marks the BD as one that has no individual mapping */
3187        tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3188
3189        DP(NETIF_MSG_TX_QUEUED,
3190           "TSO split data size is %d (%x:%x)\n",
3191           d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3192
3193        /* update tx_bd */
3194        *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3195
3196        return bd_prod;
3197}
3198
3199#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3200#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
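    /* Adjust a checksum that was computed over a range shifted by 'fix' bytes
     * relative to the transport header: the checksum of those bytes is folded
     * out of (fix > 0) or into (fix < 0) the value, which is then returned
     * byte-swapped for the parsing BD.
     */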
3201static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3202{
3203        __sum16 tsum = (__force __sum16) csum;
3204
3205        if (fix > 0)
3206                tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3207                                  csum_partial(t_header - fix, fix, 0)));
3208
3209        else if (fix < 0)
3210                tsum = ~csum_fold(csum_add((__force __wsum) csum,
3211                                  csum_partial(t_header, -fix, 0)));
3212
3213        return bswab16(tsum);
3214}
3215
3216static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3217{
3218        u32 rc;
3219        __u8 prot = 0;
3220        __be16 protocol;
3221
3222        if (skb->ip_summed != CHECKSUM_PARTIAL)
3223                return XMIT_PLAIN;
3224
3225        protocol = vlan_get_protocol(skb);
3226        if (protocol == htons(ETH_P_IPV6)) {
3227                rc = XMIT_CSUM_V6;
3228                prot = ipv6_hdr(skb)->nexthdr;
3229        } else {
3230                rc = XMIT_CSUM_V4;
3231                prot = ip_hdr(skb)->protocol;
3232        }
3233
3234        if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3235                if (inner_ip_hdr(skb)->version == 6) {
3236                        rc |= XMIT_CSUM_ENC_V6;
3237                        if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3238                                rc |= XMIT_CSUM_TCP;
3239                } else {
3240                        rc |= XMIT_CSUM_ENC_V4;
3241                        if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3242                                rc |= XMIT_CSUM_TCP;
3243                }
3244        }
3245        if (prot == IPPROTO_TCP)
3246                rc |= XMIT_CSUM_TCP;
3247
3248        if (skb_is_gso_v6(skb)) {
3249                rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3250                if (rc & XMIT_CSUM_ENC)
3251                        rc |= XMIT_GSO_ENC_V6;
3252        } else if (skb_is_gso(skb)) {
3253                rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3254                if (rc & XMIT_CSUM_ENC)
3255                        rc |= XMIT_GSO_ENC_V4;
3256        }
3257
3258        return rc;
3259}
3260
3261#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3262/* check if the packet requires linearization (packet is too fragmented);
3263   no need to check fragmentation if the page size > 8K (there will be no
3264   violation of the FW restrictions) */
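    /* The check below ensures that every window of (MAX_FETCH_BD - 3)
     * consecutive data BDs (the linear part plus the frags) carries at least
     * one full MSS of payload; otherwise a single segment could require more
     * BDs than the FW can fetch, so the packet is copied (linearized) instead.
     */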
3265static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3266                             u32 xmit_type)
3267{
3268        int to_copy = 0;
3269        int hlen = 0;
3270        int first_bd_sz = 0;
3271
3272        /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3273        if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3274
3275                if (xmit_type & XMIT_GSO) {
3276                        unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3277                        /* Check if LSO packet needs to be copied:
3278                           3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3279                        int wnd_size = MAX_FETCH_BD - 3;
3280                        /* Number of windows to check */
3281                        int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3282                        int wnd_idx = 0;
3283                        int frag_idx = 0;
3284                        u32 wnd_sum = 0;
3285
3286                        /* Headers length */
3287                        hlen = (int)(skb_transport_header(skb) - skb->data) +
3288                                tcp_hdrlen(skb);
3289
3290                        /* Amount of data (w/o headers) on the linear part of the SKB */
3291                        first_bd_sz = skb_headlen(skb) - hlen;
3292
3293                        wnd_sum  = first_bd_sz;
3294
3295                        /* Calculate the first sum - it's special */
3296                        for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3297                                wnd_sum +=
3298                                        skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3299
3300                        /* If there was data in the linear part of the skb - check it */
3301                        if (first_bd_sz > 0) {
3302                                if (unlikely(wnd_sum < lso_mss)) {
3303                                        to_copy = 1;
3304                                        goto exit_lbl;
3305                                }
3306
3307                                wnd_sum -= first_bd_sz;
3308                        }
3309
3310                        /* Others are easier: run through the frag list and
3311                           check all windows */
3312                        for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3313                                wnd_sum +=
3314                          skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3315
3316                                if (unlikely(wnd_sum < lso_mss)) {
3317                                        to_copy = 1;
3318                                        break;
3319                                }
3320                                wnd_sum -=
3321                                        skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3322                        }
3323                } else {
3324                        /* in the non-LSO case a too fragmented packet
3325                           should always be linearized */
3326                        to_copy = 1;
3327                }
3328        }
3329
3330exit_lbl:
3331        if (unlikely(to_copy))
3332                DP(NETIF_MSG_TX_QUEUED,
3333                   "Linearization IS REQUIRED for %s packet. num_frags %d  hlen %d  first_bd_sz %d\n",
3334                   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3335                   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3336
3337        return to_copy;
3338}
3339#endif
3340
3341static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3342                                 u32 xmit_type)
3343{
3344        struct ipv6hdr *ipv6;
3345
3346        *parsing_data |= (skb_shinfo(skb)->gso_size <<
3347                              ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3348                              ETH_TX_PARSE_BD_E2_LSO_MSS;
3349
3350        if (xmit_type & XMIT_GSO_ENC_V6)
3351                ipv6 = inner_ipv6_hdr(skb);
3352        else if (xmit_type & XMIT_GSO_V6)
3353                ipv6 = ipv6_hdr(skb);
3354        else
3355                ipv6 = NULL;
3356
3357        if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
3358                *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3359}
3360
3361/**
3362 * bnx2x_set_pbd_gso - update PBD in GSO case.
3363 *
3364 * @skb:        packet skb
3365 * @pbd:        parse BD
3366 * @tx_start_bd: start BD to be updated
3367 * @xmit_type:  xmit flags
3367 */
3368static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3369                              struct eth_tx_parse_bd_e1x *pbd,
3370                              struct eth_tx_start_bd *tx_start_bd,
3371                              u32 xmit_type)
3372{
3373        pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3374        pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3375        pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3376
3377        if (xmit_type & XMIT_GSO_V4) {
3378                pbd->ip_id = bswab16(ip_hdr(skb)->id);
3379                pbd->tcp_pseudo_csum =
3380                        bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3381                                                   ip_hdr(skb)->daddr,
3382                                                   0, IPPROTO_TCP, 0));
3383
3384                /* GSO on 57710/57711 needs FW to calculate IP checksum */
3385                tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
3386        } else {
3387                pbd->tcp_pseudo_csum =
3388                        bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3389                                                 &ipv6_hdr(skb)->daddr,
3390                                                 0, IPPROTO_TCP, 0));
3391        }
3392
3393        pbd->global_data |=
3394                cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3395}
3396
3397/**
3398 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3399 *
3400 * @bp:                 driver handle
3401 * @skb:                packet skb
3402 * @parsing_data:       data to be updated
3403 * @xmit_type:          xmit flags
3404 *
3405 * 57712/578xx related, when skb has encapsulation
3406 */
3407static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3408                                 u32 *parsing_data, u32 xmit_type)
3409{
3410        *parsing_data |=
3411                ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3412                ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3413                ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3414
3415        if (xmit_type & XMIT_CSUM_TCP) {
3416                *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3417                        ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3418                        ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3419
3420                return skb_inner_transport_header(skb) +
3421                        inner_tcp_hdrlen(skb) - skb->data;
3422        }
3423
3424        /* We support checksum offload for TCP and UDP only.
3425         * No need to pass the UDP header length - it's a constant.
3426         */
3427        return skb_inner_transport_header(skb) +
3428                sizeof(struct udphdr) - skb->data;
3429}
3430
3431/**
3432 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3433 *
3434 * @bp:                 driver handle
3435 * @skb:                packet skb
3436 * @parsing_data:       data to be updated
3437 * @xmit_type:          xmit flags
3438 *
3439 * 57712/578xx related
3440 */
3441static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3442                                u32 *parsing_data, u32 xmit_type)
3443{
3444        *parsing_data |=
3445                ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3446                ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3447                ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3448
3449        if (xmit_type & XMIT_CSUM_TCP) {
3450                *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3451                        ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3452                        ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3453
3454                return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3455        }
3456        /* We support checksum offload for TCP and UDP only.
3457         * No need to pass the UDP header length - it's a constant.
3458         */
3459        return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3460}
3461
3462/* set FW indication according to inner or outer protocols if tunneled */
3463static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3464                               struct eth_tx_start_bd *tx_start_bd,
3465                               u32 xmit_type)
3466{
3467        tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3468
3469        if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3470                tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3471
3472        if (!(xmit_type & XMIT_CSUM_TCP))
3473                tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3474}
3475
3476/**
3477 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3478 *
3479 * @bp:         driver handle
3480 * @skb:        packet skb
3481 * @pbd:        parse BD to be updated
3482 * @xmit_type:  xmit flags
3483 */
3484static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3485                             struct eth_tx_parse_bd_e1x *pbd,
3486                             u32 xmit_type)
3487{
3488        u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3489
3490        /* for now NS flag is not used in Linux */
3491        pbd->global_data =
3492                cpu_to_le16(hlen |
3493                            ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3494                             ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3495
3496        pbd->ip_hlen_w = (skb_transport_header(skb) -
3497                        skb_network_header(skb)) >> 1;
3498
3499        hlen += pbd->ip_hlen_w;
3500
3501        /* We support checksum offload for TCP and UDP only */
3502        if (xmit_type & XMIT_CSUM_TCP)
3503                hlen += tcp_hdrlen(skb) / 2;
3504        else
3505                hlen += sizeof(struct udphdr) / 2;
3506
3507        pbd->total_hlen_w = cpu_to_le16(hlen);
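            /* hlen was accumulated in 16-bit words; return it in bytes */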
3508        hlen = hlen * 2;
3509
3510        if (xmit_type & XMIT_CSUM_TCP) {
3511                pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3512
3513        } else {
3514                s8 fix = SKB_CS_OFF(skb); /* signed! */
3515
3516                DP(NETIF_MSG_TX_QUEUED,
3517                   "hlen %d  fix %d  csum before fix %x\n",
3518                   le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3519
3520                /* HW bug: fixup the CSUM */
3521                pbd->tcp_pseudo_csum =
3522                        bnx2x_csum_fix(skb_transport_header(skb),
3523                                       SKB_CS(skb), fix);
3524
3525                DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3526                   pbd->tcp_pseudo_csum);
3527        }
3528
3529        return hlen;
3530}
3531
3532static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3533                                      struct eth_tx_parse_bd_e2 *pbd_e2,
3534                                      struct eth_tx_parse_2nd_bd *pbd2,
3535                                      u16 *global_data,
3536                                      u32 xmit_type)
3537{
3538        u16 hlen_w = 0;
3539        u8 outerip_off, outerip_len = 0;
3540
3541        /* from outer IP to transport */
3542        hlen_w = (skb_inner_transport_header(skb) -
3543                  skb_network_header(skb)) >> 1;
3544
3545        /* transport len */
3546        hlen_w += inner_tcp_hdrlen(skb) >> 1;
3547
3548        pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3549
3550        /* outer IP header info */
3551        if (xmit_type & XMIT_CSUM_V4) {
3552                struct iphdr *iph = ip_hdr(skb);
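                    /* Back the total-length and fragment fields out of the
                     * outer IP header checksum (hence the field name
                     * fw_ip_csum_wo_len_flags_frag), leaving them to be
                     * patched per generated segment.
                     */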
3553                u32 csum = (__force u32)(~iph->check) -
3554                           (__force u32)iph->tot_len -
3555                           (__force u32)iph->frag_off;
3556
3557                pbd2->fw_ip_csum_wo_len_flags_frag =
3558                        bswab16(csum_fold((__force __wsum)csum));
3559        } else {
3560                pbd2->fw_ip_hdr_to_payload_w =
3561                        hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3562        }
3563
3564        pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3565
3566        pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3567
3568        if (xmit_type & XMIT_GSO_V4) {
3569                pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3570
3571                pbd_e2->data.tunnel_data.pseudo_csum =
3572                        bswab16(~csum_tcpudp_magic(
3573                                        inner_ip_hdr(skb)->saddr,
3574                                        inner_ip_hdr(skb)->daddr,
3575                                        0, IPPROTO_TCP, 0));
3576
3577                outerip_len = ip_hdr(skb)->ihl << 1;
3578        } else {
3579                pbd_e2->data.tunnel_data.pseudo_csum =
3580                        bswab16(~csum_ipv6_magic(
3581                                        &inner_ipv6_hdr(skb)->saddr,
3582                                        &inner_ipv6_hdr(skb)->daddr,
3583                                        0, IPPROTO_TCP, 0));
3584        }
3585
3586        outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3587
3588        *global_data |=
3589                outerip_off |
3590                (!!(xmit_type & XMIT_CSUM_V6) <<
3591                        ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
3592                (outerip_len <<
3593                        ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3594                ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3595                        ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3596
3597        if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3598                SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3599                pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3600        }
3601}
3602
3603/* called with netif_tx_lock
3604 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3605 * netif_wake_queue()
3606 */
3607netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3608{
3609        struct bnx2x *bp = netdev_priv(dev);
3610
3611        struct netdev_queue *txq;
3612        struct bnx2x_fp_txdata *txdata;
3613        struct sw_tx_bd *tx_buf;
3614        struct eth_tx_start_bd *tx_start_bd, *first_bd;
3615        struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3616        struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3617        struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3618        struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3619        u32 pbd_e2_parsing_data = 0;
3620        u16 pkt_prod, bd_prod;
3621        int nbd, txq_index;
3622        dma_addr_t mapping;
3623        u32 xmit_type = bnx2x_xmit_type(bp, skb);
3624        int i;
3625        u8 hlen = 0;
3626        __le16 pkt_size = 0;
3627        struct ethhdr *eth;
3628        u8 mac_type = UNICAST_ADDRESS;
3629
3630#ifdef BNX2X_STOP_ON_ERROR
3631        if (unlikely(bp->panic))
3632                return NETDEV_TX_BUSY;
3633#endif
3634
3635        txq_index = skb_get_queue_mapping(skb);
3636        txq = netdev_get_tx_queue(dev, txq_index);
3637
3638        BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3639
3640        txdata = &bp->bnx2x_txq[txq_index];
3641
3642        /* enable this debug print to view the transmission queue being used
3643        DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3644           txq_index, fp_index, txdata_index); */
3645
3646        /* enable this debug print to view the transmission details
3647        DP(NETIF_MSG_TX_QUEUED,
3648           "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3649           txdata->cid, fp_index, txdata_index, txdata, fp); */
3650
3651        if (unlikely(bnx2x_tx_avail(bp, txdata) <
3652                        skb_shinfo(skb)->nr_frags +
3653                        BDS_PER_TX_PKT +
3654                        NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3655                /* Handle special storage cases separately */
3656                if (txdata->tx_ring_size == 0) {
3657                        struct bnx2x_eth_q_stats *q_stats =
3658                                bnx2x_fp_qstats(bp, txdata->parent_fp);
3659                        q_stats->driver_filtered_tx_pkt++;
3660                        dev_kfree_skb(skb);
3661                        return NETDEV_TX_OK;
3662                }
3663                bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3664                netif_tx_stop_queue(txq);
3665                BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3666
3667                return NETDEV_TX_BUSY;
3668        }
3669
3670        DP(NETIF_MSG_TX_QUEUED,
3671           "queue[%d]: SKB: summed %x  protocol %x protocol(%x,%x) gso type %x  xmit_type %x len %d\n",
3672           txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3673           ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3674           skb->len);
3675
3676        eth = (struct ethhdr *)skb->data;
3677
3678        /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3679        if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3680                if (is_broadcast_ether_addr(eth->h_dest))
3681                        mac_type = BROADCAST_ADDRESS;
3682                else
3683                        mac_type = MULTICAST_ADDRESS;
3684        }
3685
3686#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3687        /* First, check if we need to linearize the skb (due to FW
3688           restrictions). No need to check fragmentation if page size > 8K
3689           (there will be no violation to FW restrictions) */
3690        if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3691                /* Statistics of linearization */
3692                bp->lin_cnt++;
3693                if (skb_linearize(skb) != 0) {
3694                        DP(NETIF_MSG_TX_QUEUED,
3695                           "SKB linearization failed - silently dropping this SKB\n");
3696                        dev_kfree_skb_any(skb);
3697                        return NETDEV_TX_OK;
3698                }
3699        }
3700#endif
3701        /* Map skb linear data for DMA */
3702        mapping = dma_map_single(&bp->pdev->dev, skb->data,
3703                                 skb_headlen(skb), DMA_TO_DEVICE);
3704        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3705                DP(NETIF_MSG_TX_QUEUED,
3706                   "SKB mapping failed - silently dropping this SKB\n");
3707                dev_kfree_skb_any(skb);
3708                return NETDEV_TX_OK;
3709        }
3710        /*
3711         * Please read carefully. First we use one BD which we mark as start,
3712         * then we have a parsing info BD (used for TSO or xsum),
3713         * and only then we have the rest of the TSO BDs.
3714         * (don't forget to mark the last one as last,
3715         * and to unmap only AFTER you write to the BD ...)
3716         * And above all, all pbd sizes are in words - NOT DWORDS!
3717         */
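            /* A fully featured packet thus ends up as:
             *   start BD -> parse BD (E1x or E2) [-> 2nd parse BD if tunneled]
             *   [-> extra data BD if the TSO header is split] -> one BD per frag
             * and 'nbd' below counts them all for first_bd->nbd.
             */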
3718
3719        /* get the current pkt producer now - advance it just before sending
3720         * the packet, since a page mapping failure would drop the packet
3721         */
3722        pkt_prod = txdata->tx_pkt_prod;
3723        bd_prod = TX_BD(txdata->tx_bd_prod);
3724
3725        /* get a tx_buf and first BD
3726         * tx_start_bd may be changed during SPLIT,
3727         * but first_bd will always stay first
3728         */
3729        tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3730        tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3731        first_bd = tx_start_bd;
3732
3733        tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3734
3735        /* header nbd: indirectly zero other flags! */
3736        tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3737
3738        /* remember the first BD of the packet */
3739        tx_buf->first_bd = txdata->tx_bd_prod;
3740        tx_buf->skb = skb;
3741        tx_buf->flags = 0;
3742
3743        DP(NETIF_MSG_TX_QUEUED,
3744           "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
3745           pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3746
3747        if (vlan_tx_tag_present(skb)) {
3748                tx_start_bd->vlan_or_ethertype =
3749                    cpu_to_le16(vlan_tx_tag_get(skb));
3750                tx_start_bd->bd_flags.as_bitfield |=
3751                    (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3752        } else {
3753                /* when transmitting from a VF, the start BD must hold the
3754                 * ethertype for the FW to enforce it
3755                 */
3756                if (IS_VF(bp))
3757                        tx_start_bd->vlan_or_ethertype =
3758                                cpu_to_le16(ntohs(eth->h_proto));
3759                else
3760                        /* used by FW for packet accounting */
3761                        tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3762        }
3763
3764        nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3765
3766        /* turn on parsing and get a BD */
3767        bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3768
3769        if (xmit_type & XMIT_CSUM)
3770                bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3771
3772        if (!CHIP_IS_E1x(bp)) {
3773                pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3774                memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3775
3776                if (xmit_type & XMIT_CSUM_ENC) {
3777                        u16 global_data = 0;
3778
3779                        /* Set PBD in enc checksum offload case */
3780                        hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3781                                                      &pbd_e2_parsing_data,
3782                                                      xmit_type);
3783
3784                        /* turn on 2nd parsing and get a BD */
3785                        bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3786
3787                        pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3788
3789                        memset(pbd2, 0, sizeof(*pbd2));
3790
3791                        pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3792                                (skb_inner_network_header(skb) -
3793                                 skb->data) >> 1;
3794
3795                        if (xmit_type & XMIT_GSO_ENC)
3796                                bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3797                                                          &global_data,
3798                                                          xmit_type);
3799
3800                        pbd2->global_data = cpu_to_le16(global_data);
3801
3802                        /* add an additional parse BD indication to the start BD */
3803                        SET_FLAG(tx_start_bd->general_data,
3804                                 ETH_TX_START_BD_PARSE_NBDS, 1);
3805                        /* set encapsulation flag in start BD */
3806                        SET_FLAG(tx_start_bd->general_data,
3807                                 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3808                        nbd++;
3809                } else if (xmit_type & XMIT_CSUM) {
3810                        /* Set PBD in checksum offload case w/o encapsulation */
3811                        hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3812                                                     &pbd_e2_parsing_data,
3813                                                     xmit_type);
3814                }
3815
3816                /* Add the MACs to the parsing BD if this is a VF */
3817                if (IS_VF(bp)) {
3818                        /* override GRE parameters in BD */
3819                        bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3820                                              &pbd_e2->data.mac_addr.src_mid,
3821                                              &pbd_e2->data.mac_addr.src_lo,
3822                                              eth->h_source);
3823
3824                        bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3825                                              &pbd_e2->data.mac_addr.dst_mid,
3826                                              &pbd_e2->data.mac_addr.dst_lo,
3827                                              eth->h_dest);
3828                }
3829
3830                SET_FLAG(pbd_e2_parsing_data,
3831                         ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
3832        } else {
3833                u16 global_data = 0;
3834                pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
3835                memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3836                /* Set PBD in checksum offload case */
3837                if (xmit_type & XMIT_CSUM)
3838                        hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
3839
3840                SET_FLAG(global_data,
3841                         ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3842                pbd_e1x->global_data |= cpu_to_le16(global_data);
3843        }
3844
3845        /* Setup the data pointer of the first BD of the packet */
3846        tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3847        tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3848        tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3849        pkt_size = tx_start_bd->nbytes;
3850
3851        DP(NETIF_MSG_TX_QUEUED,
3852           "first bd @%p  addr (%x:%x)  nbytes %d  flags %x  vlan %x\n",
3853           tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3854           le16_to_cpu(tx_start_bd->nbytes),
3855           tx_start_bd->bd_flags.as_bitfield,
3856           le16_to_cpu(tx_start_bd->vlan_or_ethertype));
3857
3858        if (xmit_type & XMIT_GSO) {
3859
3860                DP(NETIF_MSG_TX_QUEUED,
3861                   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
3862                   skb->len, hlen, skb_headlen(skb),
3863                   skb_shinfo(skb)->gso_size);
3864
3865                tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3866
3867                if (unlikely(skb_headlen(skb) > hlen)) {
3868                        nbd++;
3869                        bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3870                                                 &tx_start_bd, hlen,
3871                                                 bd_prod);
3872                }
3873                if (!CHIP_IS_E1x(bp))
3874                        bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3875                                             xmit_type);
3876                else
3877                        bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type);
3878        }
3879
3880        /* Set the PBD's parsing_data field if not zero
3881         * (for the chips newer than 57711).
3882         */
3883        if (pbd_e2_parsing_data)
3884                pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3885
3886        tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3887
3888        /* Handle fragmented skb */
3889        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3890                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3891
3892                mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3893                                           skb_frag_size(frag), DMA_TO_DEVICE);
3894                if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3895                        unsigned int pkts_compl = 0, bytes_compl = 0;
3896
3897                        DP(NETIF_MSG_TX_QUEUED,
3898                           "Unable to map page - dropping packet...\n");
3899
3900                        /* we need to unmap all buffers already mapped
3901                         * for this SKB;
3902                         * first_bd->nbd needs to be properly updated
3903                         * before the call to bnx2x_free_tx_pkt
3904                         */
3905                        first_bd->nbd = cpu_to_le16(nbd);
3906                        bnx2x_free_tx_pkt(bp, txdata,
3907                                          TX_BD(txdata->tx_pkt_prod),
3908                                          &pkts_compl, &bytes_compl);
3909                        return NETDEV_TX_OK;
3910                }
3911
3912                bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3913                tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3914                if (total_pkt_bd == NULL)
3915                        total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3916
3917                tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3918                tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3919                tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3920                le16_add_cpu(&pkt_size, skb_frag_size(frag));
3921                nbd++;
3922
3923                DP(NETIF_MSG_TX_QUEUED,
3924                   "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
3925                   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3926                   le16_to_cpu(tx_data_bd->nbytes));
3927        }
3928
3929        DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3930
3931        /* update with actual num BDs */
3932        first_bd->nbd = cpu_to_le16(nbd);
3933
3934        bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3935
3936        /* now send a tx doorbell, counting the "next page" BD
3937         * if the packet contains or ends with it
3938         */
3939        if (TX_BD_POFF(bd_prod) < nbd)
3940                nbd++;
3941
3942        /* total_pkt_bytes should be set on the first data BD if
3943         * it's not an LSO packet and there is more than one
3944         * data BD. In this case pkt_size is limited by an MTU value.
3945         * However we prefer to set it for an LSO packet (while we don't
3946         * have to) in order to save some CPU cycles in the non-LSO
3947         * case, where we care much more about them.
3948         */
3949        if (total_pkt_bd != NULL)
3950                total_pkt_bd->total_pkt_bytes = pkt_size;
3951
3952        if (pbd_e1x)
3953                DP(NETIF_MSG_TX_QUEUED,
3954                   "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
3955                   pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3956                   pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3957                   pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3958                    le16_to_cpu(pbd_e1x->total_hlen_w));
3959        if (pbd_e2)
3960                DP(NETIF_MSG_TX_QUEUED,
3961                   "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
3962                   pbd_e2,
3963                   pbd_e2->data.mac_addr.dst_hi,
3964                   pbd_e2->data.mac_addr.dst_mid,
3965                   pbd_e2->data.mac_addr.dst_lo,
3966                   pbd_e2->data.mac_addr.src_hi,
3967                   pbd_e2->data.mac_addr.src_mid,
3968                   pbd_e2->data.mac_addr.src_lo,
3969                   pbd_e2->parsing_data);
3970        DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
3971
3972        netdev_tx_sent_queue(txq, skb->len);
3973
3974        skb_tx_timestamp(skb);
3975
3976        txdata->tx_pkt_prod++;
3977        /*
3978         * Make sure that the BD data is updated before updating the producer
3979         * since FW might read the BD right after the producer is updated.
3980         * This is only applicable for weak-ordered memory model archs such
3981         * as IA-64. The following barrier is also mandatory since the FW
3982         * assumes packets must have BDs.
3983         */
3984        wmb();
3985
3986        txdata->tx_db.data.prod += nbd;
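            /* tx_db.data and tx_db.raw overlay the same doorbell word; the
             * compiler barrier below ensures the updated producer is written
             * before tx_db.raw is read for the doorbell.
             */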
3987        barrier();
3988
3989        DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
3990
3991        mmiowb();
3992
3993        txdata->tx_bd_prod += nbd;
3994
3995        if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
3996                netif_tx_stop_queue(txq);
3997
3998                /* paired memory barrier is in bnx2x_tx_int(), we have to keep
3999                 * ordering of set_bit() in netif_tx_stop_queue() and read of
4000                 * fp->bd_tx_cons */
4001                smp_mb();
4002
4003                bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
4004                if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
4005                        netif_tx_wake_queue(txq);
4006        }
4007        txdata->tx_pkt++;
4008
4009        return NETDEV_TX_OK;
4010}
4011
4012/**
4013 * bnx2x_setup_tc - routine to configure net_device for multi tc
4014 *
4015 * @dev: net device to configure
4016 * @num_tc: number of traffic classes to enable
4017 *
4018 * callback connected to the ndo_setup_tc function pointer
4019 */
4020int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4021{
4022        int cos, prio, count, offset;
4023        struct bnx2x *bp = netdev_priv(dev);
4024
4025        /* setup tc must be called under rtnl lock */
4026        ASSERT_RTNL();
4027
4028        /* no traffic classes requested. Aborting */
4029        if (!num_tc) {
4030                netdev_reset_tc(dev);
4031                return 0;
4032        }
4033
4034        /* requested to support too many traffic classes */
4035        if (num_tc > bp->max_cos) {
4036                BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4037                          num_tc, bp->max_cos);
4038                return -EINVAL;
4039        }
4040
4041        /* declare amount of supported traffic classes */
4042        if (netdev_set_num_tc(dev, num_tc)) {
4043                BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
4044                return -EINVAL;
4045        }
4046
4047        /* configure priority to traffic class mapping */
4048        for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4049                netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
4050                DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4051                   "mapping priority %d to tc %d\n",
4052                   prio, bp->prio_to_cos[prio]);
4053        }
4054
4055        /* Use this configuration to differentiate tc0 from other COSes.
4056           This can be used for ets or pfc, and saves the effort of setting
4057           up a multi-class queue disc or negotiating DCBX with a switch
4058        netdev_set_prio_tc_map(dev, 0, 0);
4059        DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
4060        for (prio = 1; prio < 16; prio++) {
4061                netdev_set_prio_tc_map(dev, prio, 1);
4062                DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
4063        } */
4064
4065        /* configure traffic class to transmission queue mapping */
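            /* tc 'cos' is served by BNX2X_NUM_ETH_QUEUES(bp) queues starting
             * at queue index cos * BNX2X_NUM_NON_CNIC_QUEUES(bp)
             */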
4066        for (cos = 0; cos < bp->max_cos; cos++) {
4067                count = BNX2X_NUM_ETH_QUEUES(bp);
4068                offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4069                netdev_set_tc_queue(dev, cos, count, offset);
4070                DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4071                   "mapping tc %d to offset %d count %d\n",
4072                   cos, offset, count);
4073        }
4074
4075        return 0;
4076}
4077
4078/* called with rtnl_lock */
4079int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4080{
4081        struct sockaddr *addr = p;
4082        struct bnx2x *bp = netdev_priv(dev);
4083        int rc = 0;
4084
4085        if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
4086                BNX2X_ERR("Requested MAC address is not valid\n");
4087                return -EINVAL;
4088        }
4089
4090        if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
4091            !is_zero_ether_addr(addr->sa_data)) {
4092                BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
4093                return -EINVAL;
4094        }
4095
4096        if (netif_running(dev))  {
4097                rc = bnx2x_set_eth_mac(bp, false);
4098                if (rc)
4099                        return rc;
4100        }
4101
4102        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4103
4104        if (netif_running(dev))
4105                rc = bnx2x_set_eth_mac(bp, true);
4106
4107        return rc;
4108}
4109
4110static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4111{
4112        union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4113        struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4114        u8 cos;
4115
4116        /* Common */
4117
4118        if (IS_FCOE_IDX(fp_index)) {
4119                memset(sb, 0, sizeof(union host_hc_status_block));
4120                fp->status_blk_mapping = 0;
4121        } else {
4122                /* status blocks */
4123                if (!CHIP_IS_E1x(bp))
4124                        BNX2X_PCI_FREE(sb->e2_sb,
4125                                       bnx2x_fp(bp, fp_index,
4126                                                status_blk_mapping),
4127                                       sizeof(struct host_hc_status_block_e2));
4128                else
4129                        BNX2X_PCI_FREE(sb->e1x_sb,
4130                                       bnx2x_fp(bp, fp_index,
4131                                                status_blk_mapping),
4132                                       sizeof(struct host_hc_status_block_e1x));
4133        }
4134
4135        /* Rx */
4136        if (!skip_rx_queue(bp, fp_index)) {
4137                bnx2x_free_rx_bds(fp);
4138
4139                /* fastpath rx rings: rx_buf rx_desc rx_comp */
4140                BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4141                BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4142                               bnx2x_fp(bp, fp_index, rx_desc_mapping),
4143                               sizeof(struct eth_rx_bd) * NUM_RX_BD);
4144
4145                BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4146                               bnx2x_fp(bp, fp_index, rx_comp_mapping),
4147                               sizeof(struct eth_fast_path_rx_cqe) *
4148                               NUM_RCQ_BD);
4149
4150                /* SGE ring */
4151                BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4152                BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4153                               bnx2x_fp(bp, fp_index, rx_sge_mapping),
4154                               BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4155        }
4156
4157        /* Tx */
4158        if (!skip_tx_queue(bp, fp_index)) {
4159                /* fastpath tx rings: tx_buf tx_desc */
4160                for_each_cos_in_tx_queue(fp, cos) {
4161                        struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4162
4163                        DP(NETIF_MSG_IFDOWN,
4164                           "freeing tx memory of fp %d cos %d cid %d\n",
4165                           fp_index, cos, txdata->cid);
4166
4167                        BNX2X_FREE(txdata->tx_buf_ring);
4168                        BNX2X_PCI_FREE(txdata->tx_desc_ring,
4169                                txdata->tx_desc_mapping,
4170                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4171                }
4172        }
4173        /* end of fastpath */
4174}
4175
4176void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4177{
4178        int i;
4179        for_each_cnic_queue(bp, i)
4180                bnx2x_free_fp_mem_at(bp, i);
4181}
4182
4183void bnx2x_free_fp_mem(struct bnx2x *bp)
4184{
4185        int i;
4186        for_each_eth_queue(bp, i)
4187                bnx2x_free_fp_mem_at(bp, i);
4188}
4189
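    /* Cache pointers to the index_values[] and running_index[] arrays of the
     * chip-specific (E2 vs. E1x) status block layout, so users of the fp do
     * not need to know which layout is in place.
     */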
4190static void set_sb_shortcuts(struct bnx2x *bp, int index)
4191{
4192        union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4193        if (!CHIP_IS_E1x(bp)) {
4194                bnx2x_fp(bp, index, sb_index_values) =
4195                        (__le16 *)status_blk.e2_sb->sb.index_values;
4196                bnx2x_fp(bp, index, sb_running_index) =
4197                        (__le16 *)status_blk.e2_sb->sb.running_index;
4198        } else {
4199                bnx2x_fp(bp, index, sb_index_values) =
4200                        (__le16 *)status_blk.e1x_sb->sb.index_values;
4201                bnx2x_fp(bp, index, sb_running_index) =
4202                        (__le16 *)status_blk.e1x_sb->sb.running_index;
4203        }
4204}
4205
4206/* Returns the number of actually allocated BDs */
4207static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4208                              int rx_ring_size)
4209{
4210        struct bnx2x *bp = fp->bp;
4211        u16 ring_prod, cqe_ring_prod;
4212        int i, failure_cnt = 0;
4213
4214        fp->rx_comp_cons = 0;
4215        cqe_ring_prod = ring_prod = 0;
4216
4217        /* This routine is called only during fp init, so
4218         * fp->eth_q_stats.rx_skb_alloc_failed = 0
4219         */
4220        for (i = 0; i < rx_ring_size; i++) {
4221                if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
4222                        failure_cnt++;
4223                        continue;
4224                }
4225                ring_prod = NEXT_RX_IDX(ring_prod);
4226                cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4227                WARN_ON(ring_prod <= (i - failure_cnt));
4228        }
4229
4230        if (failure_cnt)
4231                BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4232                          i - failure_cnt, fp->index);
4233
4234        fp->rx_bd_prod = ring_prod;
4235        /* Limit the CQE producer by the CQE ring size */
4236        fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4237                               cqe_ring_prod);
4238        fp->rx_pkt = fp->rx_calls = 0;
4239
4240        bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4241
4242        return i - failure_cnt;
4243}
4244
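    /* Chain the RCQ pages: the last CQE of each page points to the first
     * CQE of the next page (wrapping back to page 0).
     */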
4245static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4246{
4247        int i;
4248
4249        for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4250                struct eth_rx_cqe_next_page *nextpg;
4251
4252                nextpg = (struct eth_rx_cqe_next_page *)
4253                        &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4254                nextpg->addr_hi =
4255                        cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4256                                   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4257                nextpg->addr_lo =
4258                        cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4259                                   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4260        }
4261}
4262
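    /**
     * bnx2x_alloc_fp_mem_at - allocate memory for a single fastpath queue.
     *
     * @bp:         driver handle
     * @index:      fastpath index
     *
     * Allocates the status block, Tx rings and Rx rings of the given
     * fastpath. If fewer Rx BDs than the firmware minimum can be allocated,
     * the queue's memory is released and -ENOMEM is returned.
     */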
4263static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4264{
4265        union host_hc_status_block *sb;
4266        struct bnx2x_fastpath *fp = &bp->fp[index];
4267        int ring_size = 0;
4268        u8 cos;
4269        int rx_ring_size = 0;
4270
4271        if (!bp->rx_ring_size &&
4272            (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
4273                rx_ring_size = MIN_RX_SIZE_NONTPA;
4274                bp->rx_ring_size = rx_ring_size;
4275        } else if (!bp->rx_ring_size) {
4276                rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4277
4278                if (CHIP_IS_E3(bp)) {
4279                        u32 cfg = SHMEM_RD(bp,
4280                                           dev_info.port_hw_config[BP_PORT(bp)].
4281                                           default_cfg);
4282
4283                        /* Decrease ring size for 1G functions */
4284                        if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4285                            PORT_HW_CFG_NET_SERDES_IF_SGMII)
4286                                rx_ring_size /= 10;
4287                }
4288
4289                /* allocate at least number of buffers required by FW */
4290                rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4291                                     MIN_RX_SIZE_TPA, rx_ring_size);
4292
4293                bp->rx_ring_size = rx_ring_size;
4294        } else /* if rx_ring_size specified - use it */
4295                rx_ring_size = bp->rx_ring_size;
4296
4297        DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4298
4299        /* Common */
4300        sb = &bnx2x_fp(bp, index, status_blk);
4301
4302        if (!IS_FCOE_IDX(index)) {
4303                /* status blocks */
4304                if (!CHIP_IS_E1x(bp))
4305                        BNX2X_PCI_ALLOC(sb->e2_sb,
4306                                &bnx2x_fp(bp, index, status_blk_mapping),
4307                                sizeof(struct host_hc_status_block_e2));
4308                else
4309                        BNX2X_PCI_ALLOC(sb->e1x_sb,
4310                                &bnx2x_fp(bp, index, status_blk_mapping),
4311                                sizeof(struct host_hc_status_block_e1x));
4312        }
4313
4314        /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4315         * set shortcuts for it.
4316         */
4317        if (!IS_FCOE_IDX(index))
4318                set_sb_shortcuts(bp, index);
4319
4320        /* Tx */
4321        if (!skip_tx_queue(bp, index)) {
4322                /* fastpath tx rings: tx_buf tx_desc */
4323                for_each_cos_in_tx_queue(fp, cos) {
4324                        struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4325
4326                        DP(NETIF_MSG_IFUP,
4327                           "allocating tx memory of fp %d cos %d\n",
4328                           index, cos);
4329
4330                        BNX2X_ALLOC(txdata->tx_buf_ring,
4331                                sizeof(struct sw_tx_bd) * NUM_TX_BD);
4332                        BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
4333                                &txdata->tx_desc_mapping,
4334                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4335                }
4336        }
4337
4338        /* Rx */
4339        if (!skip_rx_queue(bp, index)) {
4340                /* fastpath rx rings: rx_buf rx_desc rx_comp */
4341                BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
4342                                sizeof(struct sw_rx_bd) * NUM_RX_BD);
4343                BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
4344                                &bnx2x_fp(bp, index, rx_desc_mapping),
4345                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
4346
4347                /* Seed all CQEs by 1s */
4348                BNX2X_PCI_FALLOC(bnx2x_fp(bp, index, rx_comp_ring),
4349                                 &bnx2x_fp(bp, index, rx_comp_mapping),
4350                                 sizeof(struct eth_fast_path_rx_cqe) *
4351                                 NUM_RCQ_BD);
4352
4353                /* SGE ring */
4354                BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
4355                                sizeof(struct sw_rx_page) * NUM_RX_SGE);
4356                BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
4357                                &bnx2x_fp(bp, index, rx_sge_mapping),
4358                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4359                /* RX BD ring */
4360                bnx2x_set_next_page_rx_bd(fp);
4361
4362                /* CQ ring */
4363                bnx2x_set_next_page_rx_cq(fp);
4364
4365                /* BDs */
4366                ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4367                if (ring_size < rx_ring_size)
4368                        goto alloc_mem_err;
4369        }
4370
4371        return 0;
4372
4373/* handles low memory cases */
4374alloc_mem_err:
4375        BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4376                  index, ring_size);
4377        /* FW will drop all packets if the queue is not big enough;
4378         * in that case we disable the queue.
4379         * The minimum size differs for OOO, TPA and non-TPA queues.
4380         */
4381        if (ring_size < (fp->disable_tpa ?
4382                         MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4383                /* release memory allocated for this queue */
4384                bnx2x_free_fp_mem_at(bp, index);
4385                return -ENOMEM;
4386        }
4387        return 0;
4388}
4389
4390int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4391{
4392        if (!NO_FCOE(bp))
4393                /* FCoE */
4394                if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4395                        /* we will fail the load process instead of
4396                         * marking NO_FCOE_FLAG
4397                         */
4398                        return -ENOMEM;
4399
4400        return 0;
4401}
4402
4403int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4404{
4405        int i;
4406
4407        /* 1. Allocate FP for leading - fatal if error
4408         * 2. Allocate RSS - fix number of queues if error
4409         */
4410
4411        /* leading */
4412        if (bnx2x_alloc_fp_mem_at(bp, 0))
4413                return -ENOMEM;
4414
4415        /* RSS */
4416        for_each_nondefault_eth_queue(bp, i)
4417                if (bnx2x_alloc_fp_mem_at(bp, i))
4418                        break;
4419
4420        /* handle memory failures */
4421        if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4422                int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4423
4424                WARN_ON(delta < 0);
4425                bnx2x_shrink_eth_fp(bp, delta);
4426                if (CNIC_SUPPORT(bp))
4427                        /* move non-eth FPs next to the last eth FP;
4428                         * must be done in this order:
4429                         * FCOE_IDX < FWD_IDX < OOO_IDX
4430                         */
4431
4432                        /* move the FCoE fp even if NO_FCOE_FLAG is on */
4433                        bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4434                bp->num_ethernet_queues -= delta;
4435                bp->num_queues = bp->num_ethernet_queues +
4436                                 bp->num_cnic_queues;
4437                BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4438                          bp->num_queues + delta, bp->num_queues);
4439        }
4440
4441        return 0;
4442}
4443
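    /* Free the per-device bookkeeping arrays allocated by
     * bnx2x_alloc_mem_bp().
     */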
4444void bnx2x_free_mem_bp(struct bnx2x *bp)
4445{
4446        int i;
4447
4448        for (i = 0; i < bp->fp_array_size; i++)
4449                kfree(bp->fp[i].tpa_info);
4450        kfree(bp->fp);
4451        kfree(bp->sp_objs);
4452        kfree(bp->fp_stats);
4453        kfree(bp->bnx2x_txq);
4454        kfree(bp->msix_table);
4455        kfree(bp->ilt);
4456}
4457
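    /**
     * bnx2x_alloc_mem_bp - allocate the per-device bookkeeping arrays.
     *
     * @bp:         driver handle
     *
     * Allocates the fastpath array (including per-fp TPA aggregation info),
     * slowpath objects, fastpath statistics, the Tx queue array, the MSI-X
     * table and the ILT. On failure everything allocated so far is freed
     * and -ENOMEM is returned.
     */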
4458int bnx2x_alloc_mem_bp(struct bnx2x *bp)
4459{
4460        struct bnx2x_fastpath *fp;
4461        struct msix_entry *tbl;
4462        struct bnx2x_ilt *ilt;
4463        int msix_table_size = 0;
4464        int fp_array_size, txq_array_size;
4465        int i;
4466
4467        /*
4468         * The biggest MSI-X table we might need is the maximum number of
4469         * fast path IGU SBs plus the default SB (for PF only).
4470         */
4471        msix_table_size = bp->igu_sb_cnt;
4472        if (IS_PF(bp))
4473                msix_table_size++;
4474        BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
4475
4476        /* fp array: RSS plus CNIC related L2 queues */
4477        fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
4478        bp->fp_array_size = fp_array_size;
4479        BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
4480
4481        fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
4482        if (!fp)
4483                goto alloc_err;
4484        for (i = 0; i < bp->fp_array_size; i++) {
4485                fp[i].tpa_info =
4486                        kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4487                                sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4488                if (!(fp[i].tpa_info))
4489                        goto alloc_err;
4490        }
4491
4492        bp->fp = fp;
4493
4494        /* allocate sp objs */
4495        bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
4496                              GFP_KERNEL);
4497        if (!bp->sp_objs)
4498                goto alloc_err;
4499
4500        /* allocate fp_stats */
4501        bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
4502                               GFP_KERNEL);
4503        if (!bp->fp_stats)
4504                goto alloc_err;
4505
4506        /* Allocate memory for the transmission queues array */
4507        txq_array_size =
4508                BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4509        BNX2X_DEV_INFO("txq_array_size %d\n", txq_array_size);
4510
4511        bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4512                                GFP_KERNEL);
4513        if (!bp->bnx2x_txq)
4514                goto alloc_err;
4515
4516        /* msix table */
4517        tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
4518        if (!tbl)
4519                goto alloc_err;
4520        bp->msix_table = tbl;
4521
4522        /* ilt */
4523        ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4524        if (!ilt)
4525                goto alloc_err;
4526        bp->ilt = ilt;
4527
4528        return 0;
4529alloc_err:
4530        bnx2x_free_mem_bp(bp);
4531        return -ENOMEM;
4532}
4533
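    /* Unload and reload the NIC, but only if the interface is running */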
4534int bnx2x_reload_if_running(struct net_device *dev)
4535{
4536        struct bnx2x *bp = netdev_priv(dev);
4537
4538        if (unlikely(!netif_running(dev)))
4539                return 0;
4540
4541        bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
4542        return bnx2x_nic_load(bp, LOAD_NORMAL);
4543}
4544
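    /* Return the index of the currently active PHY (INT_PHY, EXT_PHY1 or
     * EXT_PHY2), based on the link state and the configured PHY selection.
     */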
4545int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4546{
4547        u32 sel_phy_idx = 0;
4548        if (bp->link_params.num_phys <= 1)
4549                return INT_PHY;
4550
4551        if (bp->link_vars.link_up) {
4552                sel_phy_idx = EXT_PHY1;
4553                /* If the link is SERDES, check whether EXT_PHY2 is the one */
4554                if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4555                    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4556                        sel_phy_idx = EXT_PHY2;
4557        } else {
4558
4559                switch (bnx2x_phy_selection(&bp->link_params)) {
4560                case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4561                case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4562                case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4563                       sel_phy_idx = EXT_PHY1;
4564                       break;
4565                case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4566                case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4567                       sel_phy_idx = EXT_PHY2;
4568                       break;
4569                }
4570        }
4571
4572        return sel_phy_idx;
4573}
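
    /* Return the link configuration index of the active PHY, taking PHY
     * swapping into account.
     */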
4574int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4575{
4576        u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4577        /*
4578         * The selected active PHY index always refers to the position after
4579         * swapping (in case PHY swapping is enabled), so when swapping is
4580         * enabled we need to reverse the configuration index.
4581         */
4582
4583        if (bp->link_params.multi_phy_config &
4584            PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4585                if (sel_phy_idx == EXT_PHY1)
4586                        sel_phy_idx = EXT_PHY2;
4587                else if (sel_phy_idx == EXT_PHY2)
4588                        sel_phy_idx = EXT_PHY1;
4589        }
4590        return LINK_CONFIG_IDX(sel_phy_idx);
4591}
4592
4593#ifdef NETDEV_FCOE_WWNN
4594int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4595{
4596        struct bnx2x *bp = netdev_priv(dev);
4597        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4598
4599        switch (type) {
4600        case NETDEV_FCOE_WWNN:
4601                *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4602                                cp->fcoe_wwn_node_name_lo);
4603                break;
4604        case NETDEV_FCOE_WWPN:
4605                *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4606                                cp->fcoe_wwn_port_name_lo);
4607                break;
4608        default:
4609                BNX2X_ERR("Wrong WWN type requested - %d\n", type);
4610                return -EINVAL;
4611        }
4612
4613        return 0;
4614}
4615#endif
4616
4617/* called with rtnl_lock */
4618int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4619{
4620        struct bnx2x *bp = netdev_priv(dev);
4621
4622        if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4623                BNX2X_ERR("Can't perform change MTU during parity recovery\n");
4624                return -EAGAIN;
4625        }
4626
4627        if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
4628            ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4629                BNX2X_ERR("Can't support requested MTU size\n");
4630                return -EINVAL;
4631        }
4632
4633        /* This does not race with packet allocation
4634         * because the actual alloc size is
4635         * only updated as part of load
4636         */
4637        dev->mtu = new_mtu;
4638
4639        return bnx2x_reload_if_running(dev);
4640}
4641
4642netdev_features_t bnx2x_fix_features(struct net_device *dev,
4643                                     netdev_features_t features)
4644{
4645        struct bnx2x *bp = netdev_priv(dev);
4646
4647        /* TPA requires Rx CSUM offloading */
4648        if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
4649                features &= ~NETIF_F_LRO;
4650                features &= ~NETIF_F_GRO;
4651        }
4652
4653        return features;
4654}
4655
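    /* Apply netdev feature changes: map LRO/GRO onto the driver's TPA/GRO
     * flags, update the loopback mode, and reload the NIC when a change
     * requires it.
     */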
4656int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4657{
4658        struct bnx2x *bp = netdev_priv(dev);
4659        u32 flags = bp->flags;
4660        u32 changes;
4661        bool bnx2x_reload = false;
4662
4663        if (features & NETIF_F_LRO)
4664                flags |= TPA_ENABLE_FLAG;
4665        else
4666                flags &= ~TPA_ENABLE_FLAG;
4667
4668        if (features & NETIF_F_GRO)
4669                flags |= GRO_ENABLE_FLAG;
4670        else
4671                flags &= ~GRO_ENABLE_FLAG;
4672
4673        if (features & NETIF_F_LOOPBACK) {
4674                if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4675                        bp->link_params.loopback_mode = LOOPBACK_BMAC;
4676                        bnx2x_reload = true;
4677                }
4678        } else {
4679                if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4680                        bp->link_params.loopback_mode = LOOPBACK_NONE;
4681                        bnx2x_reload = true;
4682                }
4683        }
4684
4685        changes = flags ^ bp->flags;
4686
4687        /* if GRO is changed while LRO is enabled, don't force a reload */
4688        if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
4689                changes &= ~GRO_ENABLE_FLAG;
4690
4691        if (changes)
4692                bnx2x_reload = true;
4693
4694        bp->flags = flags;
4695
4696        if (bnx2x_reload) {
4697                if (bp->recovery_state == BNX2X_RECOVERY_DONE)
4698                        return bnx2x_reload_if_running(dev);
4699                /* else: bnx2x_nic_load() will be called at end of recovery */
4700        }
4701
4702        return 0;
4703}
4704
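    /* Tx watchdog timeout handler: flag the timeout and let the sp_rtnl
     * task perform the reset.
     */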
4705void bnx2x_tx_timeout(struct net_device *dev)
4706{
4707        struct bnx2x *bp = netdev_priv(dev);
4708
4709#ifdef BNX2X_STOP_ON_ERROR
4710        if (!bp->panic)
4711                bnx2x_panic();
4712#endif
4713
4714        smp_mb__before_clear_bit();
4715        set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
4716        smp_mb__after_clear_bit();
4717
4718        /* This allows the netif to be shutdown gracefully before resetting */
4719        schedule_delayed_work(&bp->sp_rtnl_task, 0);
4720}
4721
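    /* PCI suspend handler: detach the netdev, unload the NIC and enter the
     * requested power state.
     */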
4722int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4723{
4724        struct net_device *dev = pci_get_drvdata(pdev);
4725        struct bnx2x *bp;
4726
4727        if (!dev) {
4728                dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4729                return -ENODEV;
4730        }
4731        bp = netdev_priv(dev);
4732
4733        rtnl_lock();
4734
4735        pci_save_state(pdev);
4736
4737        if (!netif_running(dev)) {
4738                rtnl_unlock();
4739                return 0;
4740        }
4741
4742        netif_device_detach(dev);
4743
4744        bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
4745
4746        bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4747
4748        rtnl_unlock();
4749
4750        return 0;
4751}
4752
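    /* PCI resume handler: restore PCI state, re-attach the netdev and reload
     * the NIC (refused with -EAGAIN while parity recovery is in progress).
     */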
4753int bnx2x_resume(struct pci_dev *pdev)
4754{
4755        struct net_device *dev = pci_get_drvdata(pdev);
4756        struct bnx2x *bp;
4757        int rc;
4758
4759        if (!dev) {
4760                dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4761                return -ENODEV;
4762        }
4763        bp = netdev_priv(dev);
4764
4765        if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4766                BNX2X_ERR("Handling parity error recovery. Try again later\n");
4767                return -EAGAIN;
4768        }
4769
4770        rtnl_lock();
4771
4772        pci_restore_state(pdev);
4773
4774        if (!netif_running(dev)) {
4775                rtnl_unlock();
4776                return 0;
4777        }
4778
4779        bnx2x_set_power_state(bp, PCI_D0);
4780        netif_device_attach(dev);
4781
4782        rc = bnx2x_nic_load(bp, LOAD_OPEN);
4783
4784        rtnl_unlock();
4785
4786        return rc;
4787}
4788
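    /* Write CDU validation values into the ustorm and xstorm sections of an
     * Ethernet connection context.
     */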
4789void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4790                              u32 cid)
4791{
4792        /* ustorm cxt validation */
4793        cxt->ustorm_ag_context.cdu_usage =
4794                CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4795                        CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4796        /* xcontext validation */
4797        cxt->xstorm_ag_context.cdu_reserved =
4798                CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4799                        CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4800}
4801
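    /* Program the host coalescing timeout (in ticks) of one status block
     * index into the cstorm internal memory.
     */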
4802static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4803                                    u8 fw_sb_id, u8 sb_index,
4804                                    u8 ticks)
4805{
4806        u32 addr = BAR_CSTRORM_INTMEM +
4807                   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4808        REG_WR8(bp, addr, ticks);
4809        DP(NETIF_MSG_IFUP,
4810           "port %x fw_sb_id %d sb_index %d ticks %d\n",
4811           port, fw_sb_id, sb_index, ticks);
4812}
4813
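    /* Set or clear the HC_ENABLED flag of a status block index in the
     * cstorm internal memory.
     */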
4814static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4815                                    u16 fw_sb_id, u8 sb_index,
4816                                    u8 disable)
4817{
4818        u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4819        u32 addr = BAR_CSTRORM_INTMEM +
4820                   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
4821        u8 flags = REG_RD8(bp, addr);
4822        /* clear the HC_ENABLED bit, then set it according to enable_flag */
4823        flags &= ~HC_INDEX_DATA_HC_ENABLED;
4824        flags |= enable_flag;
4825        REG_WR8(bp, addr, flags);
4826        DP(NETIF_MSG_IFUP,
4827           "port %x fw_sb_id %d sb_index %d disable %d\n",
4828           port, fw_sb_id, sb_index, disable);
4829}
4830
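    /* Update the coalescing timeout (usec) of a single status block index
     * and enable or disable host coalescing for it accordingly.
     */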
4831void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4832                                    u8 sb_index, u8 disable, u16 usec)
4833{
4834        int port = BP_PORT(bp);
4835        u8 ticks = usec / BNX2X_BTR;
4836
4837        storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4838
4839        disable = disable ? 1 : (usec ? 0 : 1);
4840        storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4841}
4842